author     Linus Torvalds <torvalds@linux-foundation.org>  2021-04-26 12:11:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-04-26 12:11:52 -0700
commit     37f00ab4a003f371f81e0eae76cf372f06dec780
tree       c6217483f22a0fac876f12af53f4b8948200f2fd /drivers/firmware
parent     2b90506a8186df5f7c81ad1ebd250103d8469e27
parent     5ffa828534036348fa90fb3079ccc0972d202c4a
Merge tag 'arm-drivers-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "Updates for SoC specific drivers include a few subsystems that have
  their own maintainers but send them through the soc tree:

  TEE/OP-TEE:
   - Add tracepoints around calls to secure world

  Memory controller drivers:
   - Minor fixes for Renesas, Exynos, Mediatek and Tegra platforms
   - Add debug statistics to Tegra20 memory controller
   - Update Tegra bindings and convert to dtschema

  ARM SCMI Firmware:
   - Support for modular SCMI protocols and vendor specific extensions
   - New SCMI IIO driver
   - Per-cpu DVFS

  The other driver changes are all from the platform maintainers
  directly and reflect the drivers that don't fit into any other
  subsystem as well as treewide changes for a particular platform.

  SoCFPGA:
   - Various cleanups contributed by Krzysztof Kozlowski

  Mediatek:
   - add MT8183 support to mutex driver
   - MMSYS: use per SoC array to describe the possible routing
   - add MMSYS support for MT8183 and MT8167
   - add support for PMIC wrapper with integrated arbiter
   - add support for MT8192/MT6873

  Tegra:
   - Bug fixes to PMC and clock drivers

  NXP/i.MX:
   - Update SCU power domain driver to keep console domain power on.
   - Add missing ADC1 power domain to SCU power domain driver.
   - Update comments for single global power domain in SCU power
     domain driver.
   - Add i.MX51/i.MX53 unique id support to i.MX SoC driver.

  NXP/FSL SoC driver updates for v5.13:
   - Add ACPI support for RCPM driver
   - Use generic io{read,write} for QE drivers after performance
     optimized for PowerPC
   - Fix QBMAN probe to cleanup HW states correctly for kexec
   - Various cleanup and style fix for QBMAN/QE/GUTS drivers

  OMAP:
   - Preparation to use devicetree for genpd
   - ti-sysc needs iorange check improved when the interconnect target
     module has no control registers listed
   - ti-sysc needs to probe l4_wkup and l4_cfg interconnects first to
     avoid issues with missing resources and unnecessary deferred probe
   - ti-sysc debug option can now detect more devices
   - ti-sysc now warns if old incomplete devicetree data is found as we
     now rely on it being complete for am3 and 4
   - soc init code needs to check for prcm and prm nodes for omap4/5
     and dra7
   - omap-prm driver needs to enable autoidle retention support for
     omap4
   - omap5 clocks are missing gpmc and ocmc clock registers
   - pci-dra7xx now needs to use builtin_platform_driver instead of
     using builtin_platform_driver_probe for deferred probe to work

  Raspberry Pi:
   - Fix-up all RPi firmware drivers so that unbind happens in an
     orderly fashion
   - Support for RPi's PoE hat PWM bus

  Qualcomm:
   - Improved detection for SCM calling conventions
   - Support for OEM specific wifi firmware path
   - Added drivers for SC7280/SM8350: RPMH, LLCC, AOSS QMP"

* tag 'arm-drivers-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (165 commits)
  soc: aspeed: fix a ternary sign expansion bug
  memory: mtk-smi: Add device-link between smi-larb and smi-common
  memory: samsung: exynos5422-dmc: handle clk_set_parent() failure
  memory: renesas-rpc-if: fix possible NULL pointer dereference of resource
  clk: socfpga: fix iomem pointer cast on 64-bit
  soc: aspeed: Adapt to new LPC device tree layout
  pinctrl: aspeed-g5: Adapt to new LPC device tree layout
  ipmi: kcs: aspeed: Adapt to new LPC DTS layout
  ARM: dts: Remove LPC BMC and Host partitions
  dt-bindings: aspeed-lpc: Remove LPC partitioning
  soc: fsl: enable acpi support in RCPM driver
  soc: qcom: mdt_loader: Detect truncated read of segments
  soc: qcom: mdt_loader: Validate that p_filesz < p_memsz
  soc: qcom: pdr: Fix error return code in pdr_register_listener
  firmware: qcom_scm: Fix kernel-doc function names to match
  firmware: qcom_scm: Suppress sysfs bind attributes
  firmware: qcom_scm: Workaround lack of "is available" call on SC7180
  firmware: qcom_scm: Reduce locking section for __get_convention()
  firmware: qcom_scm: Make __qcom_scm_is_call_available() return bool
  Revert "soc: fsl: qe: introduce qe_io{read,write}* wrappers"
  ...
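The headline SCMI change in this branch is the switch from per-protocol function pointers hung off struct scmi_handle (clk_ops/clk_priv and friends) to modular protocols whose operations are handed back together with an opaque struct scmi_protocol_handle. A minimal, hypothetical consumer-side sketch follows; it assumes the devres-managed helper added in driver.c (scmi_devm_protocol_get) is exposed to SCMI drivers through a handle member named devm_protocol_get, a name that is not visible in the hunks quoted here:

/*
 * Hypothetical SCMI clock consumer under the reworked protocol model.
 * The devm_protocol_get accessor name is an assumption: only its backing
 * helper, scmi_devm_protocol_get(), appears in the driver.c hunks below.
 */
static int dummy_clk_probe(struct scmi_device *sdev)
{
	int ret;
	u64 rate;
	struct scmi_protocol_handle *ph;
	const struct scmi_clk_proto_ops *clk_ops;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/* Protocol operations now take the protocol handle, not scmi_handle. */
	ret = clk_ops->rate_get(ph, 0, &rate);
	if (ret)
		return ret;

	dev_info(&sdev->dev, "clock 0 runs at %llu Hz\n", rate);

	return 0;
}

The returned ops pointer is protocol specific (here scmi_clk_proto_ops from clock.c), and the devres-managed hold is released automatically when the scmi_device is unbound.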
Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/Kconfig                        2
-rw-r--r--  drivers/firmware/arm_scmi/base.c              142
-rw-r--r--  drivers/firmware/arm_scmi/bus.c               100
-rw-r--r--  drivers/firmware/arm_scmi/clock.c             129
-rw-r--r--  drivers/firmware/arm_scmi/common.h            133
-rw-r--r--  drivers/firmware/arm_scmi/driver.c            798
-rw-r--r--  drivers/firmware/arm_scmi/notify.c            328
-rw-r--r--  drivers/firmware/arm_scmi/notify.h             40
-rw-r--r--  drivers/firmware/arm_scmi/perf.c              262
-rw-r--r--  drivers/firmware/arm_scmi/power.c             134
-rw-r--r--  drivers/firmware/arm_scmi/reset.c             146
-rw-r--r--  drivers/firmware/arm_scmi/scmi_pm_domain.c     26
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c           232
-rw-r--r--  drivers/firmware/arm_scmi/system.c             63
-rw-r--r--  drivers/firmware/arm_scmi/voltage.c           126
-rw-r--r--  drivers/firmware/imx/scu-pd.c                  41
-rw-r--r--  drivers/firmware/qcom_scm-legacy.c              4
-rw-r--r--  drivers/firmware/qcom_scm-smc.c                12
-rw-r--r--  drivers/firmware/qcom_scm.c                    89
-rw-r--r--  drivers/firmware/qcom_scm.h                     7
-rw-r--r--  drivers/firmware/raspberrypi.c                 69
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c                5
22 files changed, 2050 insertions, 838 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 5dd19dbd67a3..db0ea2d2d75a 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -206,7 +206,7 @@ config FW_CFG_SYSFS_CMDLINE
config INTEL_STRATIX10_SERVICE
tristate "Intel Stratix10 Service Layer"
- depends on (ARCH_STRATIX10 || ARCH_AGILEX) && HAVE_ARM_SMCCC
+ depends on ARCH_INTEL_SOCFPGA && ARM64 && HAVE_ARM_SMCCC
default n
help
Intel Stratix10 service layer runs at privileged exception level,
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 017e5d8bd869..de416f9e7921 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -2,11 +2,12 @@
/*
* System Control and Management Interface (SCMI) Base Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -50,30 +51,30 @@ struct scmi_base_error_notify_payld {
* scmi_base_attributes_get() - gets the implementation details
* that are associated with the base protocol.
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
-static int scmi_base_attributes_get(const struct scmi_handle *handle)
+static int scmi_base_attributes_get(const struct scmi_protocol_handle *ph)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_base_attributes *attr_info;
- struct scmi_revision_info *rev = handle->version;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr_info), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
attr_info = t->rx.buf;
rev->num_protocols = attr_info->num_protocols;
rev->num_agents = attr_info->num_agents;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -81,19 +82,20 @@ static int scmi_base_attributes_get(const struct scmi_handle *handle)
/**
* scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
* @sub_vendor: specify true if sub-vendor ID is needed
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
-scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
+scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor)
{
u8 cmd;
int ret, size;
char *vendor_id;
struct scmi_xfer *t;
- struct scmi_revision_info *rev = handle->version;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
if (sub_vendor) {
cmd = BASE_DISCOVER_SUB_VENDOR;
@@ -105,15 +107,15 @@ scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
size = ARRAY_SIZE(rev->vendor_id);
}
- ret = scmi_xfer_get_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
+ ret = ph->xops->xfer_get_init(ph, cmd, 0, size, &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
memcpy(vendor_id, t->rx.buf, size);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -123,30 +125,30 @@ scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
* implementation 32-bit version. The format of the version number is
* vendor-specific
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
-scmi_base_implementation_version_get(const struct scmi_handle *handle)
+scmi_base_implementation_version_get(const struct scmi_protocol_handle *ph)
{
int ret;
__le32 *impl_ver;
struct scmi_xfer *t;
- struct scmi_revision_info *rev = handle->version;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
- ret = scmi_xfer_get_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION,
- SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t);
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_IMPLEMENT_VERSION,
+ 0, sizeof(*impl_ver), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
impl_ver = t->rx.buf;
rev->impl_ver = le32_to_cpu(*impl_ver);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -155,23 +157,24 @@ scmi_base_implementation_version_get(const struct scmi_handle *handle)
* scmi_base_implementation_list_get() - gets the list of protocols the
* OSPM is allowed to access
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
* @protocols_imp: pointer to hold the list of protocol identifiers
*
* Return: 0 on success, else appropriate SCMI error.
*/
-static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
- u8 *protocols_imp)
+static int
+scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
+ u8 *protocols_imp)
{
u8 *list;
int ret, loop;
struct scmi_xfer *t;
__le32 *num_skip, *num_ret;
u32 tot_num_ret = 0, loop_num_ret;
- struct device *dev = handle->dev;
+ struct device *dev = ph->dev;
- ret = scmi_xfer_get_init(handle, BASE_DISCOVER_LIST_PROTOCOLS,
- SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS,
+ sizeof(*num_skip), 0, &t);
if (ret)
return ret;
@@ -183,7 +186,7 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
/* Set the number of protocols to be skipped/already read */
*num_skip = cpu_to_le32(tot_num_ret);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
break;
@@ -198,10 +201,10 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
tot_num_ret += loop_num_ret;
- scmi_reset_rx_to_maxsz(handle, t);
+ ph->xops->reset_rx_to_maxsz(ph, t);
} while (loop_num_ret);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -209,7 +212,7 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
/**
* scmi_base_discover_agent_get() - discover the name of an agent
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
* @id: Agent identifier
* @name: Agent identifier ASCII string
*
@@ -218,63 +221,63 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
*
* Return: 0 on success, else appropriate SCMI error.
*/
-static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
+static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
int id, char *name)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, BASE_DISCOVER_AGENT,
- SCMI_PROTOCOL_BASE, sizeof(__le32),
- SCMI_MAX_STR_SIZE, &t);
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
+ sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
if (ret)
return ret;
put_unaligned_le32(id, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_base_error_notify(const struct scmi_handle *handle, bool enable)
+static int scmi_base_error_notify(const struct scmi_protocol_handle *ph,
+ bool enable)
{
int ret;
u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
struct scmi_msg_base_error_notify *cfg;
- ret = scmi_xfer_get_init(handle, BASE_NOTIFY_ERRORS,
- SCMI_PROTOCOL_BASE, sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, BASE_NOTIFY_ERRORS,
+ sizeof(*cfg), 0, &t);
if (ret)
return ret;
cfg = t->tx.buf;
cfg->event_control = cpu_to_le32(evt_cntl);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_base_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_base_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
- ret = scmi_base_error_notify(handle, enable);
+ ret = scmi_base_error_notify(ph, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret);
return ret;
}
-static void *scmi_base_fill_custom_report(const struct scmi_handle *handle,
+static void *scmi_base_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
@@ -318,17 +321,24 @@ static const struct scmi_event_ops base_event_ops = {
.fill_custom_report = scmi_base_fill_custom_report,
};
-int scmi_base_protocol_init(struct scmi_handle *h)
+static const struct scmi_protocol_events base_protocol_events = {
+ .queue_sz = 4 * SCMI_PROTO_QUEUE_SZ,
+ .ops = &base_event_ops,
+ .evts = base_events,
+ .num_events = ARRAY_SIZE(base_events),
+ .num_sources = SCMI_BASE_NUM_SOURCES,
+};
+
+static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
{
int id, ret;
u8 *prot_imp;
u32 version;
char name[SCMI_MAX_STR_SIZE];
- const struct scmi_handle *handle = h;
- struct device *dev = handle->dev;
- struct scmi_revision_info *rev = handle->version;
+ struct device *dev = ph->dev;
+ struct scmi_revision_info *rev = scmi_revision_area_get(ph);
- ret = scmi_version_get(handle, SCMI_PROTOCOL_BASE, &version);
+ ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
@@ -338,13 +348,15 @@ int scmi_base_protocol_init(struct scmi_handle *h)
rev->major_ver = PROTOCOL_REV_MAJOR(version),
rev->minor_ver = PROTOCOL_REV_MINOR(version);
+ ph->set_priv(ph, rev);
+
+ scmi_base_attributes_get(ph);
+ scmi_base_vendor_id_get(ph, false);
+ scmi_base_vendor_id_get(ph, true);
+ scmi_base_implementation_version_get(ph);
+ scmi_base_implementation_list_get(ph, prot_imp);
- scmi_base_attributes_get(handle);
- scmi_base_vendor_id_get(handle, false);
- scmi_base_vendor_id_get(handle, true);
- scmi_base_implementation_version_get(handle);
- scmi_base_implementation_list_get(handle, prot_imp);
- scmi_setup_protocol_implemented(handle, prot_imp);
+ scmi_setup_protocol_implemented(ph, prot_imp);
dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
rev->major_ver, rev->minor_ver, rev->vendor_id,
@@ -352,16 +364,20 @@ int scmi_base_protocol_init(struct scmi_handle *h)
dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
rev->num_agents);
- scmi_register_protocol_events(handle, SCMI_PROTOCOL_BASE,
- (4 * SCMI_PROTO_QUEUE_SZ),
- &base_event_ops, base_events,
- ARRAY_SIZE(base_events),
- SCMI_BASE_NUM_SOURCES);
-
for (id = 0; id < rev->num_agents; id++) {
- scmi_base_discover_agent_get(handle, id, name);
+ scmi_base_discover_agent_get(ph, id, name);
dev_dbg(dev, "Agent %d: %s\n", id, name);
}
return 0;
}
+
+static const struct scmi_protocol scmi_base = {
+ .id = SCMI_PROTOCOL_BASE,
+ .owner = NULL,
+ .instance_init = &scmi_base_protocol_init,
+ .ops = NULL,
+ .events = &base_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(base, scmi_base)
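Under this scheme a protocol is just a descriptor plus an init hook. Built-in protocols, like scmi_base above, use the DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER() helper from common.h, but since scmi_protocol_register()/scmi_protocol_unregister() are exported in bus.c, a vendor protocol module could plausibly do the same thing by hand. The following is an illustrative sketch only (scmi_dummy and protocol id 0x81 are invented; per the SCMI specification, 0x80-0xFF is the vendor/platform-specific range):

/*
 * Illustrative only: a hypothetical loadable vendor protocol registering
 * itself with the SCMI core at module load time.
 */
#include <linux/module.h>
#include <linux/scmi_protocol.h>

#include "common.h"

static int scmi_dummy_protocol_init(const struct scmi_protocol_handle *ph)
{
	u32 version;

	/* Talk to the platform through the core-provided xfer operations. */
	ph->xops->version_get(ph, &version);
	dev_dbg(ph->dev, "Dummy Vendor Protocol Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	return 0;
}

static const struct scmi_protocol scmi_dummy = {
	.id = 0x81,			/* SCMI vendor/platform range is 0x80-0xFF */
	.owner = THIS_MODULE,		/* pinned via try_module_get() while in use */
	.instance_init = &scmi_dummy_protocol_init,
	.ops = NULL,			/* no driver-facing operations in this sketch */
};

static int __init scmi_dummy_init(void)
{
	return scmi_protocol_register(&scmi_dummy);
}
module_init(scmi_dummy_init);

static void __exit scmi_dummy_exit(void)
{
	scmi_protocol_unregister(&scmi_dummy);
}
module_exit(scmi_dummy_exit);

MODULE_LICENSE("GPL");

The core instantiates such a descriptor lazily, the first time a driver requests the protocol, taking a reference on the owning module via scmi_protocol_get() and dropping it again on release (see the driver.c changes further down).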
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 1377ec76a45d..784cf0027da3 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Message Protocol bus layer
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -51,18 +51,53 @@ static int scmi_dev_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
+static int scmi_match_by_id_table(struct device *dev, void *data)
{
- scmi_prot_init_fn_t fn = idr_find(&scmi_protocols, protocol_id);
+ struct scmi_device *sdev = to_scmi_dev(dev);
+ struct scmi_device_id *id_table = data;
- if (unlikely(!fn))
- return -EINVAL;
- return fn(handle);
+ return sdev->protocol_id == id_table->protocol_id &&
+ !strcmp(sdev->name, id_table->name);
}
-static int scmi_protocol_dummy_init(struct scmi_handle *handle)
+struct scmi_device *scmi_child_dev_find(struct device *parent,
+ int prot_id, const char *name)
{
- return 0;
+ struct scmi_device_id id_table;
+ struct device *dev;
+
+ id_table.protocol_id = prot_id;
+ id_table.name = name;
+
+ dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
+ if (!dev)
+ return NULL;
+
+ return to_scmi_dev(dev);
+}
+
+const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (!proto || !try_module_get(proto->owner)) {
+ pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+ return NULL;
+ }
+
+ pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+
+ return proto;
+}
+
+void scmi_protocol_put(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (proto)
+ module_put(proto->owner);
}
static int scmi_dev_probe(struct device *dev)
@@ -70,7 +105,6 @@ static int scmi_dev_probe(struct device *dev)
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id;
- int ret;
id = scmi_dev_match_id(scmi_dev, scmi_drv);
if (!id)
@@ -79,14 +113,6 @@ static int scmi_dev_probe(struct device *dev)
if (!scmi_dev->handle)
return -EPROBE_DEFER;
- ret = scmi_protocol_init(scmi_dev->protocol_id, scmi_dev->handle);
- if (ret)
- return ret;
-
- /* Skip protocol initialisation for additional devices */
- idr_replace(&scmi_protocols, &scmi_protocol_dummy_init,
- scmi_dev->protocol_id);
-
return scmi_drv->probe(scmi_dev);
}
@@ -113,6 +139,10 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
{
int retval;
+ retval = scmi_protocol_device_request(driver->id_table);
+ if (retval)
+ return retval;
+
driver->driver.bus = &scmi_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
@@ -129,6 +159,7 @@ EXPORT_SYMBOL_GPL(scmi_driver_register);
void scmi_driver_unregister(struct scmi_driver *driver)
{
driver_unregister(&driver->driver);
+ scmi_protocol_device_unrequest(driver->id_table);
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
@@ -194,26 +225,45 @@ void scmi_set_handle(struct scmi_device *scmi_dev)
scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
}
-int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn)
+int scmi_protocol_register(const struct scmi_protocol *proto)
{
int ret;
+ if (!proto) {
+ pr_err("invalid protocol\n");
+ return -EINVAL;
+ }
+
+ if (!proto->instance_init) {
+ pr_err("missing init for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
spin_lock(&protocol_lock);
- ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1,
- GFP_ATOMIC);
+ ret = idr_alloc(&scmi_protocols, (void *)proto,
+ proto->id, proto->id + 1, GFP_ATOMIC);
spin_unlock(&protocol_lock);
- if (ret != protocol_id)
- pr_err("unable to allocate SCMI idr slot, err %d\n", ret);
+ if (ret != proto->id) {
+ pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ proto->id, ret);
+ return ret;
+ }
- return ret;
+ pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);
-void scmi_protocol_unregister(int protocol_id)
+void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
spin_lock(&protocol_lock);
- idr_remove(&scmi_protocols, protocol_id);
+ idr_remove(&scmi_protocols, proto->id);
spin_unlock(&protocol_lock);
+
+ pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+
+ return;
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 4645677d86f1..35b56c8ba0c0 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -2,9 +2,10 @@
/*
* System Control and Management Interface (SCMI) Clock Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
+#include <linux/module.h>
#include <linux/sort.h>
#include "common.h"
@@ -74,52 +75,53 @@ struct clock_info {
struct scmi_clock_info *clk;
};
-static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
- struct clock_info *ci)
+static int
+scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct clock_info *ci)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_protocol_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
ci->num_clocks = le16_to_cpu(attr->num_clocks);
ci->max_async_req = attr->max_async_req;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_clock_attributes_get(const struct scmi_handle *handle,
+static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
u32 clk_id, struct scmi_clock_info *clk)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *attr;
- ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
- sizeof(clk_id), sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
+ sizeof(clk_id), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(clk_id, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
else
clk->name[0] = '\0';
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -136,7 +138,7 @@ static int rate_cmp_func(const void *_r1, const void *_r2)
}
static int
-scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
struct scmi_clock_info *clk)
{
u64 *rate = NULL;
@@ -148,8 +150,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
struct scmi_msg_clock_describe_rates *clk_desc;
struct scmi_msg_resp_clock_describe_rates *rlist;
- ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
- SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
+ sizeof(*clk_desc), 0, &t);
if (ret)
return ret;
@@ -161,7 +163,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
/* Set the number of rates to be skipped/already read */
clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
goto err;
@@ -171,7 +173,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
num_returned = NUM_RETURNED(rates_flag);
if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
- dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
+ dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
break;
}
@@ -179,7 +181,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
- dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
+ dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
clk->range.min_rate, clk->range.max_rate,
clk->range.step_size);
break;
@@ -188,12 +190,12 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
rate = &clk->list.rates[tot_rate_cnt];
for (cnt = 0; cnt < num_returned; cnt++, rate++) {
*rate = RATE_TO_U64(rlist->rate[cnt]);
- dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
+ dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
}
tot_rate_cnt += num_returned;
- scmi_reset_rx_to_maxsz(handle, t);
+ ph->xops->reset_rx_to_maxsz(ph, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -208,42 +210,42 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
clk->rate_discrete = rate_discrete;
err:
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
+scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
- sizeof(__le32), sizeof(u64), &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
+ sizeof(__le32), sizeof(u64), &t);
if (ret)
return ret;
put_unaligned_le32(clk_id, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le64(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
- u64 rate)
+static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 rate)
{
int ret;
u32 flags = 0;
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
- struct clock_info *ci = handle->clk_priv;
+ struct clock_info *ci = ph->get_priv(ph);
- ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
- sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -258,26 +260,27 @@ static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
cfg->value_high = cpu_to_le32(rate >> 32);
if (flags & CLOCK_SET_ASYNC)
- ret = scmi_do_xfer_with_response(handle, t);
+ ret = ph->xops->do_xfer_with_response(ph, t);
else
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ci->max_async_req)
atomic_dec(&ci->cur_async_req);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
+scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u32 config)
{
int ret;
struct scmi_xfer *t;
struct scmi_clock_set_config *cfg;
- ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
- sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
+ sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -285,33 +288,33 @@ scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(config);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
+static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
}
-static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
+static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- return scmi_clock_config_set(handle, clk_id, 0);
+ return scmi_clock_config_set(ph, clk_id, 0);
}
-static int scmi_clock_count_get(const struct scmi_handle *handle)
+static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
- struct clock_info *ci = handle->clk_priv;
+ struct clock_info *ci = ph->get_priv(ph);
return ci->num_clocks;
}
static const struct scmi_clock_info *
-scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
+scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- struct clock_info *ci = handle->clk_priv;
+ struct clock_info *ci = ph->get_priv(ph);
struct scmi_clock_info *clk = ci->clk + clk_id;
if (!clk->name[0])
@@ -320,7 +323,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
return clk;
}
-static const struct scmi_clk_ops clk_ops = {
+static const struct scmi_clk_proto_ops clk_proto_ops = {
.count_get = scmi_clock_count_get,
.info_get = scmi_clock_info_get,
.rate_get = scmi_clock_rate_get,
@@ -329,24 +332,24 @@ static const struct scmi_clk_ops clk_ops = {
.disable = scmi_clock_disable,
};
-static int scmi_clock_protocol_init(struct scmi_handle *handle)
+static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
int clkid, ret;
struct clock_info *cinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Clock Version %d.%d\n",
+ dev_dbg(ph->dev, "Clock Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
+ cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
- scmi_clock_protocol_attributes_get(handle, cinfo);
+ scmi_clock_protocol_attributes_get(ph, cinfo);
- cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
+ cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
sizeof(*cinfo->clk), GFP_KERNEL);
if (!cinfo->clk)
return -ENOMEM;
@@ -354,16 +357,20 @@ static int scmi_clock_protocol_init(struct scmi_handle *handle)
for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
struct scmi_clock_info *clk = cinfo->clk + clkid;
- ret = scmi_clock_attributes_get(handle, clkid, clk);
+ ret = scmi_clock_attributes_get(ph, clkid, clk);
if (!ret)
- scmi_clock_describe_rates_get(handle, clkid, clk);
+ scmi_clock_describe_rates_get(ph, clkid, clk);
}
cinfo->version = version;
- handle->clk_ops = &clk_ops;
- handle->clk_priv = cinfo;
-
- return 0;
+ return ph->set_priv(ph, cinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)
+static const struct scmi_protocol scmi_clock = {
+ .id = SCMI_PROTOCOL_CLOCK,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_clock_protocol_init,
+ .ops = &clk_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c0fb45e7c3e8..228bf4a71d23 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -4,7 +4,7 @@
* driver common header file containing some definitions, structures
* and function prototypes used in all the different SCMI protocols.
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H
@@ -14,11 +14,14 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#include <asm/unaligned.h>
+#include "notify.h"
+
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
@@ -141,22 +144,92 @@ struct scmi_xfer {
struct completion *async_done;
};
-void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
-int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
-int scmi_do_xfer_with_response(const struct scmi_handle *h,
- struct scmi_xfer *xfer);
-int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
- size_t tx_size, size_t rx_size, struct scmi_xfer **p);
-void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
- struct scmi_xfer *xfer);
+struct scmi_xfer_ops;
+
+/**
+ * struct scmi_protocol_handle - Reference to an initialized protocol instance
+ *
+ * @dev: A reference to the associated SCMI instance device (handle->dev).
+ * @xops: A reference to a struct holding refs to the core xfer operations that
+ * can be used by the protocol implementation to generate SCMI messages.
+ * @set_priv: A method to set protocol private data for this instance.
+ * @get_priv: A method to get protocol private data previously set.
+ *
+ * This structure represents a protocol initialized against a specific SCMI
+ * instance and it will be used as follows:
+ * - as a parameter fed from the core to the protocol initialization code so
+ * that it can access the core xfer operations to build and generate SCMI
+ * messages exclusively for the specific underlying protocol instance.
+ * - as an opaque handle fed by an SCMI driver user when it tries to access
+ * this protocol through its own protocol operations.
+ * In this case this handle will be returned as an opaque object together
+ * with the related protocol operations when the SCMI driver tries to access
+ * the protocol.
+ */
+struct scmi_protocol_handle {
+ struct device *dev;
+ const struct scmi_xfer_ops *xops;
+ int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
+ void *(*get_priv)(const struct scmi_protocol_handle *ph);
+};
+
+/**
+ * struct scmi_xfer_ops - References to the core SCMI xfer operations.
+ * @version_get: Get the version of this protocol.
+ * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
+ * @reset_rx_to_maxsz: Reset rx size to max transport size.
+ * @do_xfer: Do the SCMI transfer.
+ * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
+ * @xfer_put: Free the xfer slot.
+ *
+ * Note that all these operations expect a protocol handle as their first
+ * parameter; they then internally use it to infer the underlying protocol
+ * number: this way it is not possible for a protocol implementation to forge
+ * messages for another protocol.
+ */
+struct scmi_xfer_ops {
+ int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
+ int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
+ size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p);
+ void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ void (*xfer_put)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+};
+
+struct scmi_revision_info *
+scmi_revision_area_get(const struct scmi_protocol_handle *ph);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);
void scmi_set_handle(struct scmi_device *scmi_dev);
-int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version);
-void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
-int scmi_base_protocol_init(struct scmi_handle *h);
+typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
+
+/**
+ * struct scmi_protocol - Protocol descriptor
+ * @id: Protocol ID.
+ * @owner: Module reference if any.
+ * @instance_init: Mandatory protocol initialization function.
+ * @instance_deinit: Optional protocol de-initialization function.
+ * @ops: Optional reference to the operations provided by the protocol and
+ * exposed in scmi_protocol.h.
+ * @events: An optional reference to the events supported by this protocol.
+ */
+struct scmi_protocol {
+ const u8 id;
+ struct module *owner;
+ const scmi_prot_init_ph_fn_t instance_init;
+ const scmi_prot_init_ph_fn_t instance_deinit;
+ const void *ops;
+ const struct scmi_protocol_events *events;
+};
int __init scmi_bus_init(void);
void __exit scmi_bus_exit(void);
@@ -164,6 +237,7 @@ void __exit scmi_bus_exit(void);
#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
int __init scmi_##func##_register(void); \
void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(base);
DECLARE_SCMI_REGISTER_UNREGISTER(clock);
DECLARE_SCMI_REGISTER_UNREGISTER(perf);
DECLARE_SCMI_REGISTER_UNREGISTER(power);
@@ -172,17 +246,25 @@ DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
DECLARE_SCMI_REGISTER_UNREGISTER(system);
-#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(id, name) \
-int __init scmi_##name##_register(void) \
-{ \
- return scmi_protocol_register((id), &scmi_##name##_protocol_init); \
-} \
-\
-void __exit scmi_##name##_unregister(void) \
-{ \
- scmi_protocol_unregister((id)); \
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
+static const struct scmi_protocol *__this_proto = &(proto); \
+ \
+int __init scmi_##name##_register(void) \
+{ \
+ return scmi_protocol_register(__this_proto); \
+} \
+ \
+void __exit scmi_##name##_unregister(void) \
+{ \
+ scmi_protocol_unregister(__this_proto); \
}
+const struct scmi_protocol *scmi_protocol_get(int protocol_id);
+void scmi_protocol_put(int protocol_id);
+
+int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
+void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
+
/* SCMI Transport */
/**
* struct scmi_chan_info - Structure representing a SCMI channel information
@@ -227,6 +309,11 @@ struct scmi_transport_ops {
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
+int scmi_protocol_device_request(const struct scmi_device_id *id_table);
+void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table);
+struct scmi_device *scmi_child_dev_find(struct device *parent,
+ int prot_id, const char *name);
+
/**
* struct scmi_desc - Description of SoC integration
*
@@ -265,4 +352,8 @@ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+void scmi_notification_instance_data_set(const struct scmi_handle *handle,
+ void *priv);
+void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
+
#endif /* _SCMI_COMMON_H */
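Taken together, the scmi_protocol_handle and scmi_xfer_ops declared here reduce every protocol operation to the same pattern the base.c and clock.c conversions above follow: grab an xfer slot, fill the tx buffer, do the transfer, read the rx buffer, release the slot, with per-instance state parked behind set_priv()/get_priv(). A condensed, hypothetical example (scmi_dummy_count_get and struct dummy_info are invented; the xops calls are the ones declared in this header and PROTOCOL_ATTRIBUTES is the generic SCMI command already used above):

/* Hypothetical per-instance state, stored with ph->set_priv() at init time. */
struct dummy_info {
	u32 num_things;
};

static int scmi_dummy_count_get(const struct scmi_protocol_handle *ph, u32 *count)
{
	int ret;
	struct scmi_xfer *t;
	struct dummy_info *di = ph->get_priv(ph);

	/* Get a free xfer slot; the protocol id is implied by @ph. */
	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(u32), &t);
	if (ret)
		return ret;

	/* Transmit and wait for the synchronous response. */
	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		di->num_things = get_unaligned_le32(t->rx.buf);
		*count = di->num_things;
	}

	/* Release the xfer slot so another message can reuse it. */
	ph->xops->xfer_put(ph, t);

	return ret;
}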
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 3e748e57deab..66eb3f0e5daf 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -11,18 +11,22 @@
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#include <linux/bitmap.h>
+#include <linux/device.h>
#include <linux/export.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
#include "common.h"
@@ -53,6 +57,14 @@ static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
+static DEFINE_IDR(scmi_requested_devices);
+static DEFINE_MUTEX(scmi_requested_devices_mtx);
+
+struct scmi_requested_dev {
+ const struct scmi_device_id *id_table;
+ struct list_head node;
+};
+
/**
* struct scmi_xfers_info - Structure to manage transfer information
*
@@ -69,6 +81,30 @@ struct scmi_xfers_info {
};
/**
+ * struct scmi_protocol_instance - Describe an initialized protocol instance.
+ * @handle: Reference to the SCMI handle associated to this protocol instance.
+ * @proto: A reference to the protocol descriptor.
+ * @gid: A reference for per-protocol devres management.
+ * @users: A refcount to track effective users of this protocol.
+ * @priv: Reference for optional protocol private data.
+ * @ph: An embedded protocol handle that will be passed down to protocol
+ * initialization code to identify this instance.
+ *
+ * Each protocol is initialized independently once for each SCMI platform in
+ * which it is defined by DT and implemented by the SCMI server fw.
+ */
+struct scmi_protocol_instance {
+ const struct scmi_handle *handle;
+ const struct scmi_protocol *proto;
+ void *gid;
+ refcount_t users;
+ void *priv;
+ struct scmi_protocol_handle ph;
+};
+
+#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
+
+/**
* struct scmi_info - Structure representing a SCMI instance
*
* @dev: Device pointer
@@ -80,8 +116,15 @@ struct scmi_xfers_info {
* @rx_minfo: Universal Receive Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
+ * @protocols: IDR for protocols' instance descriptors initialized for
+ * this SCMI instance: populated on protocol's first attempted
+ * usage.
+ * @protocols_mtx: A mutex to protect protocols instances initialization.
* @protocols_imp: List of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * @active_protocols: IDR storing device_nodes for protocols actually defined
+ * in the DT and confirmed as implemented by fw.
+ * @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
*/
@@ -94,7 +137,12 @@ struct scmi_info {
struct scmi_xfers_info rx_minfo;
struct idr tx_idr;
struct idr rx_idr;
+ struct idr protocols;
+ /* Ensure mutual exclusive access to protocols instance array */
+ struct mutex protocols_mtx;
u8 *protocols_imp;
+ struct idr active_protocols;
+ void *notify_priv;
struct list_head node;
int users;
};
@@ -136,6 +184,25 @@ static inline void scmi_dump_header_dbg(struct device *dev,
hdr->id, hdr->seq, hdr->protocol_id);
}
+void scmi_notification_instance_data_set(const struct scmi_handle *handle,
+ void *priv)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ info->notify_priv = priv;
+ /* Ensure updated protocol private data is visible */
+ smp_wmb();
+}
+
+void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ /* Ensure protocols_private_data has been updated */
+ smp_rmb();
+ return info->notify_priv;
+}
+
/**
* scmi_xfer_get() - Allocate one message
*
@@ -316,14 +383,16 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
}
/**
- * scmi_xfer_put() - Release a transmit message
+ * xfer_put() - Release a transmit message
*
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
* @xfer: message that was reserved by scmi_xfer_get
*/
-void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static void xfer_put(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
{
- struct scmi_info *info = handle_to_scmi_info(handle);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
__scmi_xfer_put(&info->tx_minfo, xfer);
}
@@ -340,23 +409,32 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
}
/**
- * scmi_do_xfer() - Do one transfer
+ * do_xfer() - Do one transfer
*
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
* Return: -ETIMEDOUT in case of no response, if transmit error,
* return corresponding error, else if all goes well,
* return 0.
*/
-int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static int do_xfer(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
{
int ret;
int timeout;
- struct scmi_info *info = handle_to_scmi_info(handle);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
+ /*
+ * Re-instate the protocol id here from the protocol handle so that it cannot
+ * be overridden by mistake (or malice) by protocol code mangling the
+ * scmi_xfer structure.
+ */
+ xfer->hdr.protocol_id = pi->proto->id;
+
cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
if (unlikely(!cinfo))
return -EINVAL;
@@ -402,10 +480,11 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
return ret;
}
-void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
- struct scmi_xfer *xfer)
+static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
{
- struct scmi_info *info = handle_to_scmi_info(handle);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
xfer->rx.len = info->desc->max_msg_size;
}
@@ -413,24 +492,27 @@ void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
/**
- * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
+ * do_xfer_with_response() - Do one transfer and wait until the delayed
* response is received
*
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
* Return: -ETIMEDOUT in case of no delayed response, if transmit error,
* return corresponding error, else if all goes well, return 0.
*/
-int scmi_do_xfer_with_response(const struct scmi_handle *handle,
- struct scmi_xfer *xfer)
+static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
{
int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
DECLARE_COMPLETION_ONSTACK(async_response);
+ xfer->hdr.protocol_id = pi->proto->id;
+
xfer->async_done = &async_response;
- ret = scmi_do_xfer(handle, xfer);
+ ret = do_xfer(ph, xfer);
if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
ret = -ETIMEDOUT;
@@ -439,11 +521,10 @@ int scmi_do_xfer_with_response(const struct scmi_handle *handle,
}
/**
- * scmi_xfer_get_init() - Allocate and initialise one message for transmit
+ * xfer_get_init() - Allocate and initialise one message for transmit
*
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
* @msg_id: Message identifier
- * @prot_id: Protocol identifier for the message
* @tx_size: transmit message size
* @rx_size: receive message size
* @p: pointer to the allocated and initialised message
@@ -454,12 +535,14 @@ int scmi_do_xfer_with_response(const struct scmi_handle *handle,
* Return: 0 if all went fine with @p pointing to message, else
* corresponding error.
*/
-int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
- size_t tx_size, size_t rx_size, struct scmi_xfer **p)
+static int xfer_get_init(const struct scmi_protocol_handle *ph,
+ u8 msg_id, size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p)
{
int ret;
struct scmi_xfer *xfer;
- struct scmi_info *info = handle_to_scmi_info(handle);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
struct device *dev = info->dev;
@@ -468,7 +551,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
tx_size > info->desc->max_msg_size)
return -ERANGE;
- xfer = scmi_xfer_get(handle, minfo);
+ xfer = scmi_xfer_get(pi->handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
@@ -478,7 +561,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
xfer->hdr.id = msg_id;
- xfer->hdr.protocol_id = prot_id;
+ xfer->hdr.protocol_id = pi->proto->id;
xfer->hdr.poll_completion = false;
*p = xfer;
@@ -487,43 +570,276 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
}
/**
- * scmi_version_get() - command to get the revision of the SCMI entity
+ * version_get() - command to get the revision of the SCMI entity
*
- * @handle: Pointer to SCMI entity handle
- * @protocol: Protocol identifier for the message
+ * @ph: Pointer to SCMI protocol handle
* @version: Holds returned version of protocol.
*
* Updates the SCMI information in the internal data structure.
*
* Return: 0 if all went fine, else return appropriate error.
*/
-int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
- u32 *version)
+static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
int ret;
__le32 *rev_info;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
- sizeof(*version), &t);
+ ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = do_xfer(ph, t);
if (!ret) {
rev_info = t->rx.buf;
*version = le32_to_cpu(*rev_info);
}
- scmi_xfer_put(handle, t);
+ xfer_put(ph, t);
return ret;
}
-void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
- u8 *prot_imp)
+/**
+ * scmi_set_protocol_priv - Set protocol specific data at init time
+ *
+ * @ph: A reference to the protocol handle.
+ * @priv: The private data to set.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
+ void *priv)
{
+ struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ pi->priv = priv;
+
+ return 0;
+}
+
+/**
+ * scmi_get_protocol_priv - Get protocol specific data set at init time
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * Return: Protocol private data if any was set.
+ */
+static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ return pi->priv;
+}
+
+static const struct scmi_xfer_ops xfer_ops = {
+ .version_get = version_get,
+ .xfer_get_init = xfer_get_init,
+ .reset_rx_to_maxsz = reset_rx_to_maxsz,
+ .do_xfer = do_xfer,
+ .do_xfer_with_response = do_xfer_with_response,
+ .xfer_put = xfer_put,
+};
+
+/**
+ * scmi_revision_area_get - Retrieve version memory area.
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * A helper to grab the version memory area reference during SCMI Base protocol
+ * initialization.
+ *
+ * Return: A reference to the version memory area associated to the SCMI
+ * instance underlying this protocol handle.
+ */
+struct scmi_revision_info *
+scmi_revision_area_get(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ return pi->handle->version;
+}
+
+/**
+ * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
+ * instance descriptor.
+ * @info: The reference to the related SCMI instance.
+ * @proto: The protocol descriptor.
+ *
+ * Allocate a new protocol instance descriptor, using the provided @proto
+ * description, against the specified SCMI instance @info, and initialize it;
+ * all resources management is handled via a dedicated per-protocol devres
+ * group.
+ *
+ * Context: Assumes to be called with @protocols_mtx already acquired.
+ * Return: A reference to a freshly allocated and initialized protocol instance
+ * or ERR_PTR on failure. On failure the @proto reference is at first
+ * put using @scmi_protocol_put() before releasing all the devres group.
+ */
+static struct scmi_protocol_instance *
+scmi_alloc_init_protocol_instance(struct scmi_info *info,
+ const struct scmi_protocol *proto)
+{
+ int ret = -ENOMEM;
+ void *gid;
+ struct scmi_protocol_instance *pi;
+ const struct scmi_handle *handle = &info->handle;
+
+ /* Protocol specific devres group */
+ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
+ if (!gid) {
+ scmi_protocol_put(proto->id);
+ goto out;
+ }
+
+ pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ goto clean;
+
+ pi->gid = gid;
+ pi->proto = proto;
+ pi->handle = handle;
+ pi->ph.dev = handle->dev;
+ pi->ph.xops = &xfer_ops;
+ pi->ph.set_priv = scmi_set_protocol_priv;
+ pi->ph.get_priv = scmi_get_protocol_priv;
+ refcount_set(&pi->users, 1);
+ /* proto->instance_init is assured NON NULL by scmi_protocol_register */
+ ret = pi->proto->instance_init(&pi->ph);
+ if (ret)
+ goto clean;
+
+ ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
+ GFP_KERNEL);
+ if (ret != proto->id)
+ goto clean;
+
+ /*
+ * Warn but ignore events registration errors since we do not want
+ * to skip whole protocols if their notifications are messed up.
+ */
+ if (pi->proto->events) {
+ ret = scmi_register_protocol_events(handle, pi->proto->id,
+ &pi->ph,
+ pi->proto->events);
+ if (ret)
+ dev_warn(handle->dev,
+ "Protocol:%X - Events Registration Failed - err:%d\n",
+ pi->proto->id, ret);
+ }
+
+ devres_close_group(handle->dev, pi->gid);
+ dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
+
+ return pi;
+
+clean:
+ /* Take care to put the protocol module's owner before releasing all */
+ scmi_protocol_put(proto->id);
+ devres_release_group(handle->dev, gid);
+out:
+ return ERR_PTR(ret);
+}
+
+/**
+ * scmi_get_protocol_instance - Protocol initialization helper.
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * In case the required protocol has never been requested before for this
+ * instance, allocate and initialize all the needed structures while handling
+ * resource allocation with a dedicated per-protocol devres subgroup.
+ *
+ * Return: A reference to an initialized protocol instance or error on failure:
+ * in particular returns -EPROBE_DEFER when the desired protocol could
+ * NOT be found.
+ */
+static struct scmi_protocol_instance * __must_check
+scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
struct scmi_info *info = handle_to_scmi_info(handle);
+ mutex_lock(&info->protocols_mtx);
+ pi = idr_find(&info->protocols, protocol_id);
+
+ if (pi) {
+ refcount_inc(&pi->users);
+ } else {
+ const struct scmi_protocol *proto;
+
+ /* Fails if protocol not registered on bus */
+ proto = scmi_protocol_get(protocol_id);
+ if (proto)
+ pi = scmi_alloc_init_protocol_instance(info, proto);
+ else
+ pi = ERR_PTR(-EPROBE_DEFER);
+ }
+ mutex_unlock(&info->protocols_mtx);
+
+ return pi;
+}
+
+/**
+ * scmi_protocol_acquire - Protocol acquire
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * Register a new user for the requested protocol on the specified SCMI
+ * platform instance, possibly triggering its initialization on first user.
+ *
+ * Return: 0 if protocol was acquired successfully.
+ */
+int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
+{
+ return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
+}
+
+/**
+ * scmi_protocol_release - Protocol de-initialization helper.
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * Remove one user of the specified protocol and trigger de-initialization
+ * and resources de-allocation once the last user has gone.
+ */
+void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_protocol_instance *pi;
+
+ mutex_lock(&info->protocols_mtx);
+ pi = idr_find(&info->protocols, protocol_id);
+ if (WARN_ON(!pi))
+ goto out;
+
+ if (refcount_dec_and_test(&pi->users)) {
+ void *gid = pi->gid;
+
+ if (pi->proto->events)
+ scmi_deregister_protocol_events(handle, protocol_id);
+
+ if (pi->proto->instance_deinit)
+ pi->proto->instance_deinit(&pi->ph);
+
+ idr_remove(&info->protocols, protocol_id);
+
+ scmi_protocol_put(protocol_id);
+
+ devres_release_group(handle->dev, gid);
+ dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
+ protocol_id);
+ }
+
+out:
+ mutex_unlock(&info->protocols_mtx);
+}
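
To make the refcounted life-cycle concrete, here is a minimal sketch of how these two core helpers are meant to be paired, assuming kernel context and a valid handle; the hyp_ function is purely illustrative and not part of this patch.

/* Sketch only: balanced acquire/release of a protocol on one instance.
 * The first acquire triggers allocation/initialization of the protocol
 * instance; the last release triggers de-init and devres group release.
 */
static int hyp_use_sensor_protocol(const struct scmi_handle *handle)
{
	int ret;

	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_SENSOR);
	if (ret)
		return ret;	/* e.g. -EPROBE_DEFER if not yet registered */

	/* ... use the protocol through its initialized instance ... */

	scmi_protocol_release(handle, SCMI_PROTOCOL_SENSOR);
	return 0;
}
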
+
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
+ u8 *prot_imp)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
info->protocols_imp = prot_imp;
}
@@ -542,6 +858,102 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
return false;
}
+struct scmi_protocol_devres {
+ const struct scmi_handle *handle;
+ u8 protocol_id;
+};
+
+static void scmi_devm_release_protocol(struct device *dev, void *res)
+{
+ struct scmi_protocol_devres *dres = res;
+
+ scmi_protocol_release(dres->handle, dres->protocol_id);
+}
+
+/**
+ * scmi_devm_protocol_get - Devres managed get protocol operations and handle
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ * @ph: A pointer reference used to pass back the associated protocol handle.
+ *
+ * Get hold of a protocol, accounting for its usage and possibly triggering its
+ * initialization on first use, and return the protocol-specific operations and
+ * the related protocol handle which is used as the first argument in most of
+ * the protocol operations.
+ * Being a devres-based managed method, the protocol hold is automatically
+ * released, and the protocol possibly de-initialized on last user, once the
+ * SCMI driver owning the scmi_device is unbound from it.
+ *
+ * Return: A reference to the requested protocol operations or error.
+ * Must be checked for errors by caller.
+ */
+static const void __must_check *
+scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
+ struct scmi_protocol_handle **ph)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_protocol_devres *dres;
+ struct scmi_handle *handle = sdev->handle;
+
+ if (!ph)
+ return ERR_PTR(-EINVAL);
+
+ dres = devres_alloc(scmi_devm_release_protocol,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return ERR_PTR(-ENOMEM);
+
+ pi = scmi_get_protocol_instance(handle, protocol_id);
+ if (IS_ERR(pi)) {
+ devres_free(dres);
+ return pi;
+ }
+
+ dres->handle = handle;
+ dres->protocol_id = protocol_id;
+ devres_add(&sdev->dev, dres);
+
+ *ph = &pi->ph;
+
+ return pi->proto->ops;
+}
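
A usage sketch from the point of view of a client driver, acquiring the perf protocol through the devres-managed accessor installed on the scmi_handle later in this patch; the hyp_ names are assumptions for illustration only.

/* Sketch only: get protocol ops and handle from a client driver probe. */
static int hyp_client_probe(struct scmi_device *sdev)
{
	const struct scmi_perf_proto_ops *perf_ops;
	struct scmi_protocol_handle *ph;

	if (!sdev->handle)
		return -ENODEV;

	perf_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF,
						   &ph);
	if (IS_ERR(perf_ops))
		return PTR_ERR(perf_ops);

	/* Protocol operations now take @ph, not the scmi_handle, e.g.:
	 *   bool mw = perf_ops->power_scale_mw_get(ph);
	 * The hold is dropped automatically when this driver unbinds.
	 */
	return 0;
}
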
+
+static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
+{
+ struct scmi_protocol_devres *dres = res;
+
+ if (WARN_ON(!dres || !data))
+ return 0;
+
+ return dres->protocol_id == *((u8 *)data);
+}
+
+/**
+ * scmi_devm_protocol_put - Devres managed put protocol operations and handle
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ *
+ * Explicitly release a protocol hold previously obtained by calling the above
+ * @scmi_devm_protocol_get.
+ */
+static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
+{
+ int ret;
+
+ ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
+ scmi_devm_protocol_match, &protocol_id);
+ WARN_ON(ret);
+}
+
+static inline
+struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
+{
+ info->users++;
+ return &info->handle;
+}
+
/**
* scmi_handle_get() - Get the SCMI handle for a device
*
@@ -563,8 +975,7 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
list_for_each(p, &scmi_list) {
info = list_entry(p, struct scmi_info, node);
if (dev->parent == info->dev) {
- handle = &info->handle;
- info->users++;
+ handle = scmi_handle_get_from_info_unlocked(info);
break;
}
}
@@ -707,63 +1118,268 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
return ret;
}
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+/**
+ * scmi_get_protocol_device - Helper to get/create an SCMI device.
+ *
+ * @np: A device node representing a valid active protocol for the referred
+ * SCMI instance.
+ * @info: The referred SCMI instance for which we are getting/creating this
+ * device.
+ * @prot_id: The protocol ID.
+ * @name: The device name.
+ *
+ * Referring to the specific SCMI instance identified by @info, this helper
+ * takes care to return a properly initialized device matching the requested
+ * @prot_id and @name: if the device does not exist yet, it is created as a
+ * child of the specified SCMI instance @info and its transport properly
+ * initialized as usual.
+ *
+ * Return: A pointer to the requested device, or NULL on failure.
+ */
+static inline struct scmi_device *
+scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
+ int prot_id, const char *name)
{
struct scmi_device *sdev;
+ /* Already created for this parent SCMI instance? */
+ sdev = scmi_child_dev_find(info->dev, prot_id, name);
+ if (sdev)
+ return sdev;
+
+ pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
+
sdev = scmi_device_create(np, info->dev, prot_id, name);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
- return;
+ return NULL;
}
if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
- return;
+ return NULL;
}
+ return sdev;
+}
+
+static inline void
+scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = scmi_get_protocol_device(np, info, prot_id, name);
+ if (!sdev)
+ return;
+
/* setup handle now as the transport is ready */
scmi_set_handle(sdev);
}
-#define MAX_SCMI_DEV_PER_PROTOCOL 2
-struct scmi_prot_devnames {
- int protocol_id;
- char *names[MAX_SCMI_DEV_PER_PROTOCOL];
-};
+/**
+ * scmi_create_protocol_devices - Create devices for all pending requests for
+ * this SCMI instance.
+ *
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ *
+ * All the devices previously requested for this instance (if any) are found
+ * and created by scanning the proper &scmi_requested_devices entry.
+ */
+static void scmi_create_protocol_devices(struct device_node *np,
+ struct scmi_info *info, int prot_id)
+{
+ struct list_head *phead;
-static struct scmi_prot_devnames devnames[] = {
- { SCMI_PROTOCOL_POWER, { "genpd" },},
- { SCMI_PROTOCOL_SYSTEM, { "syspower" },},
- { SCMI_PROTOCOL_PERF, { "cpufreq" },},
- { SCMI_PROTOCOL_CLOCK, { "clocks" },},
- { SCMI_PROTOCOL_SENSOR, { "hwmon", "iiodev" },},
- { SCMI_PROTOCOL_RESET, { "reset" },},
- { SCMI_PROTOCOL_VOLTAGE, { "regulator" },},
-};
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, prot_id);
+ if (phead) {
+ struct scmi_requested_dev *rdev;
-static inline void
-scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
- int prot_id)
+ list_for_each_entry(rdev, phead, node)
+ scmi_create_protocol_device(np, info, prot_id,
+ rdev->id_table->name);
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
+}
+
+/**
+ * scmi_protocol_device_request - Helper to request a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be created.
+ *
+ * This helper lets an SCMI driver request specific devices identified by the
+ * @id_table to be created for each active SCMI instance.
+ *
+ * The requested device name MUST NOT already exist for any protocol;
+ * at first the freshly requested @id_table is annotated in the IDR table
+ * @scmi_requested_devices, then a matching device is created for each already
+ * active SCMI instance (if any).
+ *
+ * This way the requested device is created straight away for all the already
+ * initialized (probed) SCMI instances (handles), and it also remains annotated
+ * as pending creation in case the requesting SCMI driver was loaded before
+ * some SCMI instance and related transports were available: when such a late
+ * instance is probed, its probe will take care to scan the list of pending
+ * requested devices and create those on its own (see
+ * @scmi_create_protocol_devices and its enclosing loop).
+ *
+ * Return: 0 on Success
+ */
+int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
- int loop, cnt;
+ int ret = 0;
+ unsigned int id = 0;
+ struct list_head *head, *phead = NULL;
+ struct scmi_requested_dev *rdev;
+ struct scmi_info *info;
- for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
- if (devnames[loop].protocol_id != prot_id)
- continue;
+ pr_debug("Requesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
- for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
- const char *name = devnames[loop].names[cnt];
+ /*
+ * Search for the matching protocol rdev list and then search
+ * for any existing device with the same name: fail if a duplicate is found.
+ */
+ mutex_lock(&scmi_requested_devices_mtx);
+ idr_for_each_entry(&scmi_requested_devices, head, id) {
+ if (!phead) {
+ /* A list found registered in the IDR is never empty */
+ rdev = list_first_entry(head, struct scmi_requested_dev,
+ node);
+ if (rdev->id_table->protocol_id ==
+ id_table->protocol_id)
+ phead = head;
+ }
+ list_for_each_entry(rdev, head, node) {
+ if (!strcmp(rdev->id_table->name, id_table->name)) {
+ pr_err("Ignoring duplicate request [%d] %s\n",
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * No duplicate found for requested id_table, so let's create a new
+ * requested device entry for this new valid request.
+ */
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rdev->id_table = id_table;
- if (name)
- scmi_create_protocol_device(np, info, prot_id,
- name);
+ /*
+ * Append the new requested device table descriptor to the head of the
+ * related protocol list, creating such a head first if it is not
+ * already there.
+ */
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead) {
+ kfree(rdev);
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(phead);
+
+ ret = idr_alloc(&scmi_requested_devices, (void *)phead,
+ id_table->protocol_id,
+ id_table->protocol_id + 1, GFP_KERNEL);
+ if (ret != id_table->protocol_id) {
+ pr_err("Failed to save SCMI device - ret:%d\n", ret);
+ kfree(rdev);
+ kfree(phead);
+ ret = -EINVAL;
+ goto out;
}
+ ret = 0;
}
+ list_add(&rdev->node, phead);
+
+ /*
+ * Now effectively create and initialize the requested device for every
+ * already initialized SCMI instance which has registered the requested
+ * protocol as a valid active one: i.e. defined in the DT and supported
+ * by the current platform firmware.
+ */
+ mutex_lock(&scmi_list_mutex);
+ list_for_each_entry(info, &scmi_list, node) {
+ struct device_node *child;
+
+ child = idr_find(&info->active_protocols,
+ id_table->protocol_id);
+ if (child) {
+ struct scmi_device *sdev;
+
+ sdev = scmi_get_protocol_device(child, info,
+ id_table->protocol_id,
+ id_table->name);
+ /* Set handle if not already set: device existed */
+ if (sdev && !sdev->handle)
+ sdev->handle =
+ scmi_handle_get_from_info_unlocked(info);
+ } else {
+ dev_err(info->dev,
+ "Failed. SCMI protocol %d not active.\n",
+ id_table->protocol_id);
+ }
+ }
+ mutex_unlock(&scmi_list_mutex);
+
+out:
+ mutex_unlock(&scmi_requested_devices_mtx);
+
+ return ret;
+}
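
A hedged sketch of a caller of this helper and of its counterpart below, reusing the perf/"cpufreq" pairing that appears elsewhere in this series; the hyp_ wrappers are assumptions, not part of the patch.

/* Illustrative only: request (and later release) a named device for a
 * protocol across all SCMI instances.
 */
static const struct scmi_device_id hyp_cpufreq_id = {
	.protocol_id = SCMI_PROTOCOL_PERF,
	.name = "cpufreq",
};

static int hyp_request_cpufreq_devices(void)
{
	/* Annotated in scmi_requested_devices and instantiated on every
	 * already-probed instance exposing the perf protocol.
	 */
	return scmi_protocol_device_request(&hyp_cpufreq_id);
}

static void hyp_unrequest_cpufreq_devices(void)
{
	/* Drops only the request; existing devices live on until the
	 * SCMI core itself is unbound.
	 */
	scmi_protocol_device_unrequest(&hyp_cpufreq_id);
}
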
+
+/**
+ * scmi_protocol_device_unrequest - Helper to unrequest a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
+ *
+ * A helper to let an SCMI driver release its request for devices; note that
+ * devices are created and initialized once the first SCMI driver requests
+ * them, but they are destroyed only on SCMI core unloading/unbinding.
+ *
+ * The current SCMI transport layer uses such devices as internal references,
+ * and as such they could be shared, as the same transport, between multiple
+ * drivers, so they cannot be safely destroyed until the whole SCMI stack is
+ * removed (unless the further burden of refcounting is added).
+ */
+void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+{
+ struct list_head *phead;
+
+ pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ struct scmi_requested_dev *victim, *tmp;
+
+ list_for_each_entry_safe(victim, tmp, phead, node) {
+ if (!strcmp(victim->id_table->name, id_table->name)) {
+ list_del(&victim->node);
+ kfree(victim);
+ break;
+ }
+ }
+
+ if (list_empty(phead)) {
+ idr_remove(&scmi_requested_devices,
+ id_table->protocol_id);
+ kfree(phead);
+ }
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
}
static int scmi_probe(struct platform_device *pdev)
@@ -786,6 +1402,9 @@ static int scmi_probe(struct platform_device *pdev)
info->dev = dev;
info->desc = desc;
INIT_LIST_HEAD(&info->node);
+ idr_init(&info->protocols);
+ mutex_init(&info->protocols_mtx);
+ idr_init(&info->active_protocols);
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
@@ -794,6 +1413,8 @@ static int scmi_probe(struct platform_device *pdev)
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
+ handle->devm_protocol_get = scmi_devm_protocol_get;
+ handle->devm_protocol_put = scmi_devm_protocol_put;
ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
@@ -806,9 +1427,14 @@ static int scmi_probe(struct platform_device *pdev)
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
- ret = scmi_base_protocol_init(handle);
+ /*
+ * Trigger SCMI Base protocol initialization.
+ * It's mandatory and is never released/de-initialized until the
+ * SCMI stack is shut down/unloaded as a whole.
+ */
+ ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
- dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
+ dev_err(dev, "unable to communicate with SCMI\n");
return ret;
}
@@ -831,6 +1457,19 @@ static int scmi_probe(struct platform_device *pdev)
continue;
}
+ /*
+ * Save this valid DT protocol descriptor amongst
+ * @active_protocols for this SCMI instance.
+ */
+ ret = idr_alloc(&info->active_protocols, child,
+ prot_id, prot_id + 1, GFP_KERNEL);
+ if (ret != prot_id) {
+ dev_err(dev, "SCMI protocol %d already activated. Skip\n",
+ prot_id);
+ continue;
+ }
+
+ of_node_get(child);
scmi_create_protocol_devices(child, info, prot_id);
}
@@ -844,9 +1483,10 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
static int scmi_remove(struct platform_device *pdev)
{
- int ret = 0;
+ int ret = 0, id;
struct scmi_info *info = platform_get_drvdata(pdev);
struct idr *idr = &info->tx_idr;
+ struct device_node *child;
mutex_lock(&scmi_list_mutex);
if (info->users)
@@ -860,6 +1500,14 @@ static int scmi_remove(struct platform_device *pdev)
scmi_notification_exit(&info->handle);
+ mutex_lock(&info->protocols_mtx);
+ idr_destroy(&info->protocols);
+ mutex_unlock(&info->protocols_mtx);
+
+ idr_for_each_entry(&info->active_protocols, child, id)
+ of_node_put(child);
+ idr_destroy(&info->active_protocols);
+
/* Safe to free channels since no more users */
ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
idr_destroy(&info->tx_idr);
@@ -942,6 +1590,8 @@ static int __init scmi_driver_init(void)
{
scmi_bus_init();
+ scmi_base_register();
+
scmi_clock_register();
scmi_perf_register();
scmi_power_register();
@@ -956,7 +1606,7 @@ subsys_initcall(scmi_driver_init);
static void __exit scmi_driver_exit(void)
{
- scmi_bus_exit();
+ scmi_base_unregister();
scmi_clock_unregister();
scmi_perf_unregister();
@@ -966,6 +1616,8 @@ static void __exit scmi_driver_exit(void)
scmi_voltage_unregister();
scmi_system_unregister();
+ scmi_bus_exit();
+
platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 66196b293b6c..d860bebd984a 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Notification support
*
- * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020-2021 ARM Ltd.
*/
/**
* DOC: Theory of operation
@@ -91,6 +91,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include "common.h"
#include "notify.h"
#define SCMI_MAX_PROTO 256
@@ -177,7 +178,7 @@
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \
({ \
typeof(revt) r = revt; \
- r->proto->ops->set_notify_enabled(r->proto->ni->handle, \
+ r->proto->ops->set_notify_enabled(r->proto->ph, \
(eid), (sid), (state)); \
})
@@ -190,7 +191,7 @@
#define REVT_FILL_REPORT(revt, ...) \
({ \
typeof(revt) r = revt; \
- r->proto->ops->fill_custom_report(r->proto->ni->handle, \
+ r->proto->ops->fill_custom_report(r->proto->ph, \
__VA_ARGS__); \
})
@@ -278,6 +279,7 @@ struct scmi_registered_event;
* events' descriptors, whose fixed-size is determined at
* compile time.
* @registered_mtx: A mutex to protect @registered_events_handlers
+ * @ph: SCMI protocol handle reference
* @registered_events_handlers: An hashtable containing all events' handlers
* descriptors registered for this protocol
*
@@ -302,6 +304,7 @@ struct scmi_registered_events_desc {
struct scmi_registered_event **registered_events;
/* mutex to protect registered_events_handlers */
struct mutex registered_mtx;
+ const struct scmi_protocol_handle *ph;
DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};
@@ -368,7 +371,7 @@ static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl);
-static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl);
/**
@@ -579,11 +582,9 @@ int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
struct scmi_event_header eh;
struct scmi_notify_instance *ni;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return 0;
- ni = handle->notify_priv;
r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
if (!r_evt)
@@ -732,14 +733,10 @@ scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
/**
* scmi_register_protocol_events() - Register Protocol Events with the core
* @handle: The handle identifying the platform instance against which the
- * the protocol's events are registered
+ * protocol's events are registered
* @proto_id: Protocol ID
- * @queue_sz: Size in bytes of the associated queue to be allocated
- * @ops: Protocol specific event-related operations
- * @evt: Event descriptor array
- * @num_events: Number of events in @evt array
- * @num_sources: Number of possible sources for this protocol on this
- * platform.
+ * @ph: SCMI protocol handle.
+ * @ee: A structure describing the events supported by this protocol.
*
* Used by SCMI Protocols initialization code to register with the notification
* core the list of supported events and their descriptors: takes care to
@@ -748,60 +745,69 @@ scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
*
* Return: 0 on Success
*/
-int scmi_register_protocol_events(const struct scmi_handle *handle,
- u8 proto_id, size_t queue_sz,
- const struct scmi_event_ops *ops,
- const struct scmi_event *evt, int num_events,
- int num_sources)
+int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
+ const struct scmi_protocol_handle *ph,
+ const struct scmi_protocol_events *ee)
{
int i;
+ unsigned int num_sources;
size_t payld_sz = 0;
struct scmi_registered_events_desc *pd;
struct scmi_notify_instance *ni;
+ const struct scmi_event *evt;
- if (!ops || !evt)
+ if (!ee || !ee->ops || !ee->evts || !ph ||
+ (!ee->num_sources && !ee->ops->get_num_sources))
return -EINVAL;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return -ENOMEM;
- ni = handle->notify_priv;
- /* Attach to the notification main devres group */
- if (!devres_open_group(ni->handle->dev, ni->gid, GFP_KERNEL))
- return -ENOMEM;
+ /* num_sources cannot be <= 0 */
+ if (ee->num_sources) {
+ num_sources = ee->num_sources;
+ } else {
+ int nsrc = ee->ops->get_num_sources(ph);
- for (i = 0; i < num_events; i++)
+ if (nsrc <= 0)
+ return -EINVAL;
+ num_sources = nsrc;
+ }
+
+ evt = ee->evts;
+ for (i = 0; i < ee->num_events; i++)
payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
payld_sz += sizeof(struct scmi_event_header);
- pd = scmi_allocate_registered_events_desc(ni, proto_id, queue_sz,
- payld_sz, num_events, ops);
+ pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
+ payld_sz, ee->num_events,
+ ee->ops);
if (IS_ERR(pd))
- goto err;
+ return PTR_ERR(pd);
- for (i = 0; i < num_events; i++, evt++) {
+ pd->ph = ph;
+ for (i = 0; i < ee->num_events; i++, evt++) {
struct scmi_registered_event *r_evt;
r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
GFP_KERNEL);
if (!r_evt)
- goto err;
+ return -ENOMEM;
r_evt->proto = pd;
r_evt->evt = evt;
r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
sizeof(refcount_t), GFP_KERNEL);
if (!r_evt->sources)
- goto err;
+ return -ENOMEM;
r_evt->num_sources = num_sources;
mutex_init(&r_evt->sources_mtx);
r_evt->report = devm_kzalloc(ni->handle->dev,
evt->max_report_sz, GFP_KERNEL);
if (!r_evt->report)
- goto err;
+ return -ENOMEM;
pd->registered_events[i] = r_evt;
/* Ensure events are updated */
@@ -815,8 +821,6 @@ int scmi_register_protocol_events(const struct scmi_handle *handle,
/* Ensure protocols are updated */
smp_wmb();
- devres_close_group(ni->handle->dev, ni->gid);
-
/*
* Finalize any pending events' handler which could have been waiting
* for this protocol's events registration.
@@ -824,13 +828,33 @@ int scmi_register_protocol_events(const struct scmi_handle *handle,
schedule_work(&ni->init_work);
return 0;
+}
-err:
- dev_warn(handle->dev, "Proto:%X - Registration Failed !\n", proto_id);
- /* A failing protocol registration does not trigger full failure */
- devres_close_group(ni->handle->dev, ni->gid);
+/**
+ * scmi_deregister_protocol_events - Deregister protocol events with the core
+ * @handle: The handle identifying the platform instance against which the
+ * protocol's events are registered
+ * @proto_id: Protocol ID
+ */
+void scmi_deregister_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id)
+{
+ struct scmi_notify_instance *ni;
+ struct scmi_registered_events_desc *pd;
- return -ENOMEM;
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
+ return;
+
+ pd = ni->registered_protocols[proto_id];
+ if (!pd)
+ return;
+
+ ni->registered_protocols[proto_id] = NULL;
+ /* Ensure protocols are updated */
+ smp_wmb();
+
+ cancel_work_sync(&pd->equeue.notify_work);
}
/**
@@ -900,9 +924,21 @@ static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
if (!r_evt)
return -EINVAL;
- /* Remove from pending and insert into registered */
+ /*
+ * Remove from pending and insert into registered while getting hold
+ * of the protocol instance.
+ */
hash_del(&hndl->hash);
+ /*
+ * Acquire protocols only for NON pending handlers, so as NOT to trigger
+ * protocol initialization when a notifier is registered against a still
+ * not registered protocol, since it would make little sense to force the
+ * init of protocols for which no SCMI driver user exists yet: they would
+ * not emit any event anyway until some SCMI driver starts using them.
+ */
+ scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
hndl->r_evt = r_evt;
+
mutex_lock(&r_evt->proto->registered_mtx);
hash_add(r_evt->proto->registered_events_handlers,
&hndl->hash, hndl->key);
@@ -1193,41 +1229,65 @@ static int scmi_disable_events(struct scmi_event_handler *hndl)
* * unregister and free the handler itself
*
* Context: Assumes all the proper locking has been managed by the caller.
+ *
+ * Return: True if handler was freed (users dropped to zero)
*/
-static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
+ bool freed = false;
+
if (refcount_dec_and_test(&hndl->users)) {
if (!IS_HNDL_PENDING(hndl))
scmi_disable_events(hndl);
scmi_free_event_handler(hndl);
+ freed = true;
}
+
+ return freed;
}
static void scmi_put_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
+ bool freed;
+ u8 protocol_id;
struct scmi_registered_event *r_evt = hndl->r_evt;
mutex_lock(&ni->pending_mtx);
- if (r_evt)
+ if (r_evt) {
+ protocol_id = r_evt->proto->id;
mutex_lock(&r_evt->proto->registered_mtx);
+ }
- scmi_put_handler_unlocked(ni, hndl);
+ freed = scmi_put_handler_unlocked(ni, hndl);
- if (r_evt)
+ if (r_evt) {
mutex_unlock(&r_evt->proto->registered_mtx);
+ /*
+ * Only a registered handler acquired the protocol; it must be
+ * released only AFTER unlocking registered_mtx, since
+ * releasing a protocol can trigger its de-initialization
+ * (i.e. including r_evt and registered_mtx).
+ */
+ if (freed)
+ scmi_protocol_release(ni->handle, protocol_id);
+ }
mutex_unlock(&ni->pending_mtx);
}
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
+ bool freed;
struct scmi_registered_event *r_evt = hndl->r_evt;
+ u8 protocol_id = r_evt->proto->id;
mutex_lock(&r_evt->proto->registered_mtx);
- scmi_put_handler_unlocked(ni, hndl);
+ freed = scmi_put_handler_unlocked(ni, hndl);
mutex_unlock(&r_evt->proto->registered_mtx);
+ if (freed)
+ scmi_protocol_release(ni->handle, protocol_id);
}
/**
@@ -1247,7 +1307,7 @@ static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
}
/**
- * scmi_register_notifier() - Register a notifier_block for an event
+ * scmi_notifier_register() - Register a notifier_block for an event
* @handle: The handle identifying the platform instance against which the
* callback is registered
* @proto_id: Protocol ID
@@ -1279,8 +1339,8 @@ static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
*
* Return: 0 on Success
*/
-static int scmi_register_notifier(const struct scmi_handle *handle,
- u8 proto_id, u8 evt_id, u32 *src_id,
+static int scmi_notifier_register(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, const u32 *src_id,
struct notifier_block *nb)
{
int ret = 0;
@@ -1288,11 +1348,9 @@ static int scmi_register_notifier(const struct scmi_handle *handle,
struct scmi_event_handler *hndl;
struct scmi_notify_instance *ni;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return -ENODEV;
- ni = handle->notify_priv;
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
@@ -1313,7 +1371,7 @@ static int scmi_register_notifier(const struct scmi_handle *handle,
}
/**
- * scmi_unregister_notifier() - Unregister a notifier_block for an event
+ * scmi_notifier_unregister() - Unregister a notifier_block for an event
* @handle: The handle identifying the platform instance against which the
* callback is unregistered
* @proto_id: Protocol ID
@@ -1328,19 +1386,17 @@ static int scmi_register_notifier(const struct scmi_handle *handle,
*
* Return: 0 on Success
*/
-static int scmi_unregister_notifier(const struct scmi_handle *handle,
- u8 proto_id, u8 evt_id, u32 *src_id,
+static int scmi_notifier_unregister(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, const u32 *src_id,
struct notifier_block *nb)
{
u32 evt_key;
struct scmi_event_handler *hndl;
struct scmi_notify_instance *ni;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return -ENODEV;
- ni = handle->notify_priv;
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
@@ -1356,7 +1412,7 @@ static int scmi_unregister_notifier(const struct scmi_handle *handle,
scmi_put_handler(ni, hndl);
/*
- * This balances the initial get issued in @scmi_register_notifier.
+ * This balances the initial get issued in @scmi_notifier_register.
* If this notifier_block happened to be the last known user callback
* for this event, the handler is here freed and the event's generation
* stopped.
@@ -1371,6 +1427,129 @@ static int scmi_unregister_notifier(const struct scmi_handle *handle,
return 0;
}
+struct scmi_notifier_devres {
+ const struct scmi_handle *handle;
+ u8 proto_id;
+ u8 evt_id;
+ u32 __src_id;
+ u32 *src_id;
+ struct notifier_block *nb;
+};
+
+static void scmi_devm_release_notifier(struct device *dev, void *res)
+{
+ struct scmi_notifier_devres *dres = res;
+
+ scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
+ dres->src_id, dres->nb);
+}
+
+/**
+ * scmi_devm_notifier_register() - Managed registration of a notifier_block
+ * for an event
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic devres managed helper to register a notifier_block against a
+ * protocol event.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_devm_notifier_register(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret;
+ struct scmi_notifier_devres *dres;
+
+ dres = devres_alloc(scmi_devm_release_notifier,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return -ENOMEM;
+
+ ret = scmi_notifier_register(sdev->handle, proto_id,
+ evt_id, src_id, nb);
+ if (ret) {
+ devres_free(dres);
+ return ret;
+ }
+
+ dres->handle = sdev->handle;
+ dres->proto_id = proto_id;
+ dres->evt_id = evt_id;
+ dres->nb = nb;
+ if (src_id) {
+ dres->__src_id = *src_id;
+ dres->src_id = &dres->__src_id;
+ } else {
+ dres->src_id = NULL;
+ }
+ devres_add(&sdev->dev, dres);
+
+ return ret;
+}
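
A sketch of how a client driver could hook a notifier through this new devres-managed op, which is exposed via notify_ops further below; the proto/event IDs are taken from this series, while the hyp_ names and callback body are assumptions.

/* Illustrative only: register for perf limits-changed events from a
 * hypothetical client driver, for ALL sources (src_id == NULL).
 */
static int hyp_limits_notify_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	/* data points to the protocol-specific report filled in by
	 * the protocol's fill_custom_report helper.
	 */
	return NOTIFY_OK;
}

static struct notifier_block hyp_limits_nb = {
	.notifier_call = hyp_limits_notify_cb,
};

static int hyp_register_limits_notifier(struct scmi_device *sdev)
{
	struct scmi_handle *handle = sdev->handle;

	return handle->notify_ops->devm_event_notifier_register(sdev,
				SCMI_PROTOCOL_PERF,
				SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
				NULL, &hyp_limits_nb);
}
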
+
+static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
+{
+ struct scmi_notifier_devres *dres = res;
+ struct scmi_notifier_devres *xres = data;
+
+ if (WARN_ON(!dres || !xres))
+ return 0;
+
+ return dres->proto_id == xres->proto_id &&
+ dres->evt_id == xres->evt_id &&
+ dres->nb == xres->nb &&
+ ((!dres->src_id && !xres->src_id) ||
+ (dres->src_id && xres->src_id &&
+ dres->__src_id == xres->__src_id));
+}
+
+/**
+ * scmi_devm_notifier_unregister() - Managed un-registration of a
+ * notifier_block for an event
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic devres managed helper to explicitly un-register a notifier_block
+ * against a protocol event, which was previously registered using the above
+ * @scmi_devm_notifier_register.
+ */
+static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret;
+ struct scmi_notifier_devres dres;
+
+ dres.handle = sdev->handle;
+ dres.proto_id = proto_id;
+ dres.evt_id = evt_id;
+ if (src_id) {
+ dres.__src_id = *src_id;
+ dres.src_id = &dres.__src_id;
+ } else {
+ dres.src_id = NULL;
+ }
+
+ ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
+ scmi_devm_notifier_match, &dres);
+
+ WARN_ON(ret);
+
+ return ret;
+}
+
/**
* scmi_protocols_late_init() - Worker for late initialization
* @work: The work item to use associated to the proper SCMI instance
@@ -1428,8 +1607,10 @@ static void scmi_protocols_late_init(struct work_struct *work)
* directly from an scmi_driver to register its own notifiers.
*/
static const struct scmi_notify_ops notify_ops = {
- .register_event_notifier = scmi_register_notifier,
- .unregister_event_notifier = scmi_unregister_notifier,
+ .devm_event_notifier_register = scmi_devm_notifier_register,
+ .devm_event_notifier_unregister = scmi_devm_notifier_unregister,
+ .event_notifier_register = scmi_notifier_register,
+ .event_notifier_unregister = scmi_notifier_unregister,
};
/**
@@ -1490,8 +1671,8 @@ int scmi_notification_init(struct scmi_handle *handle)
INIT_WORK(&ni->init_work, scmi_protocols_late_init);
+ scmi_notification_instance_data_set(handle, ni);
handle->notify_ops = &notify_ops;
- handle->notify_priv = ni;
/* Ensure handle is up to date */
smp_wmb();
@@ -1503,7 +1684,7 @@ int scmi_notification_init(struct scmi_handle *handle)
err:
dev_warn(handle->dev, "Initialization Failed.\n");
- devres_release_group(handle->dev, NULL);
+ devres_release_group(handle->dev, gid);
return -ENOMEM;
}
@@ -1515,15 +1696,10 @@ void scmi_notification_exit(struct scmi_handle *handle)
{
struct scmi_notify_instance *ni;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return;
- ni = handle->notify_priv;
-
- handle->notify_priv = NULL;
- /* Ensure handle is up to date */
- smp_wmb();
+ scmi_notification_instance_data_set(handle, NULL);
/* Destroy while letting pending work complete */
destroy_workqueue(ni->notify_wq);
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
index 3485f20fa70e..ce0324be6c71 100644
--- a/drivers/firmware/arm_scmi/notify.h
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -4,7 +4,7 @@
* notification header file containing some definitions, structures
* and function prototypes related to SCMI Notification handling.
*
- * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020-2021 ARM Ltd.
*/
#ifndef _SCMI_NOTIFY_H
#define _SCMI_NOTIFY_H
@@ -31,8 +31,12 @@ struct scmi_event {
size_t max_report_sz;
};
+struct scmi_protocol_handle;
+
/**
* struct scmi_event_ops - Protocol helpers called by the notification core.
+ * @get_num_sources: Returns the number of possible events' sources for this
+ * protocol
* @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
* using the proper custom protocol commands.
* Return 0 on Success
@@ -46,22 +50,42 @@ struct scmi_event {
* process context.
*/
struct scmi_event_ops {
- int (*set_notify_enabled)(const struct scmi_handle *handle,
+ int (*get_num_sources)(const struct scmi_protocol_handle *ph);
+ int (*set_notify_enabled)(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enabled);
- void *(*fill_custom_report)(const struct scmi_handle *handle,
+ void *(*fill_custom_report)(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id);
};
+/**
+ * struct scmi_protocol_events - Per-protocol description of available events
+ * @queue_sz: Size in bytes of the per-protocol queue to use.
+ * @ops: Protocol-specific event operations.
+ * @evts: Array of supported protocol's events.
+ * @num_events: Number of supported protocol's events described in @evts.
+ * @num_sources: Number of protocol's sources, should be greater than 0; if not
+ * available at compile time, it will be provided at run-time via
+ * @get_num_sources.
+ */
+struct scmi_protocol_events {
+ size_t queue_sz;
+ const struct scmi_event_ops *ops;
+ const struct scmi_event *evts;
+ unsigned int num_events;
+ unsigned int num_sources;
+};
+
int scmi_notification_init(struct scmi_handle *handle);
void scmi_notification_exit(struct scmi_handle *handle);
-int scmi_register_protocol_events(const struct scmi_handle *handle,
- u8 proto_id, size_t queue_sz,
- const struct scmi_event_ops *ops,
- const struct scmi_event *evt, int num_events,
- int num_sources);
+struct scmi_protocol_handle;
+int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
+ const struct scmi_protocol_handle *ph,
+ const struct scmi_protocol_events *ee);
+void scmi_deregister_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id);
int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
const void *buf, size_t len, ktime_t ts);
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index e374b1125fca..f4cd5193b961 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Performance Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
@@ -175,21 +176,21 @@ static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
PERF_NOTIFY_LEVEL,
};
-static int scmi_perf_attributes_get(const struct scmi_handle *handle,
+static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u16 flags = le16_to_cpu(attr->flags);
@@ -200,28 +201,27 @@ static int scmi_perf_attributes_get(const struct scmi_handle *handle,
pi->stats_size = le32_to_cpu(attr->stats_size);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
- struct perf_dom_info *dom_info)
+scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct perf_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_domain_attributes *attr;
- ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_PERF, sizeof(domain),
- sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 flags = le32_to_cpu(attr->flags);
@@ -245,7 +245,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -257,7 +257,7 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
}
static int
-scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
+scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
struct perf_dom_info *perf_dom)
{
int ret, cnt;
@@ -268,8 +268,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
struct scmi_msg_perf_describe_levels *dom_info;
struct scmi_msg_resp_perf_describe_levels *level_info;
- ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
- SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
+ sizeof(*dom_info), 0, &t);
if (ret)
return ret;
@@ -281,14 +281,14 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
/* Set the number of OPPs to be skipped/already read */
dom_info->level_index = cpu_to_le32(tot_opp_cnt);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
break;
num_returned = le16_to_cpu(level_info->num_returned);
num_remaining = le16_to_cpu(level_info->num_remaining);
if (tot_opp_cnt + num_returned > MAX_OPPS) {
- dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
+ dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
break;
}
@@ -299,13 +299,13 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
opp->trans_latency_us = le16_to_cpu
(level_info->opp[cnt].transition_latency_us);
- dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
+ dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
opp->perf, opp->power, opp->trans_latency_us);
}
tot_opp_cnt += num_returned;
- scmi_reset_rx_to_maxsz(handle, t);
+ ph->xops->reset_rx_to_maxsz(ph, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -313,7 +313,7 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
} while (num_returned && num_remaining);
perf_dom->opp_count = tot_opp_cnt;
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
return ret;
@@ -353,15 +353,15 @@ static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
#endif
}
-static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
- u32 max_perf, u32 min_perf)
+static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_limits *limits;
- ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
- sizeof(*limits), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
+ sizeof(*limits), 0, &t);
if (ret)
return ret;
@@ -370,16 +370,16 @@ static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
limits->max_level = cpu_to_le32(max_perf);
limits->min_level = cpu_to_le32(min_perf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
- u32 max_perf, u32 min_perf)
+static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->limit_set_addr) {
@@ -389,24 +389,24 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
return 0;
}
- return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
+ return scmi_perf_mb_limits_set(ph, domain, max_perf, min_perf);
}
-static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
- u32 *max_perf, u32 *min_perf)
+static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *max_perf, u32 *min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_get_limits *limits;
- ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
- sizeof(__le32), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
+ sizeof(__le32), 0, &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
limits = t->rx.buf;
@@ -414,14 +414,14 @@ static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
*min_perf = le32_to_cpu(limits->min_level);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
- u32 *max_perf, u32 *min_perf)
+static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *max_perf, u32 *min_perf)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->limit_get_addr) {
@@ -430,18 +430,17 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
return 0;
}
- return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
+ return scmi_perf_mb_limits_get(ph, domain, max_perf, min_perf);
}
-static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
- u32 level, bool poll)
+static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 level, bool poll)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_level *lvl;
- ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
- sizeof(*lvl), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
if (ret)
return ret;
@@ -450,16 +449,16 @@ static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
lvl->domain = cpu_to_le32(domain);
lvl->level = cpu_to_le32(level);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
- u32 level, bool poll)
+static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 level, bool poll)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->level_set_addr) {
@@ -468,35 +467,35 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
return 0;
}
- return scmi_perf_mb_level_set(handle, domain, level, poll);
+ return scmi_perf_mb_level_set(ph, domain, level, poll);
}
-static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
- u32 *level, bool poll)
+static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *level, bool poll)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
- sizeof(u32), sizeof(u32), &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
+ sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
t->hdr.poll_completion = poll;
put_unaligned_le32(domain, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*level = get_unaligned_le32(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
- u32 *level, bool poll)
+static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *level, bool poll)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->level_get_addr) {
@@ -504,10 +503,10 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
return 0;
}
- return scmi_perf_mb_level_get(handle, domain, level, poll);
+ return scmi_perf_mb_level_get(ph, domain, level, poll);
}
-static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
+static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
u32 domain, int message_id,
bool enable)
{
@@ -515,8 +514,7 @@ static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_perf_notify_level_or_limits *notify;
- ret = scmi_xfer_get_init(handle, message_id, SCMI_PROTOCOL_PERF,
- sizeof(*notify), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
if (ret)
return ret;
@@ -524,9 +522,9 @@ static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
notify->domain = cpu_to_le32(domain);
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -540,7 +538,7 @@ static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
}
static void
-scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
+scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain,
u32 message_id, void __iomem **p_addr,
struct scmi_fc_db_info **p_db)
{
@@ -557,9 +555,8 @@ scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
if (!p_addr)
return;
- ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
- SCMI_PROTOCOL_PERF,
- sizeof(*info), sizeof(*resp), &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ sizeof(*info), sizeof(*resp), &t);
if (ret)
return;
@@ -567,7 +564,7 @@ scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
info->domain = cpu_to_le32(domain);
info->message_id = cpu_to_le32(message_id);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
goto err_xfer;
@@ -579,20 +576,20 @@ scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
phys_addr = le32_to_cpu(resp->chan_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
- addr = devm_ioremap(handle->dev, phys_addr, size);
+ addr = devm_ioremap(ph->dev, phys_addr, size);
if (!addr)
goto err_xfer;
*p_addr = addr;
if (p_db && SUPPORTS_DOORBELL(flags)) {
- db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
+ db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
if (!db)
goto err_xfer;
size = 1 << DOORBELL_REG_WIDTH(flags);
phys_addr = le32_to_cpu(resp->db_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
- addr = devm_ioremap(handle->dev, phys_addr, size);
+ addr = devm_ioremap(ph->dev, phys_addr, size);
if (!addr)
goto err_xfer;
@@ -605,25 +602,25 @@ scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
*p_db = db;
}
err_xfer:
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
}
-static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
+static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
u32 domain, struct scmi_fc_info **p_fc)
{
struct scmi_fc_info *fc;
- fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
+ fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
if (!fc)
return;
- scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
+ scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET,
&fc->level_set_addr, &fc->level_set_db);
- scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
+ scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET,
&fc->level_get_addr, NULL);
- scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
+ scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET,
&fc->limit_set_addr, &fc->limit_set_db);
- scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
+ scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET,
&fc->limit_get_addr, NULL);
*p_fc = fc;
}
@@ -640,14 +637,14 @@ static int scmi_dev_domain_id(struct device *dev)
return clkspec.args[0];
}
-static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
+static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
struct device *dev)
{
int idx, ret, domain;
unsigned long freq;
struct scmi_opp *opp;
struct perf_dom_info *dom;
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
domain = scmi_dev_domain_id(dev);
if (domain < 0)
@@ -672,11 +669,12 @@ static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
return 0;
}
-static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
- struct device *dev)
+static int
+scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
+ struct device *dev)
{
struct perf_dom_info *dom;
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
int domain = scmi_dev_domain_id(dev);
if (domain < 0)
@@ -687,35 +685,35 @@ static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
-static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
+static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long freq, bool poll)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
- poll);
+ return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll);
}
-static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
+static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *freq, bool poll)
{
int ret;
u32 level;
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- ret = scmi_perf_level_get(handle, domain, &level, poll);
+ ret = scmi_perf_level_get(ph, domain, &level, poll);
if (!ret)
*freq = level * dom->mult_factor;
return ret;
}
-static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
- unsigned long *freq, unsigned long *power)
+static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ u32 domain, unsigned long *freq,
+ unsigned long *power)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom;
unsigned long opp_freq;
int idx, ret = -EINVAL;
@@ -739,25 +737,25 @@ static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static bool scmi_fast_switch_possible(const struct scmi_handle *handle,
+static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
struct device *dev)
{
struct perf_dom_info *dom;
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
dom = pi->dom_info + scmi_dev_domain_id(dev);
return dom->fc_info && dom->fc_info->level_set_addr;
}
-static bool scmi_power_scale_mw_get(const struct scmi_handle *handle)
+static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
return pi->power_scale_mw;
}
-static const struct scmi_perf_ops perf_ops = {
+static const struct scmi_perf_proto_ops perf_proto_ops = {
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
.level_set = scmi_perf_level_set,
@@ -772,7 +770,7 @@ static const struct scmi_perf_ops perf_ops = {
.power_scale_mw_get = scmi_power_scale_mw_get,
};
-static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret, cmd_id;
@@ -781,7 +779,7 @@ static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
return -EINVAL;
cmd_id = evt_2_cmd[evt_id];
- ret = scmi_perf_level_limits_notify(handle, src_id, cmd_id, enable);
+ ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -789,7 +787,7 @@ static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
return ret;
}
-static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
+static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
@@ -837,6 +835,16 @@ static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
return rep;
}
+static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
static const struct scmi_event perf_events[] = {
{
.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
@@ -851,28 +859,36 @@ static const struct scmi_event perf_events[] = {
};
static const struct scmi_event_ops perf_event_ops = {
+ .get_num_sources = scmi_perf_get_num_sources,
.set_notify_enabled = scmi_perf_set_notify_enabled,
.fill_custom_report = scmi_perf_fill_custom_report,
};
-static int scmi_perf_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events perf_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &perf_event_ops,
+ .evts = perf_events,
+ .num_events = ARRAY_SIZE(perf_events),
+};
+
+static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
u32 version;
struct scmi_perf_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Performance Version %d.%d\n",
+ dev_dbg(ph->dev, "Performance Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_perf_attributes_get(handle, pinfo);
+ scmi_perf_attributes_get(ph, pinfo);
- pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
@@ -880,24 +896,24 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct perf_dom_info *dom = pinfo->dom_info + domain;
- scmi_perf_domain_attributes_get(handle, domain, dom);
- scmi_perf_describe_levels_get(handle, domain, dom);
+ scmi_perf_domain_attributes_get(ph, domain, dom);
+ scmi_perf_describe_levels_get(ph, domain, dom);
if (dom->perf_fastchannels)
- scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
+ scmi_perf_domain_init_fc(ph, domain, &dom->fc_info);
}
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_PERF, SCMI_PROTO_QUEUE_SZ,
- &perf_event_ops, perf_events,
- ARRAY_SIZE(perf_events),
- pinfo->num_domains);
-
pinfo->version = version;
- handle->perf_ops = &perf_ops;
- handle->perf_priv = pinfo;
- return 0;
+ return ph->set_priv(ph, pinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_PERF, perf)
+static const struct scmi_protocol scmi_perf = {
+ .id = SCMI_PROTOCOL_PERF,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_perf_protocol_init,
+ .ops = &perf_proto_ops,
+ .events = &perf_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)
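The perf conversion above sets the pattern repeated in every file of this series: per-instance protocol state is no longer hung off the global scmi_handle (handle->perf_priv and friends) but is stored through the protocol handle's set_priv()/get_priv() accessors. A minimal sketch of that lifecycle, using an invented "example" protocol purely for illustration:

/* Sketch only: the "example" protocol and its info struct are made up. */
struct scmi_example_info {
	u32 version;
	int num_domains;
};

static int scmi_example_protocol_init(const struct scmi_protocol_handle *ph)
{
	struct scmi_example_info *info;

	info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* ... populate info via ph->xops transfers ... */

	/* Tie the private data to this protocol instance. */
	return ph->set_priv(ph, info);
}

static int scmi_example_num_domains_get(const struct scmi_protocol_handle *ph)
{
	/* Any later protocol op retrieves the same per-instance state. */
	struct scmi_example_info *info = ph->get_priv(ph);

	return info->num_domains;
}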
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 1f37258e9bee..ad2ab080f344 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -2,11 +2,12 @@
/*
* System Control and Management Interface (SCMI) Power Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -68,21 +69,21 @@ struct scmi_power_info {
struct power_dom_info *dom_info;
};
-static int scmi_power_attributes_get(const struct scmi_handle *handle,
+static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_power_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_power_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
pi->num_domains = le16_to_cpu(attr->num_domains);
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
@@ -90,28 +91,27 @@ static int scmi_power_attributes_get(const struct scmi_handle *handle,
pi->stats_size = le32_to_cpu(attr->stats_size);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
- struct power_dom_info *dom_info)
+scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct power_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_power_domain_attributes *attr;
- ret = scmi_xfer_get_init(handle, POWER_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_POWER, sizeof(domain),
- sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, POWER_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 flags = le32_to_cpu(attr->flags);
@@ -121,19 +121,18 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int
-scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
+static int scmi_power_state_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 state)
{
int ret;
struct scmi_xfer *t;
struct scmi_power_set_state *st;
- ret = scmi_xfer_get_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER,
- sizeof(*st), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_SET, sizeof(*st), 0, &t);
if (ret)
return ret;
@@ -142,64 +141,64 @@ scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
st->domain = cpu_to_le32(domain);
st->state = cpu_to_le32(state);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int
-scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
+static int scmi_power_state_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *state)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER,
- sizeof(u32), sizeof(u32), &t);
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_GET, sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*state = get_unaligned_le32(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_power_num_domains_get(const struct scmi_handle *handle)
+static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph)
{
- struct scmi_power_info *pi = handle->power_priv;
+ struct scmi_power_info *pi = ph->get_priv(ph);
return pi->num_domains;
}
-static char *scmi_power_name_get(const struct scmi_handle *handle, u32 domain)
+static char *scmi_power_name_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
- struct scmi_power_info *pi = handle->power_priv;
+ struct scmi_power_info *pi = ph->get_priv(ph);
struct power_dom_info *dom = pi->dom_info + domain;
return dom->name;
}
-static const struct scmi_power_ops power_ops = {
+static const struct scmi_power_proto_ops power_proto_ops = {
.num_domains_get = scmi_power_num_domains_get,
.name_get = scmi_power_name_get,
.state_set = scmi_power_state_set,
.state_get = scmi_power_state_get,
};
-static int scmi_power_request_notify(const struct scmi_handle *handle,
+static int scmi_power_request_notify(const struct scmi_protocol_handle *ph,
u32 domain, bool enable)
{
int ret;
struct scmi_xfer *t;
struct scmi_power_state_notify *notify;
- ret = scmi_xfer_get_init(handle, POWER_STATE_NOTIFY,
- SCMI_PROTOCOL_POWER, sizeof(*notify), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_NOTIFY,
+ sizeof(*notify), 0, &t);
if (ret)
return ret;
@@ -207,18 +206,18 @@ static int scmi_power_request_notify(const struct scmi_handle *handle,
notify->domain = cpu_to_le32(domain);
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_power_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_power_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
- ret = scmi_power_request_notify(handle, src_id, enable);
+ ret = scmi_power_request_notify(ph, src_id, enable);
if (ret)
pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -226,10 +225,11 @@ static int scmi_power_set_notify_enabled(const struct scmi_handle *handle,
return ret;
}
-static void *scmi_power_fill_custom_report(const struct scmi_handle *handle,
- u8 evt_id, ktime_t timestamp,
- const void *payld, size_t payld_sz,
- void *report, u32 *src_id)
+static void *
+scmi_power_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
{
const struct scmi_power_state_notify_payld *p = payld;
struct scmi_power_state_changed_report *r = report;
@@ -246,6 +246,16 @@ static void *scmi_power_fill_custom_report(const struct scmi_handle *handle,
return r;
}
+static int scmi_power_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_power_info *pinfo = ph->get_priv(ph);
+
+ if (!pinfo)
+ return -EINVAL;
+
+ return pinfo->num_domains;
+}
+
static const struct scmi_event power_events[] = {
{
.id = SCMI_EVENT_POWER_STATE_CHANGED,
@@ -256,28 +266,36 @@ static const struct scmi_event power_events[] = {
};
static const struct scmi_event_ops power_event_ops = {
+ .get_num_sources = scmi_power_get_num_sources,
.set_notify_enabled = scmi_power_set_notify_enabled,
.fill_custom_report = scmi_power_fill_custom_report,
};
-static int scmi_power_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events power_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &power_event_ops,
+ .evts = power_events,
+ .num_events = ARRAY_SIZE(power_events),
+};
+
+static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
u32 version;
struct scmi_power_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_POWER, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Power Version %d.%d\n",
+ dev_dbg(ph->dev, "Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_power_attributes_get(handle, pinfo);
+ scmi_power_attributes_get(ph, pinfo);
- pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
@@ -285,20 +303,20 @@ static int scmi_power_protocol_init(struct scmi_handle *handle)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct power_dom_info *dom = pinfo->dom_info + domain;
- scmi_power_domain_attributes_get(handle, domain, dom);
+ scmi_power_domain_attributes_get(ph, domain, dom);
}
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_POWER, SCMI_PROTO_QUEUE_SZ,
- &power_event_ops, power_events,
- ARRAY_SIZE(power_events),
- pinfo->num_domains);
-
pinfo->version = version;
- handle->power_ops = &power_ops;
- handle->power_priv = pinfo;
- return 0;
+ return ph->set_priv(ph, pinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_POWER, power)
+static const struct scmi_protocol scmi_power = {
+ .id = SCMI_PROTOCOL_POWER,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_power_protocol_init,
+ .ops = &power_proto_ops,
+ .events = &power_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(power, scmi_power)
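As in the power calls just above, every command now goes through the transport ops hanging off the protocol handle (ph->xops) instead of the old scmi_xfer_get_init()/scmi_do_xfer()/scmi_xfer_put() entry points, and the protocol ID argument disappears because it is implied by the handle. A rough sketch of the recurring shape, with an invented message ID:

/* Illustrative only: EXAMPLE_VALUE_GET is a placeholder message ID. */
static int scmi_example_value_get(const struct scmi_protocol_handle *ph,
				  u32 domain, u32 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, EXAMPLE_VALUE_GET,
				      sizeof(__le32), sizeof(__le32), &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}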
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index a981a22cfe89..9bf2478ec6d1 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -2,11 +2,12 @@
/*
* System Control and Management Interface (SCMI) Reset Protocol
*
- * Copyright (C) 2019 ARM Ltd.
+ * Copyright (C) 2019-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -64,46 +65,45 @@ struct scmi_reset_info {
struct reset_dom_info *dom_info;
};
-static int scmi_reset_attributes_get(const struct scmi_handle *handle,
+static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_reset_info *pi)
{
int ret;
struct scmi_xfer *t;
u32 attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(attr), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
attr = get_unaligned_le32(t->rx.buf);
pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
- struct reset_dom_info *dom_info)
+scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct reset_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_reset_domain_attributes *attr;
- ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_RESET, sizeof(domain),
- sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 attributes = le32_to_cpu(attr->attributes);
@@ -115,47 +115,49 @@ scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
+static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph)
{
- struct scmi_reset_info *pi = handle->reset_priv;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
return pi->num_domains;
}
-static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
+static char *scmi_reset_name_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
- struct scmi_reset_info *pi = handle->reset_priv;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+
struct reset_dom_info *dom = pi->dom_info + domain;
return dom->name;
}
-static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
+static int scmi_reset_latency_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
- struct scmi_reset_info *pi = handle->reset_priv;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
struct reset_dom_info *dom = pi->dom_info + domain;
return dom->latency_us;
}
-static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
+static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
u32 flags, u32 state)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
- struct scmi_reset_info *pi = handle->reset_priv;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
struct reset_dom_info *rdom = pi->dom_info + domain;
if (rdom->async_reset)
flags |= ASYNCHRONOUS_RESET;
- ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
- sizeof(*dom), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
if (ret)
return ret;
@@ -165,34 +167,35 @@ static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
dom->reset_state = cpu_to_le32(state);
if (rdom->async_reset)
- ret = scmi_do_xfer_with_response(handle, t);
+ ret = ph->xops->do_xfer_with_response(ph, t);
else
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
+static int scmi_reset_domain_reset(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
- return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
+ return scmi_domain_reset(ph, domain, AUTONOMOUS_RESET,
ARCH_COLD_RESET);
}
static int
-scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
+scmi_reset_domain_assert(const struct scmi_protocol_handle *ph, u32 domain)
{
- return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
+ return scmi_domain_reset(ph, domain, EXPLICIT_RESET_ASSERT,
ARCH_COLD_RESET);
}
static int
-scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
+scmi_reset_domain_deassert(const struct scmi_protocol_handle *ph, u32 domain)
{
- return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
+ return scmi_domain_reset(ph, domain, 0, ARCH_COLD_RESET);
}
-static const struct scmi_reset_ops reset_ops = {
+static const struct scmi_reset_proto_ops reset_proto_ops = {
.num_domains_get = scmi_reset_num_domains_get,
.name_get = scmi_reset_name_get,
.latency_get = scmi_reset_latency_get,
@@ -201,16 +204,15 @@ static const struct scmi_reset_ops reset_ops = {
.deassert = scmi_reset_domain_deassert,
};
-static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id,
- bool enable)
+static int scmi_reset_notify(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool enable)
{
int ret;
u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
struct scmi_msg_reset_notify *cfg;
- ret = scmi_xfer_get_init(handle, RESET_NOTIFY,
- SCMI_PROTOCOL_RESET, sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, RESET_NOTIFY, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -218,18 +220,18 @@ static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id,
cfg->id = cpu_to_le32(domain_id);
cfg->event_control = cpu_to_le32(evt_cntl);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_reset_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
- ret = scmi_reset_notify(handle, src_id, enable);
+ ret = scmi_reset_notify(ph, src_id, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -237,10 +239,11 @@ static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle,
return ret;
}
-static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle,
- u8 evt_id, ktime_t timestamp,
- const void *payld, size_t payld_sz,
- void *report, u32 *src_id)
+static void *
+scmi_reset_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
{
const struct scmi_reset_issued_notify_payld *p = payld;
struct scmi_reset_issued_report *r = report;
@@ -257,6 +260,16 @@ static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle,
return r;
}
+static int scmi_reset_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_reset_info *pinfo = ph->get_priv(ph);
+
+ if (!pinfo)
+ return -EINVAL;
+
+ return pinfo->num_domains;
+}
+
static const struct scmi_event reset_events[] = {
{
.id = SCMI_EVENT_RESET_ISSUED,
@@ -266,28 +279,36 @@ static const struct scmi_event reset_events[] = {
};
static const struct scmi_event_ops reset_event_ops = {
+ .get_num_sources = scmi_reset_get_num_sources,
.set_notify_enabled = scmi_reset_set_notify_enabled,
.fill_custom_report = scmi_reset_fill_custom_report,
};
-static int scmi_reset_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events reset_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &reset_event_ops,
+ .evts = reset_events,
+ .num_events = ARRAY_SIZE(reset_events),
+};
+
+static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
u32 version;
struct scmi_reset_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Reset Version %d.%d\n",
+ dev_dbg(ph->dev, "Reset Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_reset_attributes_get(handle, pinfo);
+ scmi_reset_attributes_get(ph, pinfo);
- pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
@@ -295,20 +316,19 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct reset_dom_info *dom = pinfo->dom_info + domain;
- scmi_reset_domain_attributes_get(handle, domain, dom);
+ scmi_reset_domain_attributes_get(ph, domain, dom);
}
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_RESET, SCMI_PROTO_QUEUE_SZ,
- &reset_event_ops, reset_events,
- ARRAY_SIZE(reset_events),
- pinfo->num_domains);
-
pinfo->version = version;
- handle->reset_ops = &reset_ops;
- handle->reset_priv = pinfo;
-
- return 0;
+ return ph->set_priv(ph, pinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_RESET, reset)
+static const struct scmi_protocol scmi_reset = {
+ .id = SCMI_PROTOCOL_RESET,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_reset_protocol_init,
+ .ops = &reset_proto_ops,
+ .events = &reset_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(reset, scmi_reset)
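Notification wiring changes the same way in each protocol: instead of calling scmi_register_protocol_events() from init, the protocol now publishes a static scmi_protocol_events descriptor plus a get_num_sources() callback, and the core queries the source count on demand. A hedged sketch, where the "example" names stand in for the per-protocol tables shown in the hunks above:

static int scmi_example_get_num_sources(const struct scmi_protocol_handle *ph)
{
	struct scmi_example_info *info = ph->get_priv(ph);

	return info ? info->num_domains : -EINVAL;
}

static const struct scmi_event_ops example_event_ops = {
	.get_num_sources = scmi_example_get_num_sources,
	/* .set_notify_enabled / .fill_custom_report as in the protocols above */
};

/* example_events is the protocol's scmi_event table, as in reset/perf/power. */
static const struct scmi_protocol_events example_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &example_event_ops,
	.evts = example_events,
	.num_events = ARRAY_SIZE(example_events),
};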
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 9e44479f0284..9d36d5c0622d 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -2,7 +2,7 @@
/*
* SCMI Generic power domain support.
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#include <linux/err.h>
@@ -11,9 +11,11 @@
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
+static const struct scmi_power_proto_ops *power_ops;
+
struct scmi_pm_domain {
struct generic_pm_domain genpd;
- const struct scmi_handle *handle;
+ const struct scmi_protocol_handle *ph;
const char *name;
u32 domain;
};
@@ -25,16 +27,15 @@ static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
int ret;
u32 state, ret_state;
struct scmi_pm_domain *pd = to_scmi_pd(domain);
- const struct scmi_power_ops *ops = pd->handle->power_ops;
if (power_on)
state = SCMI_POWER_STATE_GENERIC_ON;
else
state = SCMI_POWER_STATE_GENERIC_OFF;
- ret = ops->state_set(pd->handle, pd->domain, state);
+ ret = power_ops->state_set(pd->ph, pd->domain, state);
if (!ret)
- ret = ops->state_get(pd->handle, pd->domain, &ret_state);
+ ret = power_ops->state_get(pd->ph, pd->domain, &ret_state);
if (!ret && state != ret_state)
return -EIO;
@@ -60,11 +61,16 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
struct genpd_onecell_data *scmi_pd_data;
struct generic_pm_domain **domains;
const struct scmi_handle *handle = sdev->handle;
+ struct scmi_protocol_handle *ph;
- if (!handle || !handle->power_ops)
+ if (!handle)
return -ENODEV;
- num_domains = handle->power_ops->num_domains_get(handle);
+ power_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER, &ph);
+ if (IS_ERR(power_ops))
+ return PTR_ERR(power_ops);
+
+ num_domains = power_ops->num_domains_get(ph);
if (num_domains < 0) {
dev_err(dev, "number of domains not found\n");
return num_domains;
@@ -85,14 +91,14 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
for (i = 0; i < num_domains; i++, scmi_pd++) {
u32 state;
- if (handle->power_ops->state_get(handle, i, &state)) {
+ if (power_ops->state_get(ph, i, &state)) {
dev_warn(dev, "failed to get state for domain %d\n", i);
continue;
}
scmi_pd->domain = i;
- scmi_pd->handle = handle;
- scmi_pd->name = handle->power_ops->name_get(handle, i);
+ scmi_pd->ph = ph;
+ scmi_pd->name = power_ops->name_get(ph, i);
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
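The scmi_pm_domain change is the consumer side of the same rework: an scmi_device driver no longer dereferences handle->power_ops directly but asks for the protocol with devm_protocol_get(), which hands back the ops together with a protocol handle to pass into them. A trimmed sketch of that probe flow, with error handling reduced to the essentials:

/* Sketch of a driver probe using the devm protocol accessor shown above. */
static int example_scmi_probe(struct scmi_device *sdev)
{
	struct scmi_protocol_handle *ph;
	const struct scmi_power_proto_ops *power_ops;
	int num_domains;

	if (!sdev->handle)
		return -ENODEV;

	/* The protocol is released automatically with the scmi_device. */
	power_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER,
						    &ph);
	if (IS_ERR(power_ops))
		return PTR_ERR(power_ops);

	num_domains = power_ops->num_domains_get(ph);
	if (num_domains < 0)
		return num_domains;

	/* ... register one genpd per domain, as scmi_pm_domain_probe() does ... */
	return 0;
}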
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 4541b891b733..2c88aa221559 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -2,12 +2,13 @@
/*
* System Control and Management Interface (SCMI) Sensor Protocol
*
- * Copyright (C) 2018-2020 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
#include <linux/bitfield.h>
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -201,21 +202,21 @@ struct sensors_info {
struct scmi_sensor_info *sensors;
};
-static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
+static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph,
struct sensors_info *si)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
si->num_sensors = le16_to_cpu(attr->num_sensors);
si->max_requests = attr->max_requests;
@@ -224,7 +225,7 @@ static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
si->reg_size = le32_to_cpu(attr->reg_size);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -235,7 +236,7 @@ static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
out->max_range = get_unaligned_le64((void *)&in->max_range_low);
}
-static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
+static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
int ret, cnt;
@@ -245,8 +246,8 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
struct scmi_msg_resp_sensor_list_update_intervals *buf;
struct scmi_msg_sensor_list_update_intervals *msg;
- ret = scmi_xfer_get_init(handle, SENSOR_LIST_UPDATE_INTERVALS,
- SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &ti);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_LIST_UPDATE_INTERVALS,
+ sizeof(*msg), 0, &ti);
if (ret)
return ret;
@@ -259,7 +260,7 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
msg->id = cpu_to_le32(s->id);
msg->index = cpu_to_le32(desc_index);
- ret = scmi_do_xfer(handle, ti);
+ ret = ph->xops->do_xfer(ph, ti);
if (ret)
break;
@@ -277,7 +278,7 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
/* segmented intervals are reported in one triplet */
if (s->intervals.segmented &&
(num_remaining || num_returned != 3)) {
- dev_err(handle->dev,
+ dev_err(ph->dev,
"Sensor ID:%d advertises an invalid segmented interval (%d)\n",
s->id, s->intervals.count);
s->intervals.segmented = false;
@@ -288,7 +289,7 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
/* Direct allocation when exceeding pre-allocated */
if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
s->intervals.desc =
- devm_kcalloc(handle->dev,
+ devm_kcalloc(ph->dev,
s->intervals.count,
sizeof(*s->intervals.desc),
GFP_KERNEL);
@@ -300,7 +301,7 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
}
}
} else if (desc_index + num_returned > s->intervals.count) {
- dev_err(handle->dev,
+ dev_err(ph->dev,
"No. of update intervals can't exceed %d\n",
s->intervals.count);
ret = -EINVAL;
@@ -313,18 +314,18 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
desc_index += num_returned;
- scmi_reset_rx_to_maxsz(handle, ti);
+ ph->xops->reset_rx_to_maxsz(ph, ti);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (num_returned && num_remaining);
- scmi_xfer_put(handle, ti);
+ ph->xops->xfer_put(ph, ti);
return ret;
}
-static int scmi_sensor_axis_description(const struct scmi_handle *handle,
+static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
int ret, cnt;
@@ -334,13 +335,13 @@ static int scmi_sensor_axis_description(const struct scmi_handle *handle,
struct scmi_msg_resp_sensor_axis_description *buf;
struct scmi_msg_sensor_axis_description_get *msg;
- s->axis = devm_kcalloc(handle->dev, s->num_axis,
+ s->axis = devm_kcalloc(ph->dev, s->num_axis,
sizeof(*s->axis), GFP_KERNEL);
if (!s->axis)
return -ENOMEM;
- ret = scmi_xfer_get_init(handle, SENSOR_AXIS_DESCRIPTION_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &te);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_AXIS_DESCRIPTION_GET,
+ sizeof(*msg), 0, &te);
if (ret)
return ret;
@@ -354,7 +355,7 @@ static int scmi_sensor_axis_description(const struct scmi_handle *handle,
msg->id = cpu_to_le32(s->id);
msg->axis_desc_index = cpu_to_le32(desc_index);
- ret = scmi_do_xfer(handle, te);
+ ret = ph->xops->do_xfer(ph, te);
if (ret)
break;
@@ -363,7 +364,7 @@ static int scmi_sensor_axis_description(const struct scmi_handle *handle,
num_remaining = NUM_AXIS_REMAINING(flags);
if (desc_index + num_returned > s->num_axis) {
- dev_err(handle->dev, "No. of axis can't exceed %d\n",
+ dev_err(ph->dev, "No. of axis can't exceed %d\n",
s->num_axis);
break;
}
@@ -405,18 +406,18 @@ static int scmi_sensor_axis_description(const struct scmi_handle *handle,
desc_index += num_returned;
- scmi_reset_rx_to_maxsz(handle, te);
+ ph->xops->reset_rx_to_maxsz(ph, te);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (num_returned && num_remaining);
- scmi_xfer_put(handle, te);
+ ph->xops->xfer_put(ph, te);
return ret;
}
-static int scmi_sensor_description_get(const struct scmi_handle *handle,
+static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph,
struct sensors_info *si)
{
int ret, cnt;
@@ -425,8 +426,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_description *buf;
- ret = scmi_xfer_get_init(handle, SENSOR_DESCRIPTION_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_DESCRIPTION_GET,
+ sizeof(__le32), 0, &t);
if (ret)
return ret;
@@ -437,7 +438,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
/* Set the number of sensors to be skipped/already read */
put_unaligned_le32(desc_index, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
break;
@@ -445,7 +447,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
num_remaining = le16_to_cpu(buf->num_remaining);
if (desc_index + num_returned > si->num_sensors) {
- dev_err(handle->dev, "No. of sensors can't exceed %d",
+ dev_err(ph->dev, "No. of sensors can't exceed %d",
si->num_sensors);
break;
}
@@ -500,8 +502,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
* Since the command is optional, on error carry
* on without any update interval.
*/
- if (scmi_sensor_update_intervals(handle, s))
- dev_dbg(handle->dev,
+ if (scmi_sensor_update_intervals(ph, s))
+ dev_dbg(ph->dev,
"Update Intervals not available for sensor ID:%d\n",
s->id);
}
@@ -535,7 +537,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
}
}
if (s->num_axis > 0) {
- ret = scmi_sensor_axis_description(handle, s);
+ ret = scmi_sensor_axis_description(ph, s);
if (ret)
goto out;
}
@@ -545,7 +547,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
desc_index += num_returned;
- scmi_reset_rx_to_maxsz(handle, t);
+ ph->xops->reset_rx_to_maxsz(ph, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -553,12 +555,12 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
} while (num_returned && num_remaining);
out:
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static inline int
-scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
+scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id,
u8 message_id, bool enable)
{
int ret;
@@ -566,8 +568,7 @@ scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
struct scmi_xfer *t;
struct scmi_msg_sensor_request_notify *cfg;
- ret = scmi_xfer_get_init(handle, message_id,
- SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -575,40 +576,40 @@ scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
cfg->id = cpu_to_le32(sensor_id);
cfg->event_control = cpu_to_le32(evt_cntl);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+static int scmi_sensor_trip_point_notify(const struct scmi_protocol_handle *ph,
u32 sensor_id, bool enable)
{
- return scmi_sensor_request_notify(handle, sensor_id,
+ return scmi_sensor_request_notify(ph, sensor_id,
SENSOR_TRIP_POINT_NOTIFY,
enable);
}
static int
-scmi_sensor_continuous_update_notify(const struct scmi_handle *handle,
+scmi_sensor_continuous_update_notify(const struct scmi_protocol_handle *ph,
u32 sensor_id, bool enable)
{
- return scmi_sensor_request_notify(handle, sensor_id,
+ return scmi_sensor_request_notify(ph, sensor_id,
SENSOR_CONTINUOUS_UPDATE_NOTIFY,
enable);
}
static int
-scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
- u8 trip_id, u64 trip_value)
+scmi_sensor_trip_point_config(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u8 trip_id, u64 trip_value)
{
int ret;
u32 evt_cntl = SENSOR_TP_BOTH;
struct scmi_xfer *t;
struct scmi_msg_set_sensor_trip_point *trip;
- ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG,
- SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_TRIP_POINT_CONFIG,
+ sizeof(*trip), 0, &t);
if (ret)
return ret;
@@ -618,47 +619,46 @@ scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
trip->value_low = cpu_to_le32(trip_value & 0xffffffff);
trip->value_high = cpu_to_le32(trip_value >> 32);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_sensor_config_get(const struct scmi_handle *handle,
+static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
u32 sensor_id, u32 *sensor_config)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(__le32),
- sizeof(__le32), &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
+ sizeof(__le32), sizeof(__le32), &t);
if (ret)
return ret;
put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
*sensor_config = get_unaligned_le64(t->rx.buf);
s->sensor_config = *sensor_config;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_sensor_config_set(const struct scmi_handle *handle,
+static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
u32 sensor_id, u32 sensor_config)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_config_set *msg;
- ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
- SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
+ sizeof(*msg), 0, &t);
if (ret)
return ret;
@@ -666,21 +666,21 @@ static int scmi_sensor_config_set(const struct scmi_handle *handle,
msg->id = cpu_to_le32(sensor_id);
msg->sensor_config = cpu_to_le32(sensor_config);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
s->sensor_config = sensor_config;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
/**
* scmi_sensor_reading_get - Read scalar sensor value
- * @handle: Platform handle
+ * @ph: Protocol handle
* @sensor_id: Sensor ID
* @value: The 64bit value sensor reading
*
@@ -693,17 +693,17 @@ static int scmi_sensor_config_set(const struct scmi_handle *handle,
*
* Return: 0 on Success
*/
-static int scmi_sensor_reading_get(const struct scmi_handle *handle,
+static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
u32 sensor_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
- ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
+ sizeof(*sensor), 0, &t);
if (ret)
return ret;
@@ -711,7 +711,7 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
sensor->id = cpu_to_le32(sensor_id);
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
- ret = scmi_do_xfer_with_response(handle, t);
+ ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
struct scmi_resp_sensor_reading_complete *resp;
@@ -723,12 +723,12 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
}
} else {
sensor->flags = cpu_to_le32(0);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le64(t->rx.buf);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -742,7 +742,7 @@ scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
/**
* scmi_sensor_reading_get_timestamped - Read multiple-axis timestamped values
- * @handle: Platform handle
+ * @ph: Protocol handle
* @sensor_id: Sensor ID
* @count: The length of the provided @readings array
* @readings: An array of elements each representing a timestamped per-axis
@@ -755,22 +755,22 @@ scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
* Return: 0 on Success
*/
static int
-scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
+scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 count,
struct scmi_sensor_reading *readings)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
if (!count || !readings ||
(!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
return -EINVAL;
- ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
+ sizeof(*sensor), 0, &t);
if (ret)
return ret;
@@ -778,7 +778,7 @@ scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
sensor->id = cpu_to_le32(sensor_id);
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
- ret = scmi_do_xfer_with_response(handle, t);
+ ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
int i;
struct scmi_resp_sensor_reading_complete_v3 *resp;
@@ -794,7 +794,7 @@ scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
}
} else {
sensor->flags = cpu_to_le32(0);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
int i;
struct scmi_sensor_reading_resp *resp_readings;
@@ -806,26 +806,26 @@ scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
}
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static const struct scmi_sensor_info *
-scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
+scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
{
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
return si->sensors + sensor_id;
}
-static int scmi_sensor_count_get(const struct scmi_handle *handle)
+static int scmi_sensor_count_get(const struct scmi_protocol_handle *ph)
{
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
return si->num_sensors;
}
-static const struct scmi_sensor_ops sensor_ops = {
+static const struct scmi_sensor_proto_ops sensor_proto_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
.trip_point_config = scmi_sensor_trip_point_config,
@@ -835,18 +835,17 @@ static const struct scmi_sensor_ops sensor_ops = {
.config_set = scmi_sensor_config_set,
};
-static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_sensor_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
switch (evt_id) {
case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
- ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ ret = scmi_sensor_trip_point_notify(ph, src_id, enable);
break;
case SCMI_EVENT_SENSOR_UPDATE:
- ret = scmi_sensor_continuous_update_notify(handle, src_id,
- enable);
+ ret = scmi_sensor_continuous_update_notify(ph, src_id, enable);
break;
default:
ret = -EINVAL;
@@ -860,10 +859,11 @@ static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
return ret;
}
-static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
- u8 evt_id, ktime_t timestamp,
- const void *payld, size_t payld_sz,
- void *report, u32 *src_id)
+static void *
+scmi_sensor_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
{
void *rep = NULL;
@@ -890,7 +890,7 @@ static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
struct scmi_sensor_info *s;
const struct scmi_sensor_update_notify_payld *p = payld;
struct scmi_sensor_update_report *r = report;
- struct sensors_info *sinfo = handle->sensor_priv;
+ struct sensors_info *sinfo = ph->get_priv(ph);
/* payld_sz is variable for this event */
r->sensor_id = le32_to_cpu(p->sensor_id);
@@ -920,6 +920,13 @@ static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
return rep;
}
+static int scmi_sensor_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct sensors_info *si = ph->get_priv(ph);
+
+ return si->num_sensors;
+}
+
static const struct scmi_event sensor_events[] = {
{
.id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
@@ -939,48 +946,55 @@ static const struct scmi_event sensor_events[] = {
};
static const struct scmi_event_ops sensor_event_ops = {
+ .get_num_sources = scmi_sensor_get_num_sources,
.set_notify_enabled = scmi_sensor_set_notify_enabled,
.fill_custom_report = scmi_sensor_fill_custom_report,
};
-static int scmi_sensors_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events sensor_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &sensor_event_ops,
+ .evts = sensor_events,
+ .num_events = ARRAY_SIZE(sensor_events),
+};
+
+static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
int ret;
struct sensors_info *sinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Sensor Version %d.%d\n",
+ dev_dbg(ph->dev, "Sensor Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
+ sinfo = devm_kzalloc(ph->dev, sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
sinfo->version = version;
- ret = scmi_sensor_attributes_get(handle, sinfo);
+ ret = scmi_sensor_attributes_get(ph, sinfo);
if (ret)
return ret;
- sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
+ sinfo->sensors = devm_kcalloc(ph->dev, sinfo->num_sensors,
sizeof(*sinfo->sensors), GFP_KERNEL);
if (!sinfo->sensors)
return -ENOMEM;
- ret = scmi_sensor_description_get(handle, sinfo);
+ ret = scmi_sensor_description_get(ph, sinfo);
if (ret)
return ret;
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
- &sensor_event_ops, sensor_events,
- ARRAY_SIZE(sensor_events),
- sinfo->num_sensors);
-
- handle->sensor_priv = sinfo;
- handle->sensor_ops = &sensor_ops;
-
- return 0;
+ return ph->set_priv(ph, sinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SENSOR, sensors)
+static const struct scmi_protocol scmi_sensors = {
+ .id = SCMI_PROTOCOL_SENSOR,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_sensors_protocol_init,
+ .ops = &sensor_proto_ops,
+ .events = &sensor_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(sensors, scmi_sensors)
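The sensor hunks also show how the multi-part queries (DESCRIPTION_GET, LIST_UPDATE_INTERVALS, AXIS_DESCRIPTION_GET) keep iterating over descriptor chunks, now resetting the shared buffer through ph->xops->reset_rx_to_maxsz() between rounds. A skeleton of that loop with the per-descriptor parsing elided; the "example" message ID and response layout are placeholders:

static int scmi_example_describe_all(const struct scmi_protocol_handle *ph)
{
	int ret;
	u32 desc_index = 0;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_resp_example_describe *buf;	/* placeholder layout */

	ret = ph->xops->xfer_get_init(ph, EXAMPLE_DESCRIBE_GET,
				      sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	buf = t->rx.buf;
	do {
		/* Tell the platform how many descriptors were consumed so far. */
		put_unaligned_le32(desc_index, t->tx.buf);

		ret = ph->xops->do_xfer(ph, t);
		if (ret)
			break;

		num_returned = le16_to_cpu(buf->num_returned);
		num_remaining = le16_to_cpu(buf->num_remaining);

		/* ... copy out num_returned descriptors from buf ... */

		desc_index += num_returned;

		/* Restore the full rx size before reusing the xfer. */
		ph->xops->reset_rx_to_maxsz(ph, t);

		/* Check both counters to avoid looping forever on buggy firmware. */
	} while (num_returned && num_remaining);

	ph->xops->xfer_put(ph, t);
	return ret;
}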
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index 283e12d5f24b..e5175ef73b40 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -2,11 +2,12 @@
/*
* System Control and Management Interface (SCMI) System Power Protocol
*
- * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -32,43 +33,44 @@ struct scmi_system_info {
u32 version;
};
-static int scmi_system_request_notify(const struct scmi_handle *handle,
+static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
bool enable)
{
int ret;
struct scmi_xfer *t;
struct scmi_system_power_state_notify *notify;
- ret = scmi_xfer_get_init(handle, SYSTEM_POWER_STATE_NOTIFY,
- SCMI_PROTOCOL_SYSTEM, sizeof(*notify), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SYSTEM_POWER_STATE_NOTIFY,
+ sizeof(*notify), 0, &t);
if (ret)
return ret;
notify = t->tx.buf;
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_system_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_system_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
- ret = scmi_system_request_notify(handle, enable);
+ ret = scmi_system_request_notify(ph, enable);
if (ret)
pr_debug("FAIL_ENABLE - evt[%X] - ret:%d\n", evt_id, ret);
return ret;
}
-static void *scmi_system_fill_custom_report(const struct scmi_handle *handle,
- u8 evt_id, ktime_t timestamp,
- const void *payld, size_t payld_sz,
- void *report, u32 *src_id)
+static void *
+scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
{
const struct scmi_system_power_state_notifier_payld *p = payld;
struct scmi_system_power_state_notifier_report *r = report;
@@ -101,31 +103,38 @@ static const struct scmi_event_ops system_event_ops = {
.fill_custom_report = scmi_system_fill_custom_report,
};
-static int scmi_system_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events system_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &system_event_ops,
+ .evts = system_events,
+ .num_events = ARRAY_SIZE(system_events),
+ .num_sources = SCMI_SYSTEM_NUM_SOURCES,
+};
+
+static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
struct scmi_system_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_SYSTEM, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "System Power Version %d.%d\n",
+ dev_dbg(ph->dev, "System Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_SYSTEM, SCMI_PROTO_QUEUE_SZ,
- &system_event_ops,
- system_events,
- ARRAY_SIZE(system_events),
- SCMI_SYSTEM_NUM_SOURCES);
-
pinfo->version = version;
- handle->system_priv = pinfo;
-
- return 0;
+ return ph->set_priv(ph, pinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SYSTEM, system)
+static const struct scmi_protocol scmi_system = {
+ .id = SCMI_PROTOCOL_SYSTEM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_system_protocol_init,
+ .ops = NULL,
+ .events = &system_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(system, scmi_system)
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index e794e4349ae6..a5048956a0be 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -2,9 +2,10 @@
/*
* System Control and Management Interface (SCMI) Voltage Protocol
*
- * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020-2021 ARM Ltd.
*/
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -59,23 +60,23 @@ struct voltage_info {
struct scmi_voltage_info *domains;
};
-static int scmi_protocol_attributes_get(const struct scmi_handle *handle,
+static int scmi_protocol_attributes_get(const struct scmi_protocol_handle *ph,
struct voltage_info *vinfo)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_VOLTAGE, 0, sizeof(__le32), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(__le32), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
vinfo->num_domains =
NUM_VOLTAGE_DOMAINS(get_unaligned_le32(t->rx.buf));
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -109,24 +110,23 @@ static int scmi_init_voltage_levels(struct device *dev,
return 0;
}
-static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
+static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
struct voltage_info *vinfo)
{
int ret, dom;
struct scmi_xfer *td, *tl;
- struct device *dev = handle->dev;
+ struct device *dev = ph->dev;
struct scmi_msg_resp_domain_attributes *resp_dom;
struct scmi_msg_resp_describe_levels *resp_levels;
- ret = scmi_xfer_get_init(handle, VOLTAGE_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_VOLTAGE, sizeof(__le32),
- sizeof(*resp_dom), &td);
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
+ sizeof(__le32), sizeof(*resp_dom), &td);
if (ret)
return ret;
resp_dom = td->rx.buf;
- ret = scmi_xfer_get_init(handle, VOLTAGE_DESCRIBE_LEVELS,
- SCMI_PROTOCOL_VOLTAGE, sizeof(__le64), 0, &tl);
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_DESCRIBE_LEVELS,
+ sizeof(__le64), 0, &tl);
if (ret)
goto outd;
resp_levels = tl->rx.buf;
@@ -139,7 +139,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
/* Retrieve domain attributes at first ... */
put_unaligned_le32(dom, td->tx.buf);
- ret = scmi_do_xfer(handle, td);
+ ret = ph->xops->do_xfer(ph, td);
/* Skip domain on comms error */
if (ret)
continue;
@@ -157,7 +157,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
cmd->domain_id = cpu_to_le32(v->id);
cmd->level_index = desc_index;
- ret = scmi_do_xfer(handle, tl);
+ ret = ph->xops->do_xfer(ph, tl);
if (ret)
break;
@@ -176,7 +176,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
}
if (desc_index + num_returned > v->num_levels) {
- dev_err(handle->dev,
+ dev_err(ph->dev,
"No. of voltage levels can't exceed %d\n",
v->num_levels);
ret = -EINVAL;
@@ -195,7 +195,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
desc_index += num_returned;
- scmi_reset_rx_to_maxsz(handle, tl);
+ ph->xops->reset_rx_to_maxsz(ph, tl);
/* check both to avoid infinite loop due to buggy fw */
} while (num_returned && num_remaining);
@@ -204,55 +204,52 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
devm_kfree(dev, v->levels_uv);
}
- scmi_reset_rx_to_maxsz(handle, td);
+ ph->xops->reset_rx_to_maxsz(ph, td);
}
- scmi_xfer_put(handle, tl);
+ ph->xops->xfer_put(ph, tl);
outd:
- scmi_xfer_put(handle, td);
+ ph->xops->xfer_put(ph, td);
return ret;
}
-static int __scmi_voltage_get_u32(const struct scmi_handle *handle,
+static int __scmi_voltage_get_u32(const struct scmi_protocol_handle *ph,
u8 cmd_id, u32 domain_id, u32 *value)
{
int ret;
struct scmi_xfer *t;
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
if (domain_id >= vinfo->num_domains)
return -EINVAL;
- ret = scmi_xfer_get_init(handle, cmd_id,
- SCMI_PROTOCOL_VOLTAGE,
- sizeof(__le32), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(__le32), 0, &t);
if (ret)
return ret;
put_unaligned_le32(domain_id, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le32(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_voltage_config_set(const struct scmi_handle *handle,
+static int scmi_voltage_config_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 config)
{
int ret;
struct scmi_xfer *t;
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
struct scmi_msg_cmd_config_set *cmd;
if (domain_id >= vinfo->num_domains)
return -EINVAL;
- ret = scmi_xfer_get_init(handle, VOLTAGE_CONFIG_SET,
- SCMI_PROTOCOL_VOLTAGE,
- sizeof(*cmd), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_CONFIG_SET,
+ sizeof(*cmd), 0, &t);
if (ret)
return ret;
@@ -260,33 +257,32 @@ static int scmi_voltage_config_set(const struct scmi_handle *handle,
cmd->domain_id = cpu_to_le32(domain_id);
cmd->config = cpu_to_le32(config & GENMASK(3, 0));
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_voltage_config_get(const struct scmi_handle *handle,
+static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *config)
{
- return __scmi_voltage_get_u32(handle, VOLTAGE_CONFIG_GET,
+ return __scmi_voltage_get_u32(ph, VOLTAGE_CONFIG_GET,
domain_id, config);
}
-static int scmi_voltage_level_set(const struct scmi_handle *handle,
+static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 flags, s32 volt_uV)
{
int ret;
struct scmi_xfer *t;
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
struct scmi_msg_cmd_level_set *cmd;
if (domain_id >= vinfo->num_domains)
return -EINVAL;
- ret = scmi_xfer_get_init(handle, VOLTAGE_LEVEL_SET,
- SCMI_PROTOCOL_VOLTAGE,
- sizeof(*cmd), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_LEVEL_SET,
+ sizeof(*cmd), 0, &t);
if (ret)
return ret;
@@ -295,23 +291,23 @@ static int scmi_voltage_level_set(const struct scmi_handle *handle,
cmd->flags = cpu_to_le32(flags);
cmd->voltage_level = cpu_to_le32(volt_uV);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_voltage_level_get(const struct scmi_handle *handle,
+static int scmi_voltage_level_get(const struct scmi_protocol_handle *ph,
u32 domain_id, s32 *volt_uV)
{
- return __scmi_voltage_get_u32(handle, VOLTAGE_LEVEL_GET,
+ return __scmi_voltage_get_u32(ph, VOLTAGE_LEVEL_GET,
domain_id, (u32 *)volt_uV);
}
static const struct scmi_voltage_info * __must_check
-scmi_voltage_info_get(const struct scmi_handle *handle, u32 domain_id)
+scmi_voltage_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
{
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
if (domain_id >= vinfo->num_domains ||
!vinfo->domains[domain_id].num_levels)
@@ -320,14 +316,14 @@ scmi_voltage_info_get(const struct scmi_handle *handle, u32 domain_id)
return vinfo->domains + domain_id;
}
-static int scmi_voltage_domains_num_get(const struct scmi_handle *handle)
+static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
{
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
return vinfo->num_domains;
}
-static struct scmi_voltage_ops voltage_ops = {
+static struct scmi_voltage_proto_ops voltage_proto_ops = {
.num_domains_get = scmi_voltage_domains_num_get,
.info_get = scmi_voltage_info_get,
.config_set = scmi_voltage_config_set,
@@ -336,45 +332,49 @@ static struct scmi_voltage_ops voltage_ops = {
.level_get = scmi_voltage_level_get,
};
-static int scmi_voltage_protocol_init(struct scmi_handle *handle)
+static int scmi_voltage_protocol_init(const struct scmi_protocol_handle *ph)
{
int ret;
u32 version;
struct voltage_info *vinfo;
- ret = scmi_version_get(handle, SCMI_PROTOCOL_VOLTAGE, &version);
+ ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
- dev_dbg(handle->dev, "Voltage Version %d.%d\n",
+ dev_dbg(ph->dev, "Voltage Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- vinfo = devm_kzalloc(handle->dev, sizeof(*vinfo), GFP_KERNEL);
+ vinfo = devm_kzalloc(ph->dev, sizeof(*vinfo), GFP_KERNEL);
if (!vinfo)
return -ENOMEM;
vinfo->version = version;
- ret = scmi_protocol_attributes_get(handle, vinfo);
+ ret = scmi_protocol_attributes_get(ph, vinfo);
if (ret)
return ret;
if (vinfo->num_domains) {
- vinfo->domains = devm_kcalloc(handle->dev, vinfo->num_domains,
+ vinfo->domains = devm_kcalloc(ph->dev, vinfo->num_domains,
sizeof(*vinfo->domains),
GFP_KERNEL);
if (!vinfo->domains)
return -ENOMEM;
- ret = scmi_voltage_descriptors_get(handle, vinfo);
+ ret = scmi_voltage_descriptors_get(ph, vinfo);
if (ret)
return ret;
} else {
- dev_warn(handle->dev, "No Voltage domains found.\n");
+ dev_warn(ph->dev, "No Voltage domains found.\n");
}
- handle->voltage_ops = &voltage_ops;
- handle->voltage_priv = vinfo;
-
- return 0;
+ return ph->set_priv(ph, vinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_VOLTAGE, voltage)
+static const struct scmi_protocol scmi_voltage = {
+ .id = SCMI_PROTOCOL_VOLTAGE,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_voltage_protocol_init,
+ .ops = &voltage_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(voltage, scmi_voltage)
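The hunks above complete the voltage protocol's move from the global scmi_handle helpers to the per-protocol scmi_protocol_handle: transport calls go through ph->xops, per-instance state through ph->get_priv()/set_priv(), and registration through the modular DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER() macro. As a reading aid, a minimal sketch (not part of the patch) of the calling pattern the conversion settles on; the message id and payload layout here are illustrative only:

static int scmi_voltage_example_op(const struct scmi_protocol_handle *ph,
				   u32 domain_id)
{
	int ret;
	struct scmi_xfer *t;
	__le32 *dom;
	struct voltage_info *vinfo = ph->get_priv(ph);

	if (domain_id >= vinfo->num_domains)
		return -EINVAL;

	/* The protocol id is implied by ph; only the message id is passed. */
	ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
				      sizeof(*dom), 0, &t);
	if (ret)
		return ret;

	dom = t->tx.buf;
	*dom = cpu_to_le32(domain_id);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}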
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 08533ee67626..ff6569c4a53b 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -29,6 +29,10 @@
* The framework needs some proper extension to support multi power
* domain cases.
*
+ * Update: Genpd assigns the ->of_node for the virtual device before it
+ * invokes the ->attach_dev() callback, hence parsing for device resources
+ * via DT should work fine.
+ *
* 2. It also breaks most of current drivers as the driver probe sequence
* behavior changed if removing ->power_on|off() callback and use
* ->start() and ->stop() instead. genpd_dev_pm_attach will only power
@@ -39,8 +43,11 @@
* domain enabled will trigger a HW access error. That means we need fix
* most drivers probe sequence with proper runtime pm.
*
- * In summary, we need fix above two issue before being able to switch to
- * the "single global power domain" way.
+ * Update: Runtime PM support isn't necessary. Instead, this can easily be
+ * fixed in drivers by adding a call to dev_pm_domain_start() during probe.
+ *
+ * In summary, the second part needs to be addressed via minor updates to the
+ * relevant drivers, before the "single global power domain" model can be used.
*
*/
@@ -86,6 +93,8 @@ struct imx_sc_pd_soc {
u8 num_ranges;
};
+static int imx_con_rsrc;
+
static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* LSIO SS */
{ "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
@@ -134,7 +143,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "can", IMX_SC_R_CAN_0, 3, true, 0 },
{ "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
{ "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 },
- { "adc", IMX_SC_R_ADC_0, 1, true, 0 },
+ { "adc", IMX_SC_R_ADC_0, 2, true, 0 },
{ "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
{ "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
{ "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
@@ -207,6 +216,23 @@ to_imx_sc_pd(struct generic_pm_domain *genpd)
return container_of(genpd, struct imx_sc_pm_domain, pd);
}
+static void imx_sc_pd_get_console_rsrc(void)
+{
+ struct of_phandle_args specs;
+ int ret;
+
+ if (!of_stdout)
+ return;
+
+ ret = of_parse_phandle_with_args(of_stdout, "power-domains",
+ "#power-domain-cells",
+ 0, &specs);
+ if (ret)
+ return;
+
+ imx_con_rsrc = specs.args[0];
+}
+
static int imx_sc_pd_power(struct generic_pm_domain *domain, bool power_on)
{
struct imx_sc_msg_req_set_resource_power_mode msg;
@@ -267,6 +293,7 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
const struct imx_sc_pd_range *pd_ranges)
{
struct imx_sc_pm_domain *sc_pd;
+ bool is_off = true;
int ret;
if (!imx_sc_rm_is_resource_owned(pm_ipc_handle, pd_ranges->rsrc + idx))
@@ -288,6 +315,10 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
"%s", pd_ranges->name);
sc_pd->pd.name = sc_pd->name;
+ if (imx_con_rsrc == sc_pd->rsrc) {
+ sc_pd->pd.flags = GENPD_FLAG_RPM_ALWAYS_ON;
+ is_off = false;
+ }
if (sc_pd->rsrc >= IMX_SC_R_LAST) {
dev_warn(dev, "invalid pd %s rsrc id %d found",
@@ -297,7 +328,7 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
return NULL;
}
- ret = pm_genpd_init(&sc_pd->pd, NULL, true);
+ ret = pm_genpd_init(&sc_pd->pd, NULL, is_off);
if (ret) {
dev_warn(dev, "failed to init pd %s rsrc id %d",
sc_pd->name, sc_pd->rsrc);
@@ -363,6 +394,8 @@ static int imx_sc_pd_probe(struct platform_device *pdev)
if (!pd_soc)
return -ENODEV;
+ imx_sc_pd_get_console_rsrc();
+
return imx_scu_init_pm_domains(&pdev->dev, pd_soc);
}
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index eba6b60bfb61..1829ba220576 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -118,7 +118,7 @@ static void __scm_legacy_do(const struct arm_smccc_args *smc,
}
/**
- * qcom_scm_call() - Sends a command to the SCM and waits for the command to
+ * scm_legacy_call() - Sends a command to the SCM and waits for the command to
* finish processing.
*
* A note on cache maintenance:
@@ -209,7 +209,7 @@ out:
(n & 0xf))
/**
- * qcom_scm_call_atomic() - Send an atomic SCM command with up to 5 arguments
+ * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
* and 3 return values
* @desc: SCM call descriptor containing arguments
* @res: SCM call return values
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
index 497c13ba98d6..d111833364ba 100644
--- a/drivers/firmware/qcom_scm-smc.c
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
} while (res->a0 == QCOM_SCM_V2_EBUSY);
}
-int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res, bool atomic)
+
+int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ enum qcom_scm_convention qcom_convention,
+ struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
int i;
@@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
size_t alloc_len;
gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
- u32 qcom_smccc_convention =
- (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
- ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+ u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
+ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
struct arm_smccc_res smc_res;
struct arm_smccc_args smc = {0};
@@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
}
return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
+
}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index f57779fc7ee9..ee9cb545e73b 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
clk_disable_unprepare(__scm->bus_clk);
}
-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
- u32 cmd_id);
+enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
+static DEFINE_SPINLOCK(scm_query_lock);
-enum qcom_scm_convention qcom_scm_convention;
-static bool has_queried __read_mostly;
-static DEFINE_SPINLOCK(query_lock);
-
-static void __query_convention(void)
+static enum qcom_scm_convention __get_convention(void)
{
unsigned long flags;
struct qcom_scm_desc desc = {
@@ -133,36 +129,50 @@ static void __query_convention(void)
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
+ enum qcom_scm_convention probed_convention;
int ret;
+ bool forced = false;
- spin_lock_irqsave(&query_lock, flags);
- if (has_queried)
- goto out;
+ if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ return qcom_scm_convention;
- qcom_scm_convention = SMC_CONVENTION_ARM_64;
- // Device isn't required as there is only one argument - no device
- // needed to dma_map_single to secure world
- ret = scm_smc_call(NULL, &desc, &res, true);
+ /*
+ * Device isn't required as there is only one argument - no device
+ * needed to dma_map_single to secure world
+ */
+ probed_convention = SMC_CONVENTION_ARM_64;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
if (!ret && res.result[0] == 1)
- goto out;
+ goto found;
+
+ /*
+ * Some SC7180 firmwares didn't implement the
+ * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the ARM_64
+ * calling convention on these firmwares. Luckily we don't make any
+ * early calls into the firmware on these SoCs so the device pointer
+ * will be valid here to check if the compatible matches.
+ */
+ if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
+ forced = true;
+ goto found;
+ }
- qcom_scm_convention = SMC_CONVENTION_ARM_32;
- ret = scm_smc_call(NULL, &desc, &res, true);
+ probed_convention = SMC_CONVENTION_ARM_32;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
if (!ret && res.result[0] == 1)
- goto out;
-
- qcom_scm_convention = SMC_CONVENTION_LEGACY;
-out:
- has_queried = true;
- spin_unlock_irqrestore(&query_lock, flags);
- pr_info("qcom_scm: convention: %s\n",
- qcom_scm_convention_names[qcom_scm_convention]);
-}
+ goto found;
+
+ probed_convention = SMC_CONVENTION_LEGACY;
+found:
+ spin_lock_irqsave(&scm_query_lock, flags);
+ if (probed_convention != qcom_scm_convention) {
+ qcom_scm_convention = probed_convention;
+ pr_info("qcom_scm: convention: %s%s\n",
+ qcom_scm_convention_names[qcom_scm_convention],
+ forced ? " (forced)" : "");
+ }
+ spin_unlock_irqrestore(&scm_query_lock, flags);
-static inline enum qcom_scm_convention __get_convention(void)
-{
- if (unlikely(!has_queried))
- __query_convention();
return qcom_scm_convention;
}
@@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
}
}
-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
- u32 cmd_id)
+static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id)
{
int ret;
struct qcom_scm_desc desc = {
@@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
ret = qcom_scm_call(dev, &desc, &res);
- return ret ? : res.result[0];
+ return ret ? false : !!res.result[0];
}
/**
@@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
};
struct qcom_scm_res res;
- ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PIL_PAS_IS_SUPPORTED);
- if (ret <= 0)
+ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PIL_PAS_IS_SUPPORTED))
return false;
ret = qcom_scm_call(__scm->dev, &desc, &res);
@@ -1060,17 +1069,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
*/
bool qcom_scm_hdcp_available(void)
{
+ bool avail;
int ret = qcom_scm_clk_enable();
if (ret)
return ret;
- ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
QCOM_SCM_HDCP_INVOKE);
qcom_scm_clk_disable();
- return ret > 0;
+ return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
@@ -1242,7 +1252,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm = scm;
__scm->dev = &pdev->dev;
- __query_convention();
+ __get_convention();
/*
* If requested enable "download mode", from this point on warmboot
@@ -1291,6 +1301,7 @@ static struct platform_driver qcom_scm_driver = {
.driver = {
.name = "qcom_scm",
.of_match_table = qcom_scm_dt_match,
+ .suppress_bind_attrs = true,
},
.probe = qcom_scm_probe,
.shutdown = qcom_scm_shutdown,
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 95cd1ac30ab0..632fe3142462 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -61,8 +61,11 @@ struct qcom_scm_res {
};
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
-extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res, bool atomic);
+extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ enum qcom_scm_convention qcom_convention,
+ struct qcom_scm_res *res, bool atomic);
+#define scm_smc_call(dev, desc, res, atomic) \
+ __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
extern int scm_legacy_call_atomic(struct device *dev,
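With the header change above, scm_smc_call() becomes a thin wrapper that forwards the convention cached by __get_convention() in qcom_scm.c, while the probing path hands each candidate convention to __scm_smc_call() explicitly. For orientation, a sketch of a typical internal call site after this series; the ids mirror the availability query used by the probing code, but treat the field values and error mapping as illustrative:

static int qcom_scm_example_query(struct device *dev)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	/* Dispatches to the SMC (32/64-bit) or legacy transport internally. */
	ret = qcom_scm_call(dev, &desc, &res);
	if (ret)
		return ret;

	return res.result[0] ? 0 : -EOPNOTSUPP;
}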
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 30259dc9b805..250e01680742 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -7,6 +7,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/kref.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -27,6 +28,8 @@ struct rpi_firmware {
struct mbox_chan *chan; /* The property channel. */
struct completion c;
u32 enabled;
+
+ struct kref consumers;
};
static DEFINE_MUTEX(transaction_lock);
@@ -225,12 +228,38 @@ static void rpi_register_clk_driver(struct device *dev)
-1, NULL, 0);
}
+static void rpi_firmware_delete(struct kref *kref)
+{
+ struct rpi_firmware *fw = container_of(kref, struct rpi_firmware,
+ consumers);
+
+ mbox_free_channel(fw->chan);
+ kfree(fw);
+}
+
+void rpi_firmware_put(struct rpi_firmware *fw)
+{
+ kref_put(&fw->consumers, rpi_firmware_delete);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_put);
+
+static void devm_rpi_firmware_put(void *data)
+{
+ struct rpi_firmware *fw = data;
+
+ rpi_firmware_put(fw);
+}
+
static int rpi_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rpi_firmware *fw;
- fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ /*
+ * Memory will be freed by rpi_firmware_delete() once all users have
+ * released their firmware handles. Don't use devm_kzalloc() here.
+ */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
if (!fw)
return -ENOMEM;
@@ -247,6 +276,7 @@ static int rpi_firmware_probe(struct platform_device *pdev)
}
init_completion(&fw->c);
+ kref_init(&fw->consumers);
platform_set_drvdata(pdev, fw);
@@ -275,7 +305,8 @@ static int rpi_firmware_remove(struct platform_device *pdev)
rpi_hwmon = NULL;
platform_device_unregister(rpi_clk);
rpi_clk = NULL;
- mbox_free_channel(fw->chan);
+
+ rpi_firmware_put(fw);
return 0;
}
@@ -284,19 +315,51 @@ static int rpi_firmware_remove(struct platform_device *pdev)
* rpi_firmware_get - Get pointer to rpi_firmware structure.
* @firmware_node: Pointer to the firmware Device Tree node.
*
+ * The reference to rpi_firmware has to be released with rpi_firmware_put().
+ *
* Returns NULL if the firmware device is not ready.
*/
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
struct platform_device *pdev = of_find_device_by_node(firmware_node);
+ struct rpi_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+ if (!fw)
+ return NULL;
+
+ if (!kref_get_unless_zero(&fw->consumers))
+ return NULL;
+
+ return fw;
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
+/**
+ * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @dev: Device that manages the lifetime of the firmware reference.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * The reference is released automatically when @dev is unbound.
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ struct rpi_firmware *fw;
+
+ fw = rpi_firmware_get(firmware_node);
+ if (!fw)
+ return NULL;
+
+ if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
+ return NULL;
+
+ return fw;
+}
+EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
+
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
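The raspberrypi.c changes above convert the firmware object to kref-based lifetime management: consumers take a reference via rpi_firmware_get()/devm_rpi_firmware_get() and drop it with rpi_firmware_put(), so the mailbox channel and the structure are only freed once the driver and every consumer are gone. A hedged consumer-side sketch, assuming a "firmware" phandle in the consumer's DT node and the existing rpi_firmware_property() helper; all names are illustrative:

static int bar_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct rpi_firmware *fw;
	u32 rev = 0;
	int ret;

	np = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
	if (!np)
		return -ENOENT;

	/* Reference is dropped automatically when this driver unbinds. */
	fw = devm_rpi_firmware_get(&pdev->dev, np);
	of_node_put(np);
	if (!fw)
		return -EPROBE_DEFER;

	ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_FIRMWARE_REVISION,
				    &rev, sizeof(rev));
	if (ret)
		return ret;

	dev_info(&pdev->dev, "firmware revision %u\n", rev);
	return 0;
}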
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 7eb9958662dd..83082e2f2e44 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2020 Xilinx, Inc.
+ * Copyright (C) 2014-2021 Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
static int zynqmp_firmware_remove(struct platform_device *pdev)
{
struct pm_api_feature_data *feature_data;
+ struct hlist_node *tmp;
int i;
mfd_remove_devices(&pdev->dev);
zynqmp_pm_api_debugfs_exit();
- hash_for_each(pm_api_features_map, i, feature_data, hentry) {
+ hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
hash_del(&feature_data->hentry);
kfree(feature_data);
}
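The zynqmp.c fix above switches the teardown loop to hash_for_each_safe() because each iteration deletes and frees the entry it is standing on. In isolation the pattern looks like this self-contained sketch (hypothetical names, standard linux/hashtable.h macros):

#include <linux/hashtable.h>
#include <linux/slab.h>

struct feature {
	u32 id;
	struct hlist_node hentry;
};

static DEFINE_HASHTABLE(feature_map, 4);

static void feature_map_teardown(void)
{
	struct feature *f;
	struct hlist_node *tmp;
	int bkt;

	/* tmp caches the next node before f is unlinked and freed. */
	hash_for_each_safe(feature_map, bkt, tmp, f, hentry) {
		hash_del(&f->hentry);
		kfree(f);
	}
}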