Diffstat (limited to 'drivers/firmware/arm_scmi/driver.c')
-rw-r--r--	drivers/firmware/arm_scmi/driver.c	| 1632
1 file changed, 1060 insertions(+), 572 deletions(-)
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index ef3d76b3f4..e602f7a440 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -14,62 +14,53 @@
* Copyright (C) 2018-2021 ARM Ltd.
*/
-#define pr_fmt(fmt) "SCMI DRIVER - " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <common.h>
#include <linux/bitmap.h>
#include <driver.h>
#include <linux/export.h>
-#include <io.h>
+#include <linux/notifier.h>
+#include <linux/io.h>
+#include <io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <of_address.h>
#include <of_device.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/mutex.h>
#include <linux/processor.h>
#include "common.h"
-enum scmi_error_codes {
- SCMI_SUCCESS = 0, /* Success */
- SCMI_ERR_SUPPORT = -1, /* Not supported */
- SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
- SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
- SCMI_ERR_ENTRY = -4, /* Not found */
- SCMI_ERR_RANGE = -5, /* Value out of range */
- SCMI_ERR_BUSY = -6, /* Device busy */
- SCMI_ERR_COMMS = -7, /* Communication Error */
- SCMI_ERR_GENERIC = -8, /* Generic Error */
- SCMI_ERR_HARDWARE = -9, /* Hardware Error */
- SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
- SCMI_ERR_MAX
-};
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
+static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purposes */
-static unsigned transfer_last_id;
-
-static DEFINE_IDR(scmi_requested_devices);
-
-struct scmi_requested_dev {
- const struct scmi_device_id *id_table;
- struct list_head node;
-};
+static atomic_t transfer_last_id;
/**
* struct scmi_xfers_info - Structure to manage transfer information
*
- * @xfer_block: Preallocated Message array
* @xfer_alloc_table: Bitmap table for allocated messages.
* Index of this bitmap table is also used for message
* sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ * @max_msg: Maximum number of messages that can be pending
+ * @free_xfers: A free list of xfers available for use. It is initialized with
+ *		a number of xfers equal to the maximum allowed in-flight
+ *		messages.
*/
struct scmi_xfers_info {
- struct scmi_xfer *xfer_block;
unsigned long *xfer_alloc_table;
+ spinlock_t xfer_lock;
+ int max_msg;
+ struct hlist_head free_xfers;
};
/**
@@ -87,7 +78,7 @@ struct scmi_xfers_info {
struct scmi_protocol_instance {
const struct scmi_handle *handle;
const struct scmi_protocol *proto;
- int users;
+ refcount_t users;
void *priv;
struct scmi_protocol_handle ph;
};
@@ -97,6 +88,7 @@ struct scmi_protocol_instance {
/**
* struct scmi_info - Structure representing a SCMI instance
*
+ * @id: A sequence number starting from zero identifying this instance
* @dev: Device pointer
* @desc: SoC description for this instance
* @version: SCMI revision information containing protocol version,
@@ -109,15 +101,26 @@ struct scmi_protocol_instance {
* @protocols: IDR for protocols' instance descriptors initialized for
* this SCMI instance: populated on protocol's first attempted
* usage.
+ * @protocols_mtx: A mutex to protect protocols instances initialization.
* @protocols_imp: List of protocols implemented, currently maximum of
- * MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * scmi_revision_info.num_protocols elements allocated by the
+ * base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
+ * @atomic_threshold: Optional system wide DT-configured threshold, expressed
+ *		      in microseconds, for atomic operations.
+ *		      Only SCMI synchronous commands reported by the platform
+ *		      to have an execution latency less than or equal to the
+ *		      threshold should be considered for atomic mode operation:
+ *		      such a decision is ultimately left up to the SCMI drivers.
* @node: List head
* @users: Number of users of this instance
+ * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
+ * bus
+ * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
*/
struct scmi_info {
- struct device_d *dev;
+ struct device *dev;
const struct scmi_desc *desc;
struct scmi_revision_info version;
struct scmi_handle handle;
@@ -127,176 +130,340 @@ struct scmi_info {
struct idr rx_idr;
struct idr protocols;
/* Ensure mutual exclusive access to protocols instance array */
+ struct mutex protocols_mtx;
u8 *protocols_imp;
struct idr active_protocols;
+ unsigned int atomic_threshold;
struct list_head node;
- int users;
+ struct notifier_block dev_req_nb;
+ /* Serialize device creation process for this instance */
+ struct mutex devreq_mtx;
};
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
-static const int scmi_linux_errmap[] = {
- /* better than switch case as long as return value is continuous */
- 0, /* SCMI_SUCCESS */
- -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
- -EINVAL, /* SCMI_ERR_PARAM */
- -EACCES, /* SCMI_ERR_ACCESS */
- -ENOENT, /* SCMI_ERR_ENTRY */
- -ERANGE, /* SCMI_ERR_RANGE */
- -EBUSY, /* SCMI_ERR_BUSY */
- -ECOMM, /* SCMI_ERR_COMMS */
- -EIO, /* SCMI_ERR_GENERIC */
- -EREMOTEIO, /* SCMI_ERR_HARDWARE */
- -EPROTO, /* SCMI_ERR_PROTOCOL */
-};
+static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (!proto) {
+ pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+ return NULL;
+ }
+
+ pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
-static inline int scmi_to_linux_errno(int errno)
+ return proto;
+}
+
+int scmi_protocol_register(const struct scmi_protocol *proto)
{
- if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
- return scmi_linux_errmap[-errno];
- return -EIO;
+ int ret;
+
+ if (!proto) {
+ pr_err("invalid protocol\n");
+ return -EINVAL;
+ }
+
+ if (!proto->instance_init) {
+ pr_err("missing init for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ spin_lock(&protocol_lock);
+ ret = idr_alloc_one(&scmi_protocols, (void *)proto, proto->id);
+ spin_unlock(&protocol_lock);
+ if (ret != proto->id) {
+ pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ proto->id, ret);
+ return ret;
+ }
+
+ pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
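For context, a minimal sketch of the registration side that feeds this IDR. Only scmi_protocol_register() and the three scmi_protocol fields actually used in this file (.id, .instance_init, .ops) are assumed; everything named my_* is hypothetical:

static int my_proto_instance_init(const struct scmi_protocol_handle *ph)
{
	/* Negotiate versions, stash private state via ph->set_priv(), ... */
	return 0;
}

static const struct scmi_protocol my_proto = {
	.id = 0x99,			/* hypothetical vendor protocol ID */
	.instance_init = my_proto_instance_init,
	.ops = NULL,			/* protocol-specific ops table */
};

static int my_proto_register(void)
{
	return scmi_protocol_register(&my_proto);
}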
/**
- * scmi_dump_header_dbg() - Helper to dump a message header.
+ * scmi_create_protocol_devices - Create devices for all pending requests for
+ * this SCMI instance.
*
- * @dev: Device pointer corresponding to the SCMI entity
- * @hdr: pointer to header.
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ * @name: The optional name of the device to be created: if not provided this
+ * call will lead to the creation of all the devices currently requested
+ * for the specified protocol.
*/
-static inline void scmi_dump_header_dbg(struct device_d *dev,
- struct scmi_msg_hdr *hdr)
+static void scmi_create_protocol_devices(struct device_node *np,
+ struct scmi_info *info,
+ int prot_id, const char *name)
{
- dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
- hdr->id, hdr->seq, hdr->protocol_id);
+ struct scmi_device *sdev;
+
+ mutex_lock(&info->devreq_mtx);
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
+ if (name && !sdev)
+ dev_err(info->dev,
+ "failed to create device for protocol 0x%X (%s)\n",
+ prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
+}
+
+static void scmi_destroy_protocol_devices(struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ mutex_lock(&info->devreq_mtx);
+ scmi_device_destroy(info->dev, prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
}
/**
- * scmi_xfer_get() - Allocate one message
+ * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
*
- * @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: The xfer to act upon
*
- * Helper function which is used by various message functions that are
- * exposed to clients of this driver for allocating a message traffic event.
+ * Pick the next unused monotonically increasing token and set it into
+ * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
+ * reuse of freshly completed or timed-out xfers, thus mitigating the risk
+ * of incorrect association of a late and expired xfer with a live in-flight
+ * transaction, both happening to re-use the same token identifier.
+ *
+ * Since the platform is NOT required to answer our requests in order, we
+ * should account for a few rare but possible scenarios:
+ *
+ * - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
+ *   using find_next_zero_bit() starting from the candidate next_token bit
+ *
+ * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
+ *   there are plenty of free tokens at the start, so try a second pass using
+ *   find_next_zero_bit() and starting from 0.
+ *
+ * X = used in-flight
+ *
+ * Normal
+ * ------
+ *
+ * |- xfer_id picked
+ * -----------+----------------------------------------------------------
+ * | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
+ *
+ * Out-of-order pending at start
+ * -----------------------------
+ *
+ * |- xfer_id picked, last_token fixed
+ * -----+----------------------------------------------------------------
+ * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
+ *
+ *
+ * Out-of-order pending at end
+ * ---------------------------
+ *
+ * |- xfer_id picked, last_token fixed
+ * -----+----------------------------------------------------------------
+ * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
*
- * This function can sleep depending on pending requests already in the system
- * for the SCMI entity.
+ * Context: Assumes to be called with @xfer_lock already acquired.
*
- * Return: 0 if all went fine, else corresponding error.
+ * Return: 0 on Success or error
*/
-static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
- struct scmi_xfers_info *minfo)
+static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
+ struct scmi_xfer *xfer)
{
- u16 xfer_id;
- struct scmi_xfer *xfer;
- unsigned long bit_pos;
- struct scmi_info *info = handle_to_scmi_info(handle);
+ unsigned long xfer_id, next_token;
- bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
- info->desc->max_msg);
- if (bit_pos == info->desc->max_msg)
- return ERR_PTR(-ENOMEM);
- set_bit(bit_pos, minfo->xfer_alloc_table);
+ /*
+ * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
+ * using the pre-allocated transfer_id as a base.
+ * Note that the global transfer_id is shared across all message types
+ * so there could be holes in the allocated set of monotonic sequence
+ * numbers, but that is going to limit the effectiveness of the
+ * mitigation only in very rare limit conditions.
+ */
+ next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
- xfer_id = bit_pos;
+ /* Pick the next available xfer_id >= next_token */
+ xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+ MSG_TOKEN_MAX, next_token);
+ if (xfer_id == MSG_TOKEN_MAX) {
+ /*
+ * After heavily out-of-order responses, there are no free
+ * tokens ahead, but only at start of xfer_alloc_table so
+ * try again from the beginning.
+ */
+ xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+ MSG_TOKEN_MAX, 0);
+ /*
+ * Something is wrong if we got here since there can be a
+ * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
+ * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
+ */
+ if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
+ return -ENOMEM;
+ }
- xfer = &minfo->xfer_block[xfer_id];
- xfer->hdr.seq = xfer_id;
- xfer->done = false;
- xfer->transfer_id = ++transfer_last_id;
+ /* Update +/- last_token accordingly if we skipped some hole */
+ if (xfer_id != next_token)
+ atomic_add((int)(xfer_id - next_token), &transfer_last_id);
- return xfer;
+ xfer->hdr.seq = (u16)xfer_id;
+
+ return 0;
}
/**
- * __scmi_xfer_put() - Release a message
+ * scmi_xfer_token_clear - Release the token
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
- * @xfer: message that was reserved by scmi_xfer_get
+ * @xfer: The xfer to act upon
*/
-static void
-__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
+static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
+ struct scmi_xfer *xfer)
{
clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}
-static void scmi_handle_response(struct scmi_chan_info *cinfo,
- u16 xfer_id, u8 msg_type)
+/**
+ * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper assumes that the xfer to be registered as in-flight
+ * had been built using an xfer sequence number which still corresponds to a
+ * free slot in the xfer_alloc_table.
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ */
+static inline void
+scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ /* Set in-flight */
+ set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+ xfer->pending = true;
+}
+
+/**
+ * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
+ * as pending in-flight
+ *
+ * @xfer: The xfer to act upon
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Return: 0 on Success or error otherwise
+ */
+static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ /* Set a new monotonic token as the xfer sequence number */
+ ret = scmi_xfer_token_set(minfo, xfer);
+ if (!ret)
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
+ * scmi_xfer_get() - Allocate one message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Helper function which is used by various message functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * Picks an xfer from the free list @free_xfers (if any is available) and
+ * performs a basic initialization.
+ *
+ * Note that, at this point, no sequence number has been assigned yet to the
+ * allocated xfer, nor has it been registered as a pending transaction.
+ *
+ * The successfully initialized xfer is refcounted.
+ *
+ * Context: Holds @xfer_lock while manipulating @free_xfers.
+ *
+ * Return: An initialized xfer if all went fine, else an error pointer.
+ */
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
+ struct scmi_xfers_info *minfo)
{
+ unsigned long flags;
struct scmi_xfer *xfer;
- struct device_d *dev = cinfo->dev;
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct scmi_xfers_info *minfo = &info->tx_minfo;
- /* Are we even expecting this? */
- if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
- dev_err(dev, "message for %d is not expected!\n", xfer_id);
- info->desc->ops->clear_channel(cinfo);
- return;
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (hlist_empty(&minfo->free_xfers)) {
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+ return ERR_PTR(-ENOMEM);
}
- xfer = &minfo->xfer_block[xfer_id];
+ /* grab an xfer from the free_list */
+ xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
+ hlist_del_init(&xfer->node);
+
/*
- * Even if a response was indeed expected on this slot at this point,
- * a buggy platform could wrongly reply feeding us an unexpected
- * delayed response we're not prepared to handle: bail-out safely
- * blaming firmware.
+ * Allocate transfer_id early so that can be used also as base for
+ * monotonic sequence number generation if needed.
*/
- if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
- dev_err(dev,
- "Delayed Response for %d not expected! Buggy F/W ?\n",
- xfer_id);
- info->desc->ops->clear_channel(cinfo);
- /* It was unexpected, so nobody will clear the xfer if not us */
- __scmi_xfer_put(minfo, xfer);
- return;
- }
-
- scmi_dump_header_dbg(dev, &xfer->hdr);
+ xfer->transfer_id = atomic_inc_return(&transfer_last_id);
- info->desc->ops->fetch_response(cinfo, xfer);
+ refcount_set(&xfer->users, 1);
+ atomic_set(&xfer->busy, SCMI_XFER_FREE);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
- if (msg_type == MSG_TYPE_DELAYED_RESP) {
- info->desc->ops->clear_channel(cinfo);
- *xfer->async_done = true;
- } else {
- xfer->done = true;
- }
+ return xfer;
}
/**
- * scmi_rx_callback() - callback for receiving messages
+ * __scmi_xfer_put() - Release a message
*
- * @cinfo: SCMI channel info
- * @msg_hdr: Message header
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: message that was reserved by scmi_xfer_get
*
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
+ * After the refcount check, possibly release an xfer, clearing the token
+ * slot and putting it back into @free_xfers.
*
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
+ * This holds a spinlock to maintain integrity of internal data structures.
*/
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+static void
+__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
- u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
- u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+ unsigned long flags;
- switch (msg_type) {
- case MSG_TYPE_COMMAND:
- case MSG_TYPE_DELAYED_RESP:
- scmi_handle_response(cinfo, xfer_id, msg_type);
- break;
- default:
- WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
- break;
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (refcount_dec_and_test(&xfer->users)) {
+ if (xfer->pending) {
+ scmi_xfer_token_clear(minfo, xfer);
+ xfer->pending = false;
+ }
+ hlist_add_head(&xfer->node, &minfo->free_xfers);
}
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
/**
* xfer_put() - Release a transmit message
*
* @ph: Pointer to SCMI protocol handle
- * @xfer: message that was reserved by scmi_xfer_get
+ * @xfer: message that was reserved by xfer_get_init
*/
static void xfer_put(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
@@ -307,6 +474,46 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
__scmi_xfer_put(&info->tx_minfo, xfer);
}
+static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, unsigned int timeout_ms)
+{
+ int ret = 0;
+ unsigned long flags;
+
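+	/*
+	 * Note: no explicit wait loop is needed here. The transports wired
+	 * up in this file's of_match table (SMC and OP-TEE) complete the
+	 * call synchronously from within send_message(), so the response is
+	 * already available by the time we get here; @timeout_ms is
+	 * currently unused.
+	 */
+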
+ /*
+ * Do not fetch_response if an out-of-order delayed
+ * response is being processed.
+ */
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (xfer->state == SCMI_XFER_SENT_OK) {
+ desc->ops->fetch_response(cinfo, xfer);
+ xfer->state = SCMI_XFER_RESP_OK;
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ return ret;
+}
+
+/**
+ * scmi_wait_for_message_response - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
+ info->desc->max_rx_timeout_ms);
+}
+
/**
* do_xfer() - Do one transfer
*
@@ -323,20 +530,30 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
int ret;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
- struct device_d *dev = info->dev;
+ struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
- u64 start;
+
+	/* All transfers are polled for completion, so the transport must be polling capable */
+ if (!is_transport_polling_capable(info->desc)) {
+ dev_warn_once(dev,
+ "Polling mode is not supported by transport.\n");
+ return -EINVAL;
+ }
+
+ cinfo = idr_find(&info->tx_idr, pi->proto->id);
+ if (unlikely(!cinfo))
+ return -EINVAL;
/*
- * Re-instate protocol id here from protocol handle so that cannot be
+ * Initialise protocol id now from protocol handle to avoid it being
* overridden by mistake (or malice) by the protocol code mangling with
- * the scmi_xfer structure.
+ * the scmi_xfer structure prior to this.
*/
xfer->hdr.protocol_id = pi->proto->id;
- cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
- if (unlikely(!cinfo))
- return -EINVAL;
+ /* Clear any stale status */
+ xfer->hdr.status = SCMI_SUCCESS;
+ xfer->state = SCMI_XFER_SENT_OK;
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
@@ -344,21 +561,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
return ret;
}
- /* And we wait for the response. */
- start = get_time_ns();
- while (!xfer->done) {
- if (is_timeout(start, info->desc->max_rx_timeout_ms * (u64)NSEC_PER_MSEC)) {
- dev_err(dev, "timed out in resp(caller: %pS)\n", (void *)_RET_IP_);
- ret = -ETIMEDOUT;
- break;
- }
- }
-
+ ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
if (info->desc->ops->mark_txdone)
- info->desc->ops->mark_txdone(cinfo, ret);
+ info->desc->ops->mark_txdone(cinfo, ret, xfer);
return ret;
}
@@ -372,47 +580,6 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
xfer->rx.len = info->desc->max_msg_size;
}
-#define SCMI_MAX_RESPONSE_TIMEOUT_NS (2 * NSEC_PER_SEC)
-
-/**
- * do_xfer_with_response() - Do one transfer and wait until the delayed
- * response is received
- *
- * @ph: Pointer to SCMI protocol handle
- * @xfer: Transfer to initiate and wait for response
- *
- * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
- * return corresponding error, else if all goes well, return 0.
- */
-static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
- struct scmi_xfer *xfer)
-{
- int ret;
- const struct scmi_protocol_instance *pi = ph_to_pi(ph);
- bool async_response = false;
- u64 start;
-
- xfer->hdr.protocol_id = pi->proto->id;
-
- xfer->async_done = &async_response;
-
- ret = do_xfer(ph, xfer);
- if (ret)
- goto out;
-
- start = get_time_ns();
- while (!*xfer->async_done) {
- if (is_timeout(start, SCMI_MAX_RESPONSE_TIMEOUT_NS)) {
- ret = -ETIMEDOUT;
- break;
- }
- }
-
-out:
- xfer->async_done = NULL;
- return ret;
-}
-
/**
* xfer_get_init() - Allocate and initialise one message for transmit
*
@@ -437,7 +604,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
- struct device_d *dev = info->dev;
+ struct device *dev = info->dev;
/* Ensure we have sane transfer sizes */
if (rx_size > info->desc->max_msg_size ||
@@ -451,11 +618,19 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
return ret;
}
+ /* Pick a sequence number and register this xfer as in-flight */
+ ret = scmi_xfer_pending_set(xfer, minfo);
+ if (ret) {
+ dev_err(pi->handle->dev,
+ "Failed to get monotonic token %d\n", ret);
+ __scmi_xfer_put(minfo, xfer);
+ return ret;
+ }
+
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
+ xfer->hdr.type = MSG_TYPE_COMMAND;
xfer->hdr.id = msg_id;
- xfer->hdr.protocol_id = pi->proto->id;
- xfer->hdr.poll_completion = false;
*p = xfer;
@@ -529,10 +704,335 @@ static const struct scmi_xfer_ops xfer_ops = {
.xfer_get_init = xfer_get_init,
.reset_rx_to_maxsz = reset_rx_to_maxsz,
.do_xfer = do_xfer,
- .do_xfer_with_response = do_xfer_with_response,
.xfer_put = xfer_put,
};
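To make the flow concrete, here is a hedged sketch of the canonical request/response sequence a protocol implementation builds on top of these ops. The command ID 0x0 (PROTOCOL_VERSION) and the caller are illustrative assumptions; the xops names and get_unaligned_le32() are as used elsewhere in this file:

/* Hypothetical PROTOCOL_VERSION query using the xfer_ops pipeline above. */
static int example_protocol_version_get(const struct scmi_protocol_handle *ph,
					u32 *version)
{
	int ret;
	struct scmi_xfer *t;

	/* No TX payload, one __le32 of RX payload */
	ret = ph->xops->xfer_get_init(ph, 0x0 /* PROTOCOL_VERSION */,
				      0, sizeof(__le32), &t);
	if (ret)
		return ret;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*version = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);

	return ret;
}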
+struct scmi_msg_resp_domain_name_get {
+ __le32 flags;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+/**
+ * scmi_common_extended_name_get - Common helper to get extended resources name
+ * @ph: A protocol handle reference.
+ * @cmd_id: The specific command ID to use.
+ * @res_id: The specific resource ID to use.
+ * @name: A pointer to the preallocated area where the retrieved name will be
+ * stored as a NULL terminated string.
+ * @len: The len in bytes of the @name char array.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
+ u8 cmd_id, u32 res_id, char *name,
+ size_t len)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_domain_name_get *resp;
+
+ ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
+ sizeof(*resp), &t);
+ if (ret)
+ goto out;
+
+ put_unaligned_le32(res_id, t->tx.buf);
+ resp = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ strscpy(name, resp->name, len);
+
+ ph->xops->xfer_put(ph, t);
+out:
+ if (ret)
+ dev_warn(ph->dev,
+ "Failed to get extended name - id:%u (ret:%d). Using %s\n",
+ res_id, ret, name);
+ return ret;
+}
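A hedged usage sketch via the helpers table exported further below (ph->hops); cmd_id and res_id values are protocol specific and hypothetical here. Note the helper logs the current @name as fallback on failure, so callers pre-fill a default:

static void example_name_refresh(const struct scmi_protocol_handle *ph,
				 u8 cmd_id, u32 res_id)
{
	char name[64] = "unknown";	/* printed as fallback on failure */

	ph->hops->extended_name_get(ph, cmd_id, res_id, name, sizeof(name));
}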
+
+/**
+ * struct scmi_iterator - Iterator descriptor
+ * @msg: A reference to the message TX buffer; filled by @prepare_message with
+ * a proper custom command payload for each multi-part command request.
+ * @resp: A reference to the response RX buffer; used by @update_state and
+ * @process_response to parse the multi-part replies.
+ * @t: A reference to the underlying xfer initialized and used transparently by
+ * the iterator internal routines.
+ * @ph: A reference to the associated protocol handle to be used.
+ * @ops: A reference to the custom provided iterator operations.
+ * @state: The current iterator state; used and updated in turn by the
+ *	   iterator's internal routines and by the caller-provided
+ *	   @scmi_iterator_ops.
+ * @priv: A reference to optional private data as provided by the caller and
+ *	  passed back to the @scmi_iterator_ops.
+ */
+struct scmi_iterator {
+ void *msg;
+ void *resp;
+ struct scmi_xfer *t;
+ const struct scmi_protocol_handle *ph;
+ struct scmi_iterator_ops *ops;
+ struct scmi_iterator_state state;
+ void *priv;
+};
+
+static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
+ struct scmi_iterator_ops *ops,
+ unsigned int max_resources, u8 msg_id,
+ size_t tx_size, void *priv)
+{
+ int ret;
+ struct scmi_iterator *i;
+
+ i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
+ if (!i)
+ return ERR_PTR(-ENOMEM);
+
+ i->ph = ph;
+ i->ops = ops;
+ i->priv = priv;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
+ if (ret) {
+ devm_kfree(ph->dev, i);
+ return ERR_PTR(ret);
+ }
+
+ i->state.max_resources = max_resources;
+ i->msg = i->t->tx.buf;
+ i->resp = i->t->rx.buf;
+
+ return i;
+}
+
+static int scmi_iterator_run(void *iter)
+{
+ int ret = -EINVAL;
+ struct scmi_iterator_ops *iops;
+ const struct scmi_protocol_handle *ph;
+ struct scmi_iterator_state *st;
+ struct scmi_iterator *i = iter;
+
+ if (!i || !i->ops || !i->ph)
+ return ret;
+
+ iops = i->ops;
+ ph = i->ph;
+ st = &i->state;
+
+ do {
+ iops->prepare_message(i->msg, st->desc_index, i->priv);
+ ret = ph->xops->do_xfer(ph, i->t);
+ if (ret)
+ break;
+
+ st->rx_len = i->t->rx.len;
+ ret = iops->update_state(st, i->resp, i->priv);
+ if (ret)
+ break;
+
+ if (st->num_returned > st->max_resources - st->desc_index) {
+ dev_err(ph->dev,
+ "No. of resources can't exceed %d\n",
+ st->max_resources);
+ ret = -EINVAL;
+ break;
+ }
+
+ for (st->loop_idx = 0; st->loop_idx < st->num_returned;
+ st->loop_idx++) {
+ ret = iops->process_response(ph, i->resp, st, i->priv);
+ if (ret)
+ goto out;
+ }
+
+ st->desc_index += st->num_returned;
+ ph->xops->reset_rx_to_maxsz(ph, i->t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (st->num_returned && st->num_remaining);
+
+out:
+ /* Finalize and destroy iterator */
+ ph->xops->xfer_put(ph, i->t);
+ devm_kfree(ph->dev, i);
+
+ return ret;
+}
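As a usage sketch: the ops signatures below are inferred from the calls inside scmi_iterator_run() above, and every example_* name (including the response layout and the command ID 0x3) is a hypothetical stand-in, not part of this driver:

struct example_resp {
	__le32 num;	/* num_remaining in [31:16], num_returned in [11:0] */
};

static void example_prepare_message(void *message, unsigned int desc_index,
				    void *priv)
{
	__le32 *start = message;

	/* Each multi-part request carries the index to resume from */
	*start = cpu_to_le32(desc_index);
}

static int example_update_state(struct scmi_iterator_state *st,
				const void *response, void *priv)
{
	const struct example_resp *r = response;

	st->num_returned = le32_to_cpu(r->num) & 0xfff;
	st->num_remaining = le32_to_cpu(r->num) >> 16;

	return 0;
}

static int example_process_response(const struct scmi_protocol_handle *ph,
				    const void *response,
				    struct scmi_iterator_state *st, void *priv)
{
	/* Consume entry st->loop_idx of the current reply chunk here */
	return 0;
}

static int example_enumerate(const struct scmi_protocol_handle *ph,
			     unsigned int num_domains)
{
	struct scmi_iterator_ops ops = {
		.prepare_message = example_prepare_message,
		.update_state = example_update_state,
		.process_response = example_process_response,
	};
	void *iter;

	iter = ph->hops->iter_response_init(ph, &ops, num_domains,
					    0x3 /* hypothetical cmd */,
					    sizeof(__le32), NULL);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	return ph->hops->iter_response_run(iter);
}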
+
+struct scmi_msg_get_fc_info {
+ __le32 domain;
+ __le32 message_id;
+};
+
+struct scmi_msg_resp_desc_fc {
+ __le32 attr;
+#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
+ __le32 rate_limit;
+ __le32 chan_addr_low;
+ __le32 chan_addr_high;
+ __le32 chan_size;
+ __le32 db_addr_low;
+ __le32 db_addr_high;
+ __le32 db_set_lmask;
+ __le32 db_set_hmask;
+ __le32 db_preserve_lmask;
+ __le32 db_preserve_hmask;
+};
+
+static void
+scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id, u32 valid_size,
+ u32 domain, void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db)
+{
+ int ret;
+ u32 flags;
+ u64 phys_addr;
+ u8 size;
+ void __iomem *addr;
+ struct scmi_xfer *t;
+ struct scmi_fc_db_info *db = NULL;
+ struct scmi_msg_get_fc_info *info;
+ struct scmi_msg_resp_desc_fc *resp;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ if (!p_addr) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, describe_id,
+ sizeof(*info), sizeof(*resp), &t);
+ if (ret)
+ goto err_out;
+
+ info = t->tx.buf;
+ info->domain = cpu_to_le32(domain);
+ info->message_id = cpu_to_le32(message_id);
+
+ /*
+ * Bail out on error leaving fc_info addresses zeroed; this includes
+ * the case in which the requested domain/message_id does NOT support
+ * fastchannels at all.
+ */
+ ret = ph->xops->do_xfer(ph, t);
+ if (ret)
+ goto err_xfer;
+
+ resp = t->rx.buf;
+ flags = le32_to_cpu(resp->attr);
+ size = le32_to_cpu(resp->chan_size);
+ if (size != valid_size) {
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+
+ phys_addr = le32_to_cpu(resp->chan_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_xfer;
+ }
+
+ *p_addr = addr;
+
+ if (p_db && SUPPORTS_DOORBELL(flags)) {
+ db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
+ if (!db) {
+ ret = -ENOMEM;
+ goto err_db;
+ }
+
+ size = 1 << DOORBELL_REG_WIDTH(flags);
+ phys_addr = le32_to_cpu(resp->db_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_db_mem;
+ }
+
+ db->addr = addr;
+ db->width = size;
+ db->set = le32_to_cpu(resp->db_set_lmask);
+ db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+ db->mask = le32_to_cpu(resp->db_preserve_lmask);
+ db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+
+ *p_db = db;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ dev_dbg(ph->dev,
+ "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
+ pi->proto->id, message_id, domain);
+
+ return;
+
+err_db_mem:
+ devm_kfree(ph->dev, db);
+
+err_db:
+ *p_addr = NULL;
+
+err_xfer:
+ ph->xops->xfer_put(ph, t);
+
+err_out:
+ dev_warn(ph->dev,
+ "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
+ pi->proto->id, message_id, domain, ret);
+}
+
+#define SCMI_PROTO_FC_RING_DB(w) \
+do { \
+ u##w val = 0; \
+ \
+ if (db->mask) \
+ val = ioread##w(db->addr) & db->mask; \
+ iowrite##w((u##w)db->set | val, db->addr); \
+} while (0)
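Spelled out, SCMI_PROTO_FC_RING_DB(32) expands to the following read-modify-write: the bits selected by db->mask are preserved from the current doorbell value, then db->set is OR-ed in:

do {
	u32 val = 0;

	if (db->mask)
		val = ioread32(db->addr) & db->mask;	/* preserved bits */
	iowrite32((u32)db->set | val, db->addr);	/* ring the doorbell */
} while (0)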
+
+static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
+{
+ if (!db || !db->addr)
+ return;
+
+ if (db->width == 1)
+ SCMI_PROTO_FC_RING_DB(8);
+ else if (db->width == 2)
+ SCMI_PROTO_FC_RING_DB(16);
+ else if (db->width == 4)
+ SCMI_PROTO_FC_RING_DB(32);
+ else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+ SCMI_PROTO_FC_RING_DB(64);
+#else
+ {
+ u64 val = 0;
+
+ if (db->mask)
+ val = ioread64_hi_lo(db->addr) & db->mask;
+ iowrite64_hi_lo(db->set | val, db->addr);
+ }
+#endif
+}
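A protocol that obtained both a fastchannel address and a doorbell via scmi_common_fastchannel_init() would then combine them roughly as follows; the caller and the 32-bit payload are hypothetical:

/* Hypothetical fastchannel write: post the new value, then ring the bell. */
static void example_fc_level_set(void __iomem *set_addr,
				 struct scmi_fc_db_info *db, u32 level)
{
	iowrite32(level, set_addr);
	scmi_common_fastchannel_db_ring(db);
}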
+
+static const struct scmi_proto_helpers_ops helpers_ops = {
+ .extended_name_get = scmi_common_extended_name_get,
+ .iter_response_init = scmi_iterator_init,
+ .iter_response_run = scmi_iterator_run,
+ .fastchannel_init = scmi_common_fastchannel_init,
+ .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
+};
+
/**
* scmi_revision_area_get - Retrieve version memory area.
*
@@ -559,10 +1059,11 @@ scmi_revision_area_get(const struct scmi_protocol_handle *ph)
* @proto: The protocol descriptor.
*
* Allocate a new protocol instance descriptor, using the provided @proto
- * description, against the specified SCMI instance @info, and initialize it;
+ * description, against the specified SCMI instance @info, and initialize it.
*
+ * Context: Assumes to be called with @protocols_mtx already acquired.
* Return: A reference to a freshly allocated and initialized protocol instance
- * or ERR_PTR on failure. On failure the @proto reference is at first
+ * or ERR_PTR on failure.
*/
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
@@ -572,7 +1073,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
struct scmi_protocol_instance *pi;
const struct scmi_handle *handle = &info->handle;
- pi = kzalloc(sizeof(*pi), GFP_KERNEL);
+ pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
if (!pi)
goto clean;
@@ -580,9 +1081,10 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
pi->handle = handle;
pi->ph.dev = handle->dev;
pi->ph.xops = &xfer_ops;
+ pi->ph.hops = &helpers_ops;
pi->ph.set_priv = scmi_set_protocol_priv;
pi->ph.get_priv = scmi_get_protocol_priv;
- pi->users++;
+ refcount_set(&pi->users, 1);
	/* proto->instance_init is assured NON NULL by scmi_protocol_register */
ret = pi->proto->instance_init(&pi->ph);
if (ret)
@@ -618,10 +1120,11 @@ scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
struct scmi_protocol_instance *pi;
struct scmi_info *info = handle_to_scmi_info(handle);
+ mutex_lock(&info->protocols_mtx);
pi = idr_find(&info->protocols, protocol_id);
if (pi) {
- pi->users++;
+ refcount_inc(&pi->users);
} else {
const struct scmi_protocol *proto;
@@ -632,6 +1135,7 @@ scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
else
pi = ERR_PTR(-EPROBE_DEFER);
}
+ mutex_unlock(&info->protocols_mtx);
return pi;
}
@@ -651,6 +1155,38 @@ int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}
+/**
+ * scmi_protocol_release - Protocol de-initialization helper.
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * Remove one user for the specified protocol and triggers de-initialization
+ * and resources de-allocation once the last user has gone.
+ */
+void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_protocol_instance *pi;
+
+ mutex_lock(&info->protocols_mtx);
+ pi = idr_find(&info->protocols, protocol_id);
+ if (WARN_ON(!pi))
+ goto out;
+
+ if (refcount_dec_and_test(&pi->users)) {
+ if (pi->proto->instance_deinit)
+ pi->proto->instance_deinit(&pi->ph);
+
+ idr_remove(&info->protocols, protocol_id);
+
+ dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
+ protocol_id);
+ }
+
+out:
+ mutex_unlock(&info->protocols_mtx);
+}
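Callers are expected to balance this against scmi_protocol_acquire(); a minimal sketch of a transient hold, with a hypothetical caller:

static int example_with_protocol(const struct scmi_handle *handle, u8 proto_id)
{
	int ret;

	ret = scmi_protocol_acquire(handle, proto_id);
	if (ret)
		return ret;

	/* ... use the initialized protocol instance ... */

	scmi_protocol_release(handle, proto_id);

	return 0;
}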
+
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp)
{
@@ -665,110 +1201,67 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
int i;
struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_revision_info *rev = handle->version;
if (!info->protocols_imp)
return false;
- for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
+ for (i = 0; i < rev->num_protocols; i++)
if (info->protocols_imp[i] == prot_id)
return true;
return false;
}
-/**
- * scmi_dev_protocol_get - get protocol operations and handle
- * @protocol_id: The protocol being requested.
- * @ph: A pointer reference used to pass back the associated protocol handle.
- *
- * Get hold of a protocol accounting for its usage, eventually triggering its
- * initialization, and returning the protocol specific operations and related
- * protocol handle which will be used as first argument in most of the
- * protocols operations methods.
- * Being a devres based managed method, protocol hold will be automatically
- * released, and possibly de-initialized on last user, once the SCMI driver
- * owning the scmi_device is unbound from it.
- *
- * Return: A reference to the requested protocol operations or error.
- * Must be checked for errors by caller.
- */
static const void __must_check *
scmi_dev_protocol_get(struct scmi_device *sdev, u8 protocol_id,
struct scmi_protocol_handle **ph)
{
struct scmi_protocol_instance *pi;
- struct scmi_handle *handle = sdev->handle;
if (!ph)
return ERR_PTR(-EINVAL);
- pi = scmi_get_protocol_instance(handle, protocol_id);
+ pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
if (IS_ERR(pi))
- return pi;
+ return ERR_CAST(pi);
*ph = &pi->ph;
return pi->proto->ops;
}
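From a driver's perspective this is reached through the handle set up in scmi_probe() below; a sketch assuming a hypothetical ops type and protocol ID for the protocol being requested:

/* Hypothetical SCMI driver probe acquiring protocol ops and handle. */
static int example_scmi_probe(struct scmi_device *sdev)
{
	struct scmi_protocol_handle *ph;
	const struct example_proto_ops *ops;

	ops = sdev->handle->dev_protocol_get(sdev, EXAMPLE_PROTOCOL_ID, &ph);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* ops and ph stay valid until the protocol is released */
	return 0;
}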
-static inline
-struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
+static int __must_check scmi_dev_protocol_acquire(struct scmi_device *sdev,
+ u8 protocol_id)
{
- info->users++;
- return &info->handle;
+ return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(sdev->handle, protocol_id));
}
-/**
- * scmi_handle_get() - Get the SCMI handle for a device
- *
- * @dev: pointer to device for which we want SCMI handle
- *
- * NOTE: The function does not track individual clients of the framework
- * and is expected to be maintained by caller of SCMI protocol library.
- * scmi_handle_put must be balanced with successful scmi_handle_get
- *
- * Return: pointer to handle if successful, NULL on error
- */
-struct scmi_handle *scmi_handle_get(struct device_d *dev)
+static void scmi_dev_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
- struct list_head *p;
- struct scmi_info *info;
- struct scmi_handle *handle = NULL;
-
- list_for_each(p, &scmi_list) {
- info = list_entry(p, struct scmi_info, node);
- if (dev->parent == info->dev) {
- handle = scmi_handle_get_from_info_unlocked(info);
- break;
- }
- }
-
- return handle;
+ scmi_protocol_release(sdev->handle, protocol_id);
}
/**
- * scmi_handle_put() - Release the handle acquired by scmi_handle_get
+ * scmi_is_transport_atomic - Method to check if underlying transport for an
+ * SCMI instance is configured as atomic.
*
- * @handle: handle acquired by scmi_handle_get
- *
- * NOTE: The function does not track individual clients of the framework
- * and is expected to be maintained by caller of SCMI protocol library.
- * scmi_handle_put must be balanced with successful scmi_handle_get
+ * @handle: A reference to the SCMI platform instance.
+ * @atomic_threshold: An optional return value for the system wide currently
+ * configured threshold for atomic operations.
*
- * Return: 0 is successfully released
- * if null was passed, it returns -EINVAL;
+ * Return: True if transport is configured as atomic
*/
-int scmi_handle_put(const struct scmi_handle *handle)
+static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold)
{
- struct scmi_info *info;
-
- if (!handle)
- return -EINVAL;
+ bool ret;
+ struct scmi_info *info = handle_to_scmi_info(handle);
- info = handle_to_scmi_info(handle);
- if (!WARN_ON(!info->users))
- info->users--;
+ ret = is_transport_polling_capable(info->desc);
+ if (ret && atomic_threshold)
+ *atomic_threshold = info->atomic_threshold;
- return 0;
+ return ret;
}
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
@@ -776,34 +1269,71 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
{
int i;
struct scmi_xfer *xfer;
- struct device_d *dev = sinfo->dev;
+ struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
/* Pre-allocated messages, no more than what hdr.seq can support */
- if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
- dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
- desc->max_msg, MSG_TOKEN_MAX);
+ if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
+ dev_err(dev,
+ "Invalid maximum messages %d, not in range [1 - %lu]\n",
+ info->max_msg, MSG_TOKEN_MAX);
return -EINVAL;
}
- info->xfer_block = kcalloc(desc->max_msg, sizeof(*info->xfer_block), GFP_KERNEL);
- if (!info->xfer_block)
- return -ENOMEM;
-
- info->xfer_alloc_table = kcalloc(BITS_TO_LONGS(desc->max_msg),
- sizeof(long), GFP_KERNEL);
+ /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
+ info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
+ GFP_KERNEL);
if (!info->xfer_alloc_table)
return -ENOMEM;
- /* Pre-initialize the buffer pointer to pre-allocated buffers */
- for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
- xfer->rx.buf = kcalloc(sizeof(u8), desc->max_msg_size,
+ /*
+ * Preallocate a number of xfers equal to max inflight messages,
+ * pre-initialize the buffer pointer to pre-allocated buffers and
+ * attach all of them to the free list
+ */
+ INIT_HLIST_HEAD(&info->free_xfers);
+ for (i = 0; i < info->max_msg; i++) {
+ xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
GFP_KERNEL);
if (!xfer->rx.buf)
return -ENOMEM;
xfer->tx.buf = xfer->rx.buf;
- xfer->done = false;
+ spin_lock_init(&xfer->lock);
+
+ /* Add initialized xfer to the free list */
+ hlist_add_head(&xfer->node, &info->free_xfers);
+ }
+
+ spin_lock_init(&info->xfer_lock);
+
+ return 0;
+}
+
+static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
+{
+ const struct scmi_desc *desc = sinfo->desc;
+
+ if (!desc->ops->get_max_msg) {
+ sinfo->tx_minfo.max_msg = desc->max_msg;
+ sinfo->rx_minfo.max_msg = desc->max_msg;
+ } else {
+ struct scmi_chan_info *base_cinfo;
+
+ base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
+ if (!base_cinfo)
+ return -EINVAL;
+ sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
+
+ /* RX channel is optional so can be skipped */
+ base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
+ if (base_cinfo)
+ sinfo->rx_minfo.max_msg =
+ desc->ops->get_max_msg(base_cinfo);
}
return 0;
@@ -811,51 +1341,119 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
- int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+ int ret;
- if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+ ret = scmi_channels_max_msg_configure(sinfo);
+ if (ret)
+ return ret;
+
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+ if (!ret && !idr_is_empty(&sinfo->rx_idr))
ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
return ret;
}
-static int scmi_chan_setup(struct scmi_info *info, struct device_d *dev,
+/**
+ * scmi_handle_get() - Get the SCMI handle for a device
+ *
+ * @dev: pointer to device for which we want SCMI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of SCMI protocol library.
+ * scmi_handle_put must be balanced with successful scmi_handle_get
+ *
+ * Return: pointer to handle if successful, NULL on error
+ */
+struct scmi_handle *scmi_handle_get(struct device *dev)
+{
+ struct list_head *p;
+ struct scmi_info *info;
+ struct scmi_handle *handle = NULL;
+
+ mutex_lock(&scmi_list_mutex);
+ list_for_each(p, &scmi_list) {
+ info = list_entry(p, struct scmi_info, node);
+ if (dev->parent == info->dev) {
+ handle = &info->handle;
+ break;
+ }
+ }
+ mutex_unlock(&scmi_list_mutex);
+
+ return handle;
+}
+
+static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
int prot_id, bool tx)
{
int ret, idx;
+ char name[32];
struct scmi_chan_info *cinfo;
struct idr *idr;
+ struct scmi_device *tdev = NULL;
/* Transmit channel is first entry i.e. index 0 */
idx = tx ? 0 : 1;
idr = tx ? &info->tx_idr : &info->rx_idr;
- /* check if already allocated, used for multiple device per protocol */
- cinfo = idr_find(idr, prot_id);
- if (cinfo)
- return 0;
-
- if (!info->desc->ops->chan_available(dev, idx)) {
+ if (!info->desc->ops->chan_available(of_node, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
return -EINVAL;
goto idr_alloc;
}
- cinfo = kzalloc(sizeof(*cinfo), GFP_KERNEL);
+ cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
- cinfo->dev = dev;
+ cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
+
+ /* Create a unique name for this transport device */
+ snprintf(name, 32, "__scmi_transport_device_%s_%02X",
+ idx ? "rx" : "tx", prot_id);
+ /* Create a uniquely named, dedicated transport device for this chan */
+ tdev = scmi_device_create(of_node, info->dev, prot_id, name);
+ if (!tdev) {
+ dev_err(info->dev,
+ "failed to create transport device (%s)\n", name);
+ devm_kfree(info->dev, cinfo);
+ return -EINVAL;
+ }
+ of_node_get(of_node);
+ cinfo->id = prot_id;
+ cinfo->dev = &tdev->dev;
ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
- if (ret)
+ if (ret) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
return ret;
+ }
+
+ if (tx && is_polling_required(cinfo, info->desc)) {
+ if (is_transport_polling_capable(info->desc))
+ dev_dbg(&tdev->dev,
+ "Enabled polling mode TX channel - prot_id:%d\n",
+ prot_id);
+ else
+ dev_warn(&tdev->dev,
+ "Polling mode NOT supported by transport.\n");
+ }
idr_alloc:
ret = idr_alloc_one(idr, cinfo, prot_id);
if (ret != prot_id) {
- dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+ dev_err(info->dev,
+ "unable to allocate SCMI idr slot err %d\n", ret);
+ /* Destroy channel and device only if created by this call. */
+ if (tdev) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
+ }
return ret;
}
@@ -864,311 +1462,160 @@ idr_alloc:
}
static inline int
-scmi_txrx_setup(struct scmi_info *info, struct device_d *dev, int prot_id)
+scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
+ int prot_id)
{
- int ret = scmi_chan_setup(info, dev, prot_id, true);
+ int ret = scmi_chan_setup(info, of_node, prot_id, true);
- if (!ret) /* Rx is optional, hence no error check */
- scmi_chan_setup(info, dev, prot_id, false);
+ if (!ret) {
+ /* Rx is optional, report only memory errors */
+ ret = scmi_chan_setup(info, of_node, prot_id, false);
+ if (ret && ret != -ENOMEM)
+ ret = 0;
+ }
return ret;
}
/**
- * scmi_get_protocol_device - Helper to get/create an SCMI device.
- *
- * @np: A device node representing a valid active protocols for the referred
- * SCMI instance.
- * @info: The referred SCMI instance for which we are getting/creating this
- * device.
- * @prot_id: The protocol ID.
- * @name: The device name.
- *
- * Referring to the specific SCMI instance identified by @info, this helper
- * takes care to return a properly initialized device matching the requested
- * @proto_id and @name: if device was still not existent it is created as a
- * child of the specified SCMI instance @info and its transport properly
- * initialized as usual.
+ * scmi_channels_setup - Helper to initialize all required channels
+ *
+ * @info: The SCMI instance descriptor.
+ *
+ * Initialize all the channels found described in the DT against the underlying
+ * configured transport using custom defined dedicated devices instead of
+ * borrowing devices from the SCMI drivers; this way channels are initialized
+ * upfront during core SCMI stack probing and are no longer coupled with the
+ * SCMI devices used by SCMI drivers.
+ *
+ * Note that, even though a pair of TX/RX channels is associated to each
+ * protocol defined in the DT, a distinct freshly initialized channel is
+ * created only if the DT node for the protocol at hand describes a dedicated
+ * channel: in all the other cases the common BASE protocol channel is reused.
+ *
+ * Return: 0 on Success
*/
-static inline struct scmi_device *
-scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+static int scmi_channels_setup(struct scmi_info *info)
{
- struct scmi_device *sdev;
+ int ret;
+ struct device_node *child, *top_np = info->dev->of_node;
- /* Already created for this parent SCMI instance ? */
- sdev = scmi_child_dev_find(info->dev, prot_id, name);
- if (sdev)
- return sdev;
+ /* Initialize a common generic channel at first */
+ ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
+ if (ret)
+ return ret;
- pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
+ for_each_available_child_of_node(top_np, child) {
+ u32 prot_id;
- sdev = scmi_device_alloc(np, info->dev, prot_id, name);
- if (!sdev) {
- dev_err(info->dev, "failed to create %d protocol device\n",
- prot_id);
- return NULL;
- }
+ if (of_property_read_u32(child, "reg", &prot_id))
+ continue;
- if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
- dev_err(&sdev->dev, "failed to setup transport\n");
- scmi_device_destroy(sdev);
- return NULL;
+ if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+ dev_err(info->dev,
+ "Out of range protocol %d\n", prot_id);
+
+ ret = scmi_txrx_setup(info, child, prot_id);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
}
- return sdev;
+ return 0;
}
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+static int scmi_chan_destroy(int id, void *p, void *idr)
{
- struct scmi_device *sdev;
+ struct scmi_chan_info *cinfo = p;
- sdev = scmi_get_protocol_device(np, info, prot_id, name);
- if (!sdev)
- return;
+ if (cinfo->dev) {
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
- /* setup handle now as the transport is ready */
- scmi_set_handle(sdev);
+ of_node_put(cinfo->dev->of_node);
+ scmi_device_destroy(info->dev, id, sdev->name);
+ cinfo->dev = NULL;
+ }
+
+ idr_remove(idr, id);
- /* Register if not done yet */
- if (sdev->dev.id == DEVICE_ID_DYNAMIC)
- register_device(&sdev->dev);
+ return 0;
}
-/**
- * scmi_create_protocol_devices - Create devices for all pending requests for
- * this SCMI instance.
- *
- * @np: The device node describing the protocol
- * @info: The SCMI instance descriptor
- * @prot_id: The protocol ID
- *
- * All devices previously requested for this instance (if any) are found and
- * created by scanning the proper @&scmi_requested_devices entry.
- */
-static void scmi_create_protocol_devices(struct device_node *np,
- struct scmi_info *info, int prot_id)
+static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
{
- struct list_head *phead;
+ /* At first free all channels at the transport layer ... */
+ idr_for_each(idr, info->desc->ops->chan_free, idr);
- phead = idr_find(&scmi_requested_devices, prot_id);
- if (phead) {
- struct scmi_requested_dev *rdev;
+ /* ...then destroy all underlying devices */
+ idr_for_each(idr, scmi_chan_destroy, idr);
- list_for_each_entry(rdev, phead, node)
- scmi_create_protocol_device(np, info, prot_id,
- rdev->id_table->name);
- }
+ idr_destroy(idr);
}
-/**
- * scmi_protocol_device_request - Helper to request a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be created.
- *
- * This helper let an SCMI driver request specific devices identified by the
- * @id_table to be created for each active SCMI instance.
- *
- * The requested device name MUST NOT be already existent for any protocol;
- * at first the freshly requested @id_table is annotated in the IDR table
- * @scmi_requested_devices, then a matching device is created for each already
- * active SCMI instance. (if any)
- *
- * This way the requested device is created straight-away for all the already
- * initialized(probed) SCMI instances (handles) and it remains also annotated
- * as pending creation if the requesting SCMI driver was loaded before some
- * SCMI instance and related transports were available: when such late instance
- * is probed, its probe will take care to scan the list of pending requested
- * devices and create those on its own (see @scmi_create_protocol_devices and
- * its enclosing loop)
- *
- * Return: 0 on Success
- */
-int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+static void scmi_cleanup_txrx_channels(struct scmi_info *info)
{
- int ret = 0;
- struct list_head *phead = NULL;
- struct scmi_requested_dev *rdev;
- struct scmi_info *info;
- struct idr *idr;
-
- pr_debug("Requesting SCMI device (%s) for protocol 0x%x\n",
- id_table->name, id_table->protocol_id);
-
- /*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
- */
- __idr_for_each_entry(&scmi_requested_devices, idr) {
- struct list_head *head = idr->ptr;
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
- list_for_each_entry(rdev, head, node) {
- if (!strcmp(rdev->id_table->name, id_table->name)) {
- pr_err("Ignoring duplicate request [%d] %s\n",
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- ret = -EINVAL;
- goto out;
- }
- }
- }
-
- /*
- * No duplicate found for requested id_table, so let's create a new
- * requested device entry for this new valid request.
- */
- rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
- if (!rdev) {
- ret = -ENOMEM;
- goto out;
- }
- rdev->id_table = id_table;
-
- /*
- * Append the new requested device table descriptor to the head of the
- * related protocol list, eventually creating such head if not already
- * there.
- */
- if (!phead) {
- phead = kzalloc(sizeof(*phead), GFP_KERNEL);
- if (!phead) {
- kfree(rdev);
- ret = -ENOMEM;
- goto out;
- }
- INIT_LIST_HEAD(phead);
-
- ret = idr_alloc_one(&scmi_requested_devices, (void *)phead,
- id_table->protocol_id);
- if (ret != id_table->protocol_id) {
- pr_err("Failed to save SCMI device - ret:%d\n", ret);
- kfree(rdev);
- kfree(phead);
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- }
- list_add(&rdev->node, phead);
+ scmi_cleanup_channels(info, &info->tx_idr);
- /*
- * Now effectively create and initialize the requested device for every
- * already initialized SCMI instance which has registered the requested
- * protocol as a valid active one: i.e. defined in DT and supported by
- * current platform FW.
- */
- list_for_each_entry(info, &scmi_list, node) {
- struct device_node *child;
-
- child = idr_find(&info->active_protocols,
- id_table->protocol_id);
- if (child) {
- struct scmi_device *sdev;
-
- sdev = scmi_get_protocol_device(child, info,
- id_table->protocol_id,
- id_table->name);
- /* Set handle if not already set: device existed */
- if (sdev && !sdev->handle)
- sdev->handle =
- scmi_handle_get_from_info_unlocked(info);
- } else {
- dev_err(info->dev,
- "Failed. SCMI protocol %d not active.\n",
- id_table->protocol_id);
- }
- }
-
-out:
- return ret;
+ scmi_cleanup_channels(info, &info->rx_idr);
}
-/**
- * scmi_protocol_device_unrequest - Helper to unrequest a device
- *
- * @id_table: A protocol/name pair descriptor for the device to be unrequested.
- *
- * An helper to let an SCMI driver release its request about devices; note that
- * devices are created and initialized once the first SCMI driver request them
- * but they destroyed only on SCMI core unloading/unbinding.
- *
- * The current SCMI transport layer uses such devices as internal references and
- * as such they could be shared as same transport between multiple drivers so
- * that cannot be safely destroyed till the whole SCMI stack is removed.
- * (unless adding further burden of refcounting.)
- */
-void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+static int scmi_device_request_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct list_head *phead;
-
- pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
- id_table->name, id_table->protocol_id);
-
- phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
- if (phead) {
- struct scmi_requested_dev *victim, *tmp;
-
- list_for_each_entry_safe(victim, tmp, phead, node) {
- if (!strcmp(victim->id_table->name, id_table->name)) {
- list_del(&victim->node);
- kfree(victim);
- break;
- }
- }
-
- if (list_empty(phead)) {
- idr_remove(&scmi_requested_devices,
- id_table->protocol_id);
- kfree(phead);
- }
+ struct device_node *np;
+ struct scmi_device_id *id_table = data;
+ struct scmi_info *info = req_nb_to_scmi_info(nb);
+
+ np = idr_find(&info->active_protocols, id_table->protocol_id);
+ if (!np)
+ return NOTIFY_DONE;
+
+ dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
+ action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
+ id_table->name, id_table->protocol_id);
+
+ switch (action) {
+ case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
+ scmi_create_protocol_devices(np, info, id_table->protocol_id,
+ id_table->name);
+ break;
+ case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
+ scmi_destroy_protocol_devices(info, id_table->protocol_id,
+ id_table->name);
+ break;
+ default:
+ return NOTIFY_DONE;
}
-}
-static void version_info(struct device_d *dev)
-{
- struct scmi_info *info = dev->priv;
-
- printf("SCMI information:\n"
- " version: %u.%u\n"
- " firmware version: 0x%x\n"
- " vendor: %s (sub: %s)\n",
- info->version.minor_ver,
- info->version.major_ver,
- info->version.impl_ver,
- info->version.vendor_id,
- info->version.sub_vendor_id);
+ return NOTIFY_OK;
}
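For reference, the events handled above are raised when an SCMI driver registers or unregisters on the SCMI bus; a hedged sketch of the driver-side table that travels as @data, assuming the two-field (protocol_id, name) id_table layout implied by the code above:

/* Hypothetical SCMI driver id_table: each entry reaches the notifier
 * above as the SCMI_BUS_NOTIFY_DEVICE_REQUEST payload. */
static const struct scmi_device_id example_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};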
-static int scmi_probe(struct device_d *dev)
+static int scmi_probe(struct device *dev)
{
int ret;
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
- struct device_node *child, *np = dev->device_node;
+ struct device_node *child, *np = dev->of_node;
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
info->desc = desc;
+ info->dev_req_nb.notifier_call = scmi_device_request_notifier;
INIT_LIST_HEAD(&info->node);
idr_init(&info->protocols);
+ mutex_init(&info->protocols_mtx);
idr_init(&info->active_protocols);
+ mutex_init(&info->devreq_mtx);
dev->priv = info;
idr_init(&info->tx_idr);
@@ -1177,28 +1624,55 @@ static int scmi_probe(struct device_d *dev)
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
- handle->protocol_get = scmi_dev_protocol_get;
+ handle->dev_protocol_acquire = scmi_dev_protocol_acquire;
+ handle->dev_protocol_get = scmi_dev_protocol_get;
+ handle->dev_protocol_put = scmi_dev_protocol_put;
+
+ /* System wide atomic threshold for atomic ops .. if any */
+ if (!of_property_read_u32(np, "atomic-threshold-us",
+ &info->atomic_threshold))
+ dev_info(dev,
+ "SCMI System wide atomic threshold set to %d us\n",
+ info->atomic_threshold);
+ handle->is_transport_atomic = scmi_is_transport_atomic;
+
+ if (desc->ops->link_supplier) {
+ ret = desc->ops->link_supplier(dev);
+ if (ret)
+ goto clear_ida;
+ }
- ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+ /* Setup all channels described in the DT at first */
+ ret = scmi_channels_setup(info);
if (ret)
- return ret;
+ goto clear_ida;
+
+ ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ if (ret)
+ goto clear_txrx_setup;
ret = scmi_xfer_info_init(info);
if (ret)
- return ret;
+ goto clear_dev_req_notifier;
+
+ if (!is_transport_polling_capable(info->desc))
+ dev_err(dev,
+ "Transport is not polling capable. Atomic mode not supported.\n");
/*
* Trigger SCMI Base protocol initialization.
- * It's mandatory and won't be ever released/deinit until the
- * SCMI stack is shutdown/unloaded as a whole.
+ * It's mandatory and won't be ever released/deinit.
*/
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
dev_err(dev, "unable to communicate with SCMI\n");
- return ret;
+ goto notification_exit;
}
+ mutex_lock(&scmi_list_mutex);
list_add_tail(&info->node, &scmi_list);
+ mutex_unlock(&scmi_list_mutex);
for_each_available_child_of_node(np, child) {
u32 prot_id;
@@ -1226,54 +1700,68 @@ static int scmi_probe(struct device_d *dev)
continue;
}
- scmi_create_protocol_devices(child, info, prot_id);
+ of_node_get(child);
+ scmi_create_protocol_devices(child, info, prot_id, NULL);
}
- dev->info = version_info;
-
return 0;
-}
-void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
-{
- idr_remove(idr, id);
+notification_exit:
+clear_dev_req_notifier:
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+clear_txrx_setup:
+ scmi_cleanup_txrx_channels(info);
+clear_ida:
+ return ret;
}
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
-#ifdef CONFIG_ARM_SMCCC
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
+ { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+ { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
#endif
{ /* Sentinel */ },
};
-static struct driver_d scmi_driver = {
+MODULE_DEVICE_TABLE(of, scmi_of_match);
+
+static struct driver arm_scmi_driver = {
.name = "arm-scmi",
- .of_compatible = scmi_of_match,
+ .of_match_table = scmi_of_match,
.probe = scmi_probe,
};
+core_platform_driver(arm_scmi_driver);
static int __init scmi_bus_driver_init(void)
{
- scmi_bus_init();
+ int ret;
+
+ ret = scmi_bus_init();
+ if (ret)
+ return ret;
+
+ /* Bail out if no SCMI transport was configured */
+ if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
+ return -EINVAL;
scmi_base_register();
- scmi_reset_register();
scmi_clock_register();
+ scmi_power_register();
+ scmi_reset_register();
+ scmi_sensors_register();
scmi_voltage_register();
return 0;
}
pure_initcall(scmi_bus_driver_init);
-static int __init scmi_platform_driver_init(void)
-{
- return platform_driver_register(&scmi_driver);
-}
-core_initcall(scmi_platform_driver_init);
-
-MODULE_ALIAS("platform: arm-scmi");
+MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");