path: root/drivers
author	Ahmad Fatoum <ahmad@a3f.at>	2021-02-22 08:05:59 +0100
committer	Sascha Hauer <s.hauer@pengutronix.de>	2021-02-22 10:24:31 +0100
commit	3e36b143754e8a0b8e5070b6f8138e9e18f32fdd (patch)
tree	68a17308c517cfce3d0697fd9b39cc6a646c2025 /drivers
parent	c0f984b4dfe53c66fa15e18d89b5a7b122d541fd (diff)
drivers: add support for memory-mapped VirtIO paravirtualization
Sandbox is only useful to test barebox in isolation. For interaction between barebox and firmware/OS, Qemu is the better choice. Qemu supports specifying VirtIO devices on the command line, which it automatically fixes up into the device tree. This is nice. Add support for that.

Signed-off-by: Ahmad Fatoum <ahmad@a3f.at>
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
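As an illustration (not part of the original commit message), a VirtIO device can be passed to a Qemu virt machine on the command line roughly like this; machine type, image and file names are placeholders:

    qemu-system-aarch64 -M virt -kernel barebox.bin \
        -drive if=none,file=disk.img,format=raw,id=hd0 \
        -device virtio-blk-device,drive=hd0

Qemu then inserts a matching "virtio,mmio" node into the device tree it hands to barebox.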
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/Kconfig              |   1
-rw-r--r--	drivers/Makefile             |   1
-rw-r--r--	drivers/virtio/Kconfig       |  28
-rw-r--r--	drivers/virtio/Makefile      |   3
-rw-r--r--	drivers/virtio/virtio.c      | 347
-rw-r--r--	drivers/virtio/virtio_mmio.c | 465
-rw-r--r--	drivers/virtio/virtio_ring.c | 365
7 files changed, 1210 insertions, 0 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dda2405780..0f97ae2b15 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -43,5 +43,6 @@ source "drivers/soc/imx/Kconfig"
source "drivers/nvme/Kconfig"
source "drivers/ddr/Kconfig"
source "drivers/power/Kconfig"
+source "drivers/virtio/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 5a03bdceab..4d3a7cb168 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -43,3 +43,4 @@ obj-y += soc/imx/
obj-y += nvme/
obj-y += ddr/
obj-y += power/
+obj-y += virtio/
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
new file mode 100644
index 0000000000..59e3d3c3f5
--- /dev/null
+++ b/drivers/virtio/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIRTIO
+ bool
+ help
+ This option is selected by any driver which implements the virtio
+ bus, such as CONFIG_VIRTIO_MMIO.
+
+config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+ bool
+ help
+ This option is selected if the architecture may need to enforce
+ VIRTIO_F_ACCESS_PLATFORM.
+
+menuconfig VIRTIO_MENU
+ bool "Virtio drivers"
+ default y
+
+if VIRTIO_MENU
+
+config VIRTIO_MMIO
+ bool "Platform bus driver for memory mapped virtio devices"
+ depends on HAS_DMA
+ select VIRTIO
+ help
+ This driver provides support for memory-mapped virtio devices
+ on the platform bus. This is usually used with Qemu.
+
+endif # VIRTIO_MENU
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
new file mode 100644
index 0000000000..94ff1398fb
--- /dev/null
+++ b/drivers/virtio/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
+obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
new file mode 100644
index 0000000000..c96c465e87
--- /dev/null
+++ b/drivers/virtio/virtio.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <common.h>
+#include <linux/virtio.h>
+#include <linux/spinlock.h>
+#include <linux/virtio_config.h>
+#include <module.h>
+#include <linux/kernel.h>
+#include <uapi/linux/virtio_ids.h>
+
+static int status_show(struct param_d *param, void *_dev)
+{
+ struct virtio_device *dev = _dev;
+
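+ /* Refresh the cached value from the transport each time the parameter is read */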
+ dev->status_param = dev->config->get_status(dev);
+ return 0;
+}
+
+static struct param_d *virtio_dev_add_param_features(struct virtio_device *dev)
+{
+ struct param_d *param;
+ unsigned int i;
+ char *buf;
+ int len = 0;
+
+ buf = xmalloc(sizeof(dev->features)*8 + 1);
+
+ /* We actually represent this as a bitstring, as it could be
+ * arbitrary length in future. */
+ for (i = 0; i < sizeof(dev->features)*8; i++)
+ len += sprintf(buf+len, "%c",
+ __virtio_test_bit(dev, i) ? '1' : '0');
+
+ param = dev_add_param_string_fixed(&dev->dev, "features", buf);
+ free(buf);
+
+ return param;
+}
+
+static inline int virtio_id_match(const struct virtio_device *dev,
+ const struct virtio_device_id *id)
+{
+ if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
+ return 0;
+
+ return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
+}
+
+/* This looks through all the IDs a driver claims to support. If any of them
+ * match, we return 0 and the driver core will call virtio_dev_probe(). */
+static int virtio_dev_match(struct device_d *_dv, struct driver_d *_dr)
+{
+ unsigned int i;
+ struct virtio_device *dev = dev_to_virtio(_dv);
+ const struct virtio_device_id *ids;
+
+ ids = drv_to_virtio(_dr)->id_table;
+ for (i = 0; ids[i].device; i++)
+ if (virtio_id_match(dev, &ids[i]))
+ return 0;
+
+ return -1;
+}
+
+void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ unsigned int i;
+ struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
+
+ for (i = 0; i < drv->feature_table_size; i++)
+ if (drv->feature_table[i] == fbit)
+ return;
+
+ if (drv->feature_table_legacy) {
+ for (i = 0; i < drv->feature_table_size_legacy; i++)
+ if (drv->feature_table_legacy[i] == fbit)
+ return;
+ }
+
+ BUG();
+}
+EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
+
+static void __virtio_config_changed(struct virtio_device *dev)
+{
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ if (!dev->config_enabled)
+ dev->config_change_pending = true;
+ else if (drv && drv->config_changed)
+ drv->config_changed(dev);
+}
+
+void virtio_config_changed(struct virtio_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->config_lock, flags);
+ __virtio_config_changed(dev);
+ spin_unlock_irqrestore(&dev->config_lock, flags);
+}
+EXPORT_SYMBOL_GPL(virtio_config_changed);
+
+void virtio_config_disable(struct virtio_device *dev)
+{
+ dev->config_enabled = false;
+}
+EXPORT_SYMBOL_GPL(virtio_config_disable);
+
+void virtio_config_enable(struct virtio_device *dev)
+{
+ dev->config_enabled = true;
+ if (dev->config_change_pending)
+ __virtio_config_changed(dev);
+ dev->config_change_pending = false;
+}
+EXPORT_SYMBOL_GPL(virtio_config_enable);
+
+void virtio_add_status(struct virtio_device *dev, unsigned int status)
+{
+ dev->config->set_status(dev, dev->config->get_status(dev) | status);
+}
+EXPORT_SYMBOL_GPL(virtio_add_status);
+
+int virtio_finalize_features(struct virtio_device *dev)
+{
+ int ret = dev->config->finalize_features(dev);
+ unsigned status;
+
+ if (ret)
+ return ret;
+
+ ret = arch_has_restricted_virtio_memory_access();
+ if (ret) {
+ if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
+ dev_warn(&dev->dev,
+ "device must provide VIRTIO_F_VERSION_1\n");
+ return -ENODEV;
+ }
+
+ if (!virtio_has_feature(dev, VIRTIO_F_ACCESS_PLATFORM)) {
+ dev_warn(&dev->dev,
+ "device must provide VIRTIO_F_ACCESS_PLATFORM\n");
+ return -ENODEV;
+ }
+ }
+
+ if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1))
+ return 0;
+
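+ /* virtio 1.0 handshake: set FEATURES_OK, then re-read the status to see
+ * whether the device accepted the negotiated feature subset */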
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
+ status = dev->config->get_status(dev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ dev_err(&dev->dev, "virtio: device refuses features: %x\n",
+ status);
+ return -ENODEV;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_finalize_features);
+
+int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[])
+{
+ return vdev->config->find_vqs(vdev, nvqs, vqs);
+}
+EXPORT_SYMBOL_GPL(virtio_find_vqs);
+
+static int virtio_dev_probe(struct device_d *_d)
+{
+ int err, i;
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ u64 device_features;
+ u64 driver_features;
+ u64 driver_features_legacy;
+
+ /* We have a driver! */
+ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
+
+ /* Figure out what features the device supports. */
+ device_features = dev->config->get_features(dev);
+
+ /* Figure out what features the driver supports. */
+ driver_features = 0;
+ for (i = 0; i < drv->feature_table_size; i++) {
+ unsigned int f = drv->feature_table[i];
+ BUG_ON(f >= 64);
+ driver_features |= (1ULL << f);
+ }
+
+ /* Some drivers have a separate feature table for virtio v1.0 */
+ if (drv->feature_table_legacy) {
+ driver_features_legacy = 0;
+ for (i = 0; i < drv->feature_table_size_legacy; i++) {
+ unsigned int f = drv->feature_table_legacy[i];
+ BUG_ON(f >= 64);
+ driver_features_legacy |= (1ULL << f);
+ }
+ } else {
+ driver_features_legacy = driver_features;
+ }
+
+ if (device_features & (1ULL << VIRTIO_F_VERSION_1))
+ dev->features = driver_features & device_features;
+ else
+ dev->features = driver_features_legacy & device_features;
+
+ /* Transport features always preserved to pass to finalize_features. */
+ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
+ if (device_features & (1ULL << i))
+ __virtio_set_bit(dev, i);
+
+ if (drv->validate) {
+ err = drv->validate(dev);
+ if (err)
+ goto err;
+ }
+
+ err = virtio_finalize_features(dev);
+ if (err)
+ goto err;
+
+ err = drv->probe(dev);
+ if (err)
+ goto err;
+
+ /* If probe didn't do it, mark device DRIVER_OK ourselves. */
+ if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
+ virtio_device_ready(dev);
+
+ if (drv->scan)
+ drv->scan(dev);
+
+ virtio_config_enable(dev);
+
+ return 0;
+err:
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ return err;
+}
+
+static void virtio_dev_remove(struct device_d *_d)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ virtio_config_disable(dev);
+
+ drv->remove(dev);
+
+ WARN_ONCE(dev->config->get_status(dev), "Driver should have reset device");
+
+ /* Acknowledge the device's existence again. */
+ virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+}
+
+static struct bus_type virtio_bus = {
+ .name = "virtio",
+ .match = virtio_dev_match,
+ .probe = virtio_dev_probe,
+ .remove = virtio_dev_remove,
+};
+
+int virtio_driver_register(struct virtio_driver *driver)
+{
+ /* Catch this early. */
+ BUG_ON(driver->feature_table_size && !driver->feature_table);
+ driver->driver.bus = &virtio_bus;
+
+ return register_driver(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(virtio_driver_register);
+
+/**
+ * register_virtio_device - register virtio device
+ * @dev : virtio device to be registered
+ *
+ * On error, the caller must call put_device on &@dev->dev (and not kfree),
+ * as another code path may have obtained a reference to @dev.
+ *
+ * Returns: 0 on success, -error on failure
+ */
+int register_virtio_device(struct virtio_device *dev)
+{
+ int err;
+
+ dev->dev.bus = &virtio_bus;
+ dev->dev.id = DEVICE_ID_DYNAMIC;
+ dev->dev.name = "virtio";
+
+ spin_lock_init(&dev->config_lock);
+ dev->config_enabled = false;
+ dev->config_change_pending = false;
+
+ /* We always start by resetting the device, in case a previous
+ * driver messed it up. This also tests that code path a little. */
+ dev->config->reset(dev);
+
+ /* Acknowledge that we've seen the device. */
+ virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+
+ INIT_LIST_HEAD(&dev->vqs);
+
+ /*
+ * register_device() causes the bus infrastructure to look for a matching
+ * driver.
+ */
+ err = register_device(&dev->dev);
+ if (err)
+ goto out;
+
+ dev_add_param_uint32_ro(&dev->dev, "device", &dev->id.device, "0x%04x");
+ dev_add_param_uint32_ro(&dev->dev, "vendor", &dev->id.vendor, "0x%04x");
+ dev_add_param_uint32(&dev->dev, "status", param_set_readonly,
+ status_show, &dev->status_param, "0x%08x", dev);
+ virtio_dev_add_param_features(dev);
+
+out:
+ if (err)
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ return err;
+}
+EXPORT_SYMBOL_GPL(register_virtio_device);
+
+bool is_virtio_device(struct device_d *dev)
+{
+ return dev->bus == &virtio_bus;
+}
+EXPORT_SYMBOL_GPL(is_virtio_device);
+
+void unregister_virtio_device(struct virtio_device *dev)
+{
+ unregister_device(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(unregister_virtio_device);
+
+static int virtio_init(void)
+{
+ if (bus_register(&virtio_bus) != 0)
+ panic("virtio bus registration failed");
+ return 0;
+}
+core_initcall(virtio_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
new file mode 100644
index 0000000000..821b43871a
--- /dev/null
+++ b/drivers/virtio/virtio_mmio.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtio memory mapped device driver
+ *
+ * Copyright 2011-2014, ARM Ltd.
+ *
+ * This module allows virtio devices to be used over a virtual, memory mapped
+ * platform device.
+ *
+ * The guest device(s) may be instantiated via a Device Tree node, e.g.:
+ *
+ * virtio_block@1e000 {
+ * compatible = "virtio,mmio";
+ * reg = <0x1e000 0x100>;
+ * interrupts = <42>;
+ * }
+ *
+ * Qemu will automatically fix up the nodes corresponding to its command line
+ * arguments into the barebox device tree.
+ *
+ * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
+ */
+
+#define pr_fmt(fmt) "virtio-mmio: " fmt
+
+#include <common.h>
+#include <io.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <driver.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <uapi/linux/virtio_mmio.h>
+#include <linux/virtio_ring.h>
+
+#define to_virtio_mmio_device(_plat_dev) \
+ container_of(_plat_dev, struct virtio_mmio_device, vdev)
+
+#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
+
+struct virtio_mmio_device {
+ struct virtio_device vdev;
+
+ void __iomem *base;
+ unsigned long version;
+};
+
+struct virtio_mmio_vq_info {
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+};
+
+static int virtio_mmio_get_config(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+ void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
+ u8 b;
+ __le16 w;
+ __le32 l;
+
+ if (priv->version == 1) {
+ u8 *ptr = buf;
+ int i;
+
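+ /* Legacy (version 1) devices: access the config space one byte at a time */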
+ for (i = 0; i < len; i++)
+ ptr[i] = readb(base + offset + i);
+
+ return 0;
+ }
+
+ switch (len) {
+ case 1:
+ b = readb(base + offset);
+ memcpy(buf, &b, sizeof(b));
+ break;
+ case 2:
+ w = cpu_to_le16(readw(base + offset));
+ memcpy(buf, &w, sizeof(w));
+ break;
+ case 4:
+ l = cpu_to_le32(readl(base + offset));
+ memcpy(buf, &l, sizeof(l));
+ break;
+ case 8:
+ l = cpu_to_le32(readl(base + offset));
+ memcpy(buf, &l, sizeof(l));
+ l = cpu_to_le32(readl(base + offset + sizeof(l)));
+ memcpy(buf + sizeof(l), &l, sizeof(l));
+ break;
+ default:
+ WARN_ON(true);
+ }
+
+ return 0;
+}
+
+static int virtio_mmio_set_config(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+ void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
+ u8 b;
+ __le16 w;
+ __le32 l;
+
+ if (priv->version == 1) {
+ const u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb(ptr[i], base + offset + i);
+
+ return 0;
+ }
+
+ switch (len) {
+ case 1:
+ memcpy(&b, buf, sizeof(b));
+ writeb(b, base + offset);
+ break;
+ case 2:
+ memcpy(&w, buf, sizeof(w));
+ writew(le16_to_cpu(w), base + offset);
+ break;
+ case 4:
+ memcpy(&l, buf, sizeof(l));
+ writel(le32_to_cpu(l), base + offset);
+ break;
+ case 8:
+ memcpy(&l, buf, sizeof(l));
+ writel(le32_to_cpu(l), base + offset);
+ memcpy(&l, buf + sizeof(l), sizeof(l));
+ writel(le32_to_cpu(l), base + offset + sizeof(l));
+ break;
+ default:
+ WARN_ON(true);
+ }
+
+ return 0;
+}
+
+static int virtio_mmio_generation(struct virtio_device *vdev, u32 *counter)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+
+ if (priv->version == 1)
+ *counter = 0;
+ else
+ *counter = readl(priv->base + VIRTIO_MMIO_CONFIG_GENERATION);
+
+ return 0;
+}
+
+static int virtio_mmio_get_status(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+
+ return readl(priv->base + VIRTIO_MMIO_STATUS) & 0xff;
+}
+
+static int virtio_mmio_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+
+ /* We should never be setting status to 0 */
+ WARN_ON(status == 0);
+
+ writel(status, priv->base + VIRTIO_MMIO_STATUS);
+
+ return 0;
+}
+
+static int virtio_mmio_reset(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+
+ /* 0 status means a reset */
+ writel(0, priv->base + VIRTIO_MMIO_STATUS);
+
+ return 0;
+}
+
+static u64 virtio_mmio_get_features(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+ u64 features;
+
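+ /* The 64 feature bits are exposed as two 32-bit banks selected via
+ * DEVICE_FEATURES_SEL: bank 1 holds bits 63:32, bank 0 holds bits 31:0 */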
+ writel(1, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+ features = readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);
+ features <<= 32;
+
+ writel(0, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+ features |= readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);
+
+ return features;
+}
+
+static int virtio_mmio_finalize_features(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+
+ /* Make sure there are no mixed devices */
+ if (priv->version == 2 && !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+ dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
+ return -EINVAL;
+ }
+
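+ /* Write the driver's accepted feature subset back, again as two 32-bit
+ * banks selected via DRIVER_FEATURES_SEL */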
+ writel(1, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
+ writel((u32)(vdev->features >> 32),
+ priv->base + VIRTIO_MMIO_DRIVER_FEATURES);
+
+ writel(0, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
+ writel((u32)vdev->features,
+ priv->base + VIRTIO_MMIO_DRIVER_FEATURES);
+
+ return 0;
+}
+
+static struct virtqueue *virtio_mmio_setup_vq(struct virtio_device *vdev,
+ unsigned int index)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+ struct virtqueue *vq;
+ unsigned int num;
+ int err;
+
+ /* Select the queue we're interested in */
+ writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);
+
+ /* Queue shouldn't already be set up */
+ if (readl(priv->base + (priv->version == 1 ?
+ VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
+ err = -ENOENT;
+ goto error_available;
+ }
+
+ num = readl(priv->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+ if (num == 0) {
+ err = -ENOENT;
+ goto error_new_virtqueue;
+ }
+
+ /* Create the vring */
+ vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev);
+ if (!vq) {
+ err = -ENOMEM;
+ goto error_new_virtqueue;
+ }
+
+ /* Activate the queue */
+ writel(virtqueue_get_vring_size(vq),
+ priv->base + VIRTIO_MMIO_QUEUE_NUM);
+ if (priv->version == 1) {
+ u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;
+
+ /*
+ * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
+ * that doesn't fit in 32bit, fail the setup rather than
+ * pretending to be successful.
+ */
+ if (q_pfn >> 32) {
+ debug("platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
+ 0x1ULL << (32 + PAGE_SHIFT - 30));
+ err = -E2BIG;
+ goto error_bad_pfn;
+ }
+
+ writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_QUEUE_ALIGN);
+ writel(q_pfn, priv->base + VIRTIO_MMIO_QUEUE_PFN);
+ } else {
+ u64 addr;
+
+ addr = virtqueue_get_desc_addr(vq);
+ writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
+ writel((u32)(addr >> 32),
+ priv->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
+
+ addr = virtqueue_get_avail_addr(vq);
+ writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
+ writel((u32)(addr >> 32),
+ priv->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
+
+ addr = virtqueue_get_used_addr(vq);
+ writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_USED_LOW);
+ writel((u32)(addr >> 32),
+ priv->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
+
+ writel(1, priv->base + VIRTIO_MMIO_QUEUE_READY);
+ }
+
+ return vq;
+
+error_bad_pfn:
+ vring_del_virtqueue(vq);
+
+error_new_virtqueue:
+ if (priv->version == 1) {
+ writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
+ } else {
+ writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
+ WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
+ }
+
+error_available:
+ return ERR_PTR(err);
+}
+
+static void virtio_mmio_del_vq(struct virtqueue *vq)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vq->vdev);
+ unsigned int index = vq->index;
+
+ /* Select and deactivate the queue */
+ writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);
+ if (priv->version == 1) {
+ writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
+ } else {
+ writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
+ WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
+ }
+
+ vring_del_virtqueue(vq);
+}
+
+static int virtio_mmio_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ virtio_mmio_del_vq(vq);
+
+ return 0;
+}
+
+static int virtio_mmio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[])
+{
+ int i;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = virtio_mmio_setup_vq(vdev, i);
+ if (IS_ERR(vqs[i])) {
+ virtio_mmio_del_vqs(vdev);
+ return PTR_ERR(vqs[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int virtio_mmio_notify(struct virtio_device *vdev, struct virtqueue *vq)
+{
+ struct virtio_mmio_device *priv = to_virtio_mmio_device(vdev);
+
+ /*
+ * We write the queue's selector into the notification register
+ * to signal the other end
+ */
+ writel(vq->index, priv->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+
+ return 0;
+}
+
+static const struct virtio_config_ops virtio_mmio_config_ops = {
+ .get_config = virtio_mmio_get_config,
+ .set_config = virtio_mmio_set_config,
+ .generation = virtio_mmio_generation,
+ .get_status = virtio_mmio_get_status,
+ .set_status = virtio_mmio_set_status,
+ .reset = virtio_mmio_reset,
+ .get_features = virtio_mmio_get_features,
+ .finalize_features = virtio_mmio_finalize_features,
+ .find_vqs = virtio_mmio_find_vqs,
+ .del_vqs = virtio_mmio_del_vqs,
+ .notify = virtio_mmio_notify,
+};
+
+
+/* Platform device */
+
+static int virtio_mmio_probe(struct device_d *dev)
+{
+ struct virtio_mmio_device *vm_dev;
+ struct resource *res;
+ unsigned long magic;
+
+ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
+ if (!vm_dev)
+ return -ENOMEM;
+
+ vm_dev->vdev.dev.parent = dev;
+ vm_dev->vdev.config = &virtio_mmio_config_ops;
+
+ res = dev_request_mem_resource(dev, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ vm_dev->base = IOMEM(res->start);
+
+ /* Check magic value */
+ magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
+ if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
+ dev_warn(dev, "Wrong magic value 0x%08lx!\n", magic);
+ return -ENODEV;
+ }
+
+ /* Check device version */
+ vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
+ if (vm_dev->version < 1 || vm_dev->version > 2) {
+ dev_err(dev, "Version %ld not supported!\n",
+ vm_dev->version);
+ return -ENXIO;
+ }
+
+ vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
+ if (vm_dev->vdev.id.device == 0) {
+ /*
+ * virtio-mmio device with an ID 0 is a (dummy) placeholder
+ * with no function. End probing now with no error reported.
+ */
+ return -ENODEV;
+ }
+ vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
+
+ if (vm_dev->version == 1)
+ writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+ dev->priv = vm_dev;
+
+ return register_virtio_device(&vm_dev->vdev);
+}
+
+static void virtio_mmio_remove(struct device_d *dev)
+{
+ struct virtio_mmio_device *vm_dev = dev->priv;
+ unregister_virtio_device(&vm_dev->vdev);
+}
+
+
+/* Platform driver */
+
+static const struct of_device_id virtio_mmio_match[] = {
+ { .compatible = "virtio,mmio", },
+ {},
+};
+
+static struct driver_d virtio_mmio_driver = {
+ .probe = virtio_mmio_probe,
+ .remove = virtio_mmio_remove,
+ .name = "virtio-mmio",
+ .of_compatible = virtio_mmio_match,
+};
+
+static int __init virtio_mmio_init(void)
+{
+ return platform_driver_register(&virtio_mmio_driver);
+}
+
+module_init(virtio_mmio_init);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
new file mode 100644
index 0000000000..cac3362e72
--- /dev/null
+++ b/drivers/virtio/virtio_ring.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * virtio ring implementation
+ */
+
+#define pr_fmt(fmt) "virtio_ring: " fmt
+
+#include <common.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ring.h>
+#include <linux/bug.h>
+#include <dma.h>
+
+#define vq_debug(vq, fmt, ...) \
+ dev_dbg(&vq->vdev->dev, fmt, ##__VA_ARGS__)
+
+#define vq_info(vq, fmt, ...) \
+ dev_info(&vq->vdev->dev, fmt, ##__VA_ARGS__)
+
+int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
+ unsigned int out_sgs, unsigned int in_sgs)
+{
+ struct vring_desc *desc;
+ unsigned int total_sg = out_sgs + in_sgs;
+ unsigned int i, n, avail, descs_used, uninitialized_var(prev);
+ int head;
+
+ WARN_ON(total_sg == 0);
+
+ head = vq->free_head;
+
+ desc = vq->vring.desc;
+ i = head;
+ descs_used = total_sg;
+
+ if (vq->num_free < descs_used) {
+ vq_debug(vq, "Can't add buf len %i - avail = %i\n",
+ descs_used, vq->num_free);
+ /*
+ * FIXME: for historical reasons, we force a notify here if
+ * there are outgoing parts to the buffer. Presumably the
+ * host should service the ring ASAP.
+ */
+ if (out_sgs)
+ virtio_notify(vq->vdev, vq);
+ return -ENOSPC;
+ }
+
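+ /* Chain device-readable buffers first, then device-writable ones, as
+ * the virtio spec requires */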
+ for (n = 0; n < out_sgs; n++) {
+ struct virtio_sg *sg = sgs[n];
+
+ desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);
+ desc[i].addr = cpu_to_virtio64(vq->vdev, (u64)(size_t)sg->addr);
+ desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);
+
+ prev = i;
+ i = virtio16_to_cpu(vq->vdev, desc[i].next);
+ }
+ for (; n < (out_sgs + in_sgs); n++) {
+ struct virtio_sg *sg = sgs[n];
+
+ desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT |
+ VRING_DESC_F_WRITE);
+ desc[i].addr = cpu_to_virtio64(vq->vdev,
+ (u64)(uintptr_t)sg->addr);
+ desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);
+
+ prev = i;
+ i = virtio16_to_cpu(vq->vdev, desc[i].next);
+ }
+ /* Last one doesn't continue */
+ desc[prev].flags &= cpu_to_virtio16(vq->vdev, ~VRING_DESC_F_NEXT);
+
+ /* We're using some buffers from the free list. */
+ vq->num_free -= descs_used;
+
+ /* Update free pointer */
+ vq->free_head = i;
+
+ /*
+ * Put entry in available array (but don't update avail->idx
+ * until they do sync).
+ */
+ avail = vq->avail_idx_shadow & (vq->vring.num - 1);
+ vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);
+
+ /*
+ * Descriptors and available array need to be set before we expose the
+ * new available array entries.
+ */
+ virtio_wmb();
+ vq->avail_idx_shadow++;
+ vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
+ vq->num_added++;
+
+ /*
+ * This is very unlikely, but theoretically possible.
+ * Kick just in case.
+ */
+ if (unlikely(vq->num_added == (1 << 16) - 1))
+ virtqueue_kick(vq);
+
+ return 0;
+}
+
+static bool virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ u16 new, old;
+ bool needs_kick;
+
+ /*
+ * We need to expose available array entries before checking
+ * avail event.
+ */
+ virtio_mb();
+
+ old = vq->avail_idx_shadow - vq->num_added;
+ new = vq->avail_idx_shadow;
+ vq->num_added = 0;
+
+ if (vq->event) {
+ needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
+ vring_avail_event(&vq->vring)), new, old);
+ } else {
+ needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
+ VRING_USED_F_NO_NOTIFY));
+ }
+
+ return needs_kick;
+}
+
+void virtqueue_kick(struct virtqueue *vq)
+{
+ if (virtqueue_kick_prepare(vq))
+ virtio_notify(vq->vdev, vq);
+}
+
+static void detach_buf(struct virtqueue *vq, unsigned int head)
+{
+ unsigned int i;
+ __virtio16 nextflag = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);
+
+ /* Put back on free list: unmap first-level descriptors and find end */
+ i = head;
+
+ while (vq->vring.desc[i].flags & nextflag) {
+ i = virtio16_to_cpu(vq->vdev, vq->vring.desc[i].next);
+ vq->num_free++;
+ }
+
+ vq->vring.desc[i].next = cpu_to_virtio16(vq->vdev, vq->free_head);
+ vq->free_head = head;
+
+ /* Plus final descriptor */
+ vq->num_free++;
+}
+
+static inline bool more_used(const struct virtqueue *vq)
+{
+ return virtqueue_poll(vq, vq->last_used_idx);
+}
+
+void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
+{
+ unsigned int i;
+ u16 last_used;
+
+ if (!more_used(vq)) {
+ vq_debug(vq, "No more buffers in queue\n");
+ return NULL;
+ }
+
+ /* Only get used array entries after they have been exposed by host */
+ virtio_rmb();
+
+ last_used = (vq->last_used_idx & (vq->vring.num - 1));
+ i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
+ if (len) {
+ *len = virtio32_to_cpu(vq->vdev,
+ vq->vring.used->ring[last_used].len);
+ vq_debug(vq, "last used idx %u with len %u\n", i, *len);
+ }
+
+ if (unlikely(i >= vq->vring.num)) {
+ vq_info(vq, "id %u out of range\n", i);
+ return NULL;
+ }
+
+ detach_buf(vq, i);
+ vq->last_used_idx++;
+ /*
+ * If we expect an interrupt for the next entry, tell host
+ * by writing event index and flush out the write before
+ * the read in the next get_buf call.
+ */
+ if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
+ virtio_store_mb(&vring_used_event(&vq->vring),
+ cpu_to_virtio16(vq->vdev, vq->last_used_idx));
+
+ return (void *)(uintptr_t)virtio64_to_cpu(vq->vdev,
+ vq->vring.desc[i].addr);
+}
+
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring vring,
+ struct virtio_device *vdev)
+{
+ unsigned int i;
+ struct virtqueue *vq;
+
+ vq = malloc(sizeof(*vq));
+ if (!vq)
+ return NULL;
+
+ vq->vdev = vdev;
+ vq->index = index;
+ vq->num_free = vring.num;
+ vq->vring = vring;
+ vq->last_used_idx = 0;
+ vq->avail_flags_shadow = 0;
+ vq->avail_idx_shadow = 0;
+ vq->num_added = 0;
+ list_add_tail(&vq->list, &vdev->vqs);
+
+ vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+
+ /* Tell other side not to bother us */
+ vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(vdev,
+ vq->avail_flags_shadow);
+
+ /* Put everything in free lists */
+ vq->free_head = 0;
+ for (i = 0; i < vring.num - 1; i++)
+ vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
+
+ return vq;
+}
+
+struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev)
+{
+ struct virtqueue *vq;
+ void *queue = NULL;
+ struct vring vring;
+
+ /* We assume num is a power of 2 */
+ if (num & (num - 1)) {
+ pr_err("Bad virtqueue length %u\n", num);
+ return NULL;
+ }
+
+ /* TODO: allocate each queue chunk individually */
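+ /* Try the requested size first; on allocation failure keep halving num
+ * until the ring would fit into a single page (tried below) */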
+ for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
+ queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
+ if (queue)
+ break;
+ }
+
+ if (!num)
+ return NULL;
+
+ if (!queue) {
+ /* Try to get a single page. You are my only hope! */
+ queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
+ }
+ if (!queue)
+ return NULL;
+
+ memset(queue, 0, vring_size(num, vring_align));
+ vring_init(&vring, num, queue, vring_align);
+
+ vq = __vring_new_virtqueue(index, vring, vdev);
+ if (!vq) {
+ free(queue);
+ return NULL;
+ }
+ vq_debug(vq, "created vring @ %p for vq with num %u\n", queue, num);
+
+ return vq;
+}
+
+void vring_del_virtqueue(struct virtqueue *vq)
+{
+ free(vq->vring.desc);
+ list_del(&vq->list);
+ free(vq);
+}
+
+unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
+{
+ return vq->vring.num;
+}
+
+dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq)
+{
+ return (dma_addr_t)vq->vring.desc;
+}
+
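+/* desc, avail and used share one contiguous allocation laid out by
+ * vring_init(), so the avail/used addresses are fixed offsets from desc */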
+dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq)
+{
+ return (dma_addr_t)vq->vring.desc +
+ ((char *)vq->vring.avail - (char *)vq->vring.desc);
+}
+
+dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq)
+{
+ return (dma_addr_t)vq->vring.desc +
+ ((char *)vq->vring.used - (char *)vq->vring.desc);
+}
+
+bool virtqueue_poll(const struct virtqueue *vq, u16 last_used_idx)
+{
+ virtio_mb();
+
+ return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx);
+}
+
+void virtqueue_dump(struct virtqueue *vq)
+{
+ unsigned int i;
+
+ printf("virtqueue %p for dev %s:\n", vq, vq->vdev->dev.name);
+ printf("\tindex %u, phys addr %p num %u\n",
+ vq->index, vq->vring.desc, vq->vring.num);
+ printf("\tfree_head %u, num_added %u, num_free %u\n",
+ vq->free_head, vq->num_added, vq->num_free);
+ printf("\tlast_used_idx %u, avail_flags_shadow %u, avail_idx_shadow %u\n",
+ vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow);
+
+ printf("Descriptor dump:\n");
+ for (i = 0; i < vq->vring.num; i++) {
+ printf("\tdesc[%u] = { 0x%llx, len %u, flags %u, next %u }\n",
+ i, vq->vring.desc[i].addr, vq->vring.desc[i].len,
+ vq->vring.desc[i].flags, vq->vring.desc[i].next);
+ }
+
+ printf("Avail ring dump:\n");
+ printf("\tflags %u, idx %u\n",
+ vq->vring.avail->flags, vq->vring.avail->idx);
+ for (i = 0; i < vq->vring.num; i++) {
+ printf("\tavail[%u] = %u\n",
+ i, vq->vring.avail->ring[i]);
+ }
+
+ printf("Used ring dump:\n");
+ printf("\tflags %u, idx %u\n",
+ vq->vring.used->flags, vq->vring.used->idx);
+ for (i = 0; i < vq->vring.num; i++) {
+ printf("\tused[%u] = { %u, %u }\n", i,
+ vq->vring.used->ring[i].id, vq->vring.used->ring[i].len);
+ }
+}
+
+int virtio_notify(struct virtio_device *vdev, struct virtqueue *vq)
+{
+ return vdev->config->notify(vdev, vq);
+}