author      Sascha Hauer <s.hauer@pengutronix.de>    2019-03-07 14:23:37 +0100
committer   Sascha Hauer <s.hauer@pengutronix.de>    2019-03-07 14:23:37 +0100
commit      ce9cbae133c84c147bc6823f07c0b55bf4012837 (patch)
tree        252d37bb21d8305a59a6d80346d61d24bd42b881 /drivers
parent      63557eb99211a4d1e5f722255a2f9fd0a1e7c6ae (diff)
parent      aedcb568afe4be0a5dfcc2c02f05efc3915c00f7 (diff)
Merge branch 'for-next/nvme'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                1
-rw-r--r--  drivers/Makefile               1
-rw-r--r--  drivers/net/e1000/main.c       7
-rw-r--r--  drivers/net/rtl8139.c          7
-rw-r--r--  drivers/net/rtl8169.c          7
-rw-r--r--  drivers/nvme/Kconfig           5
-rw-r--r--  drivers/nvme/Makefile          1
-rw-r--r--  drivers/nvme/host/Kconfig     11
-rw-r--r--  drivers/nvme/host/Makefile     9
-rw-r--r--  drivers/nvme/host/core.c     614
-rw-r--r--  drivers/nvme/host/nvme.h     148
-rw-r--r--  drivers/nvme/host/pci.c      697
-rw-r--r--  drivers/usb/host/xhci-pci.c    7
13 files changed, 1491 insertions, 24 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index c6c2eb14db..d6fbcbfe16 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -39,5 +39,6 @@ source "drivers/phy/Kconfig"
source "drivers/crypto/Kconfig"
source "drivers/memory/Kconfig"
source "drivers/soc/imx/Kconfig"
+source "drivers/nvme/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 752fd66242..65fd488ce9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_CRYPTO_HW) += crypto/
obj-$(CONFIG_AIODEV) += aiodev/
obj-y += memory/
obj-y += soc/imx/
+obj-y += nvme/
diff --git a/drivers/net/e1000/main.c b/drivers/net/e1000/main.c
index 774e3d030f..f67c5d867b 100644
--- a/drivers/net/e1000/main.c
+++ b/drivers/net/e1000/main.c
@@ -3713,9 +3713,4 @@ static struct pci_driver e1000_eth_driver = {
.probe = e1000_probe,
.remove = e1000_remove,
};
-
-static int e1000_driver_init(void)
-{
- return pci_register_driver(&e1000_eth_driver);
-}
-device_initcall(e1000_driver_init);
+device_pci_driver(e1000_eth_driver);
diff --git a/drivers/net/rtl8139.c b/drivers/net/rtl8139.c
index cfa34a2f2a..e1c57e6b7c 100644
--- a/drivers/net/rtl8139.c
+++ b/drivers/net/rtl8139.c
@@ -594,9 +594,4 @@ static struct pci_driver rtl8139_eth_driver = {
.id_table = rtl8139_pci_tbl,
.probe = rtl8139_probe,
};
-
-static int rtl8139_init(void)
-{
- return pci_register_driver(&rtl8139_eth_driver);
-}
-device_initcall(rtl8139_init);
+device_pci_driver(rtl8139_eth_driver);
diff --git a/drivers/net/rtl8169.c b/drivers/net/rtl8169.c
index ba257509ee..4e9823d424 100644
--- a/drivers/net/rtl8169.c
+++ b/drivers/net/rtl8169.c
@@ -544,9 +544,4 @@ static struct pci_driver rtl8169_eth_driver = {
.id_table = rtl8169_pci_tbl,
.probe = rtl8169_probe,
};
-
-static int rtl8169_init(void)
-{
- return pci_register_driver(&rtl8169_eth_driver);
-}
-device_initcall(rtl8169_init);
+device_pci_driver(rtl8169_eth_driver);
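The e1000, rtl8139 and rtl8169 hunks above (and the xhci-pci hunk at the end of this merge) all drop the same boilerplate: a one-line init function calling pci_register_driver() plus a device_initcall() for it, replaced by a single device_pci_driver() statement. The macro itself is defined outside this diff; a minimal sketch of what such a helper could expand to, assuming barebox's initcall conventions, is:

    /* Illustrative sketch only -- not the actual barebox definition. */
    #define device_pci_driver(drv)				\
    	static int __##drv##_register(void)		\
    	{						\
    		return pci_register_driver(&(drv));	\
    	}						\
    	device_initcall(__##drv##_register)

Each converted driver still registers at device initcall time; the conversion only removes the per-driver wrapper function.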
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
new file mode 100644
index 0000000000..27ac9654ac
--- /dev/null
+++ b/drivers/nvme/Kconfig
@@ -0,0 +1,5 @@
+menu "NVME Support"
+
+source "drivers/nvme/host/Kconfig"
+
+endmenu
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
new file mode 100644
index 0000000000..6d7d51c801
--- /dev/null
+++ b/drivers/nvme/Makefile
@@ -0,0 +1 @@
+obj-y += host/
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
new file mode 100644
index 0000000000..8888c8900b
--- /dev/null
+++ b/drivers/nvme/host/Kconfig
@@ -0,0 +1,11 @@
+config NVME_CORE
+ bool
+
+config BLK_DEV_NVME
+ bool "NVM Express block device"
+ depends on PCI && BLOCK
+ select NVME_CORE
+ ---help---
+ The NVM Express driver is for solid state drives directly
+ connected to the PCI or PCI Express bus. If you know you
+ don't have one of these, it is safe to answer N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
new file mode 100644
index 0000000000..9afbc0d2e1
--- /dev/null
+++ b/drivers/nvme/host/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_CORE) += nvme-core.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+
+nvme-core-y := core.o
+nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
new file mode 100644
index 0000000000..e0984708b4
--- /dev/null
+++ b/drivers/nvme/host/core.c
@@ -0,0 +1,614 @@
+#include <common.h>
+
+#include "nvme.h"
+
+int __nvme_submit_sync_cmd(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd,
+ union nvme_result *result,
+ void *buffer, unsigned bufflen,
+ unsigned timeout, int qid)
+{
+ return ctrl->ops->submit_sync_cmd(ctrl, cmd, result, buffer, bufflen,
+ timeout, qid);
+}
+EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
+
+int nvme_submit_sync_cmd(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd,
+ void *buffer, unsigned bufflen)
+{
+ return __nvme_submit_sync_cmd(ctrl, cmd, NULL, buffer, bufflen, 0,
+ NVME_QID_ADMIN);
+}
+EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
+
+static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
+{
+ struct nvme_command c = { };
+ int error;
+
+ /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CTRL;
+
+ *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
+ if (!*id)
+ return -ENOMEM;
+
+ error = nvme_submit_sync_cmd(dev, &c, *id,
+ sizeof(struct nvme_id_ctrl));
+ if (error)
+ kfree(*id);
+
+ return error;
+}
+
+static int
+nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
+ void *buffer, size_t buflen, u32 *result)
+{
+ struct nvme_command c;
+ union nvme_result res;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_set_features;
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ ret = __nvme_submit_sync_cmd(dev, &c, &res, buffer, buflen, 0,
+ NVME_QID_ADMIN);
+ if (ret >= 0 && result)
+ *result = le32_to_cpu(res.u32);
+ return ret;
+}
+
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
+{
+ u32 q_count = (*count - 1) | ((*count - 1) << 16);
+ u32 result;
+ int status, nr_io_queues;
+
+ status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
+ &result);
+ if (status < 0)
+ return status;
+
+ /*
+ * Degraded controllers might return an error when setting the queue
+ * count. We still want to be able to bring them online and offer
+ * access to the admin queue, as that might be the only way to fix them up.
+ */
+ if (status > 0) {
+ dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
+ *count = 0;
+ } else {
+ nr_io_queues = min(result & 0xffff, result >> 16) + 1;
+ *count = min(*count, nr_io_queues);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_set_queue_count);
+
+static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
+{
+ uint64_t start = get_time_ns();
+ unsigned long timeout =
+ ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2);
+ u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
+ int ret;
+
+ while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+ if (csts == ~0)
+ return -ENODEV;
+ if ((csts & NVME_CSTS_RDY) == bit)
+ break;
+
+ mdelay(100);
+
+ if (is_timeout(start, timeout)) {
+ dev_err(ctrl->dev,
+ "Device not ready; aborting %s\n", enabled ?
+ "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+
+ return ret;
+}
+
+static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
+{
+ struct nvme_command c = { };
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
+ c.identify.nsid = cpu_to_le32(nsid);
+ return nvme_submit_sync_cmd(dev, &c, ns_list, NVME_IDENTIFY_DATA_SIZE);
+}
+
+static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
+ unsigned nsid)
+{
+ struct nvme_id_ns *id;
+ struct nvme_command c = { };
+ int error;
+
+ /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.cns = NVME_ID_CNS_NS;
+
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NULL;
+
+ error = nvme_submit_sync_cmd(ctrl, &c, id, sizeof(*id));
+ if (error) {
+ dev_warn(ctrl->dev, "Identify namespace failed\n");
+ kfree(id);
+ return NULL;
+ }
+
+ return id;
+}
+
+static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
+ unsigned nsid, struct nvme_id_ns *id)
+{
+ static int instance = 1;
+ struct nvme_ns_head *head;
+ int ret = -ENOMEM;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ goto out;
+
+ head->instance = instance++;
+ head->ns_id = nsid;
+
+ return head;
+out:
+ return ERR_PTR(ret);
+}
+
+static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
+ struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ const bool is_shared = id->nmic & (1 << 0);
+ struct nvme_ns_head *head = NULL;
+
+ if (is_shared) {
+ dev_info(ctrl->dev, "Skipping shared namespace %u\n", nsid);
+ return -ENOTSUPP;
+ }
+
+ head = nvme_alloc_ns_head(ctrl, nsid, id);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ ns->head = head;
+
+ return 0;
+}
+
+#define DISK_NAME_LEN 32
+
+static void nvme_update_disk_info(struct block_device *blk, struct nvme_ns *ns,
+ struct nvme_id_ns *id)
+{
+ blk->blockbits = ns->lba_shift;
+ blk->num_blocks = le64_to_cpup(&id->nsze);
+
+ ns->readonly = id->nsattr & (1 << 0);
+}
+
+static void __nvme_revalidate_disk(struct block_device *blk,
+ struct nvme_id_ns *id)
+{
+ struct nvme_ns *ns = to_nvme_ns(blk);
+
+ /*
+ * If identify namespace failed, use a default 512 byte block size so
+ * the block layer can still be used before failing reads/writes for 0 capacity.
+ */
+ ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
+ if (ns->lba_shift == 0)
+ ns->lba_shift = 9;
+
+ nvme_update_disk_info(blk, ns, id);
+}
+
+static void nvme_setup_rw(struct nvme_ns *ns, struct nvme_command *cmnd,
+ int block, int num_block)
+{
+ cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, block));
+ cmnd->rw.length = cpu_to_le16(num_block - 1);
+ cmnd->rw.control = 0;
+ cmnd->rw.dsmgmt = 0;
+}
+
+static void nvme_setup_flush(struct nvme_ns *ns, struct nvme_command *cmnd)
+{
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
+}
+
+static int nvme_submit_sync_rw(struct nvme_ns *ns, struct nvme_command *cmnd,
+ void *buffer, int block, int num_blocks)
+{
+ /*
+ * ns->ctrl->max_hw_sectors is in units of 512 bytes, so we
+ * need to make sure we adjust it to discovered lba_shift
+ */
+ const u32 max_hw_sectors =
+ ns->ctrl->max_hw_sectors >> (ns->lba_shift - 9);
+ int ret;
+
+ if (num_blocks > max_hw_sectors) {
+ while (num_blocks) {
+ const int chunk = min_t(int, num_blocks,
+ max_hw_sectors);
+
+ ret = nvme_submit_sync_rw(ns, cmnd, buffer, block,
+ chunk);
+ if (ret)
+ break;
+
+ num_blocks -= chunk;
+ buffer += chunk << ns->lba_shift;
+ block += chunk;
+ }
+
+ return ret;
+ }
+
+ nvme_setup_rw(ns, cmnd, block, num_blocks);
+
+ ret = __nvme_submit_sync_cmd(ns->ctrl, cmnd, NULL, buffer,
+ num_blocks << ns->lba_shift,
+ 0, NVME_QID_IO);
+
+ if (ret) {
+ dev_err(ns->ctrl->dev,
+ "I/O failed: block: %d, num blocks: %d, status code type: %xh, status code %02xh\n",
+ block, num_blocks, (ret >> 8) & 0xf,
+ ret & 0xff);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static int nvme_block_device_read(struct block_device *blk, void *buffer,
+ int block, int num_blocks)
+{
+ struct nvme_ns *ns = to_nvme_ns(blk);
+ struct nvme_command cmnd = { };
+
+ cmnd.rw.opcode = nvme_cmd_read;
+
+ return nvme_submit_sync_rw(ns, &cmnd, buffer, block, num_blocks);
+}
+
+static int __maybe_unused
+nvme_block_device_write(struct block_device *blk, const void *buffer,
+ int block, int num_blocks)
+{
+ struct nvme_ns *ns = to_nvme_ns(blk);
+ struct nvme_command cmnd = { };
+
+ if (ns->readonly)
+ return -EINVAL;
+
+ cmnd.rw.opcode = nvme_cmd_write;
+
+ return nvme_submit_sync_rw(ns, &cmnd, (void *)buffer, block,
+ num_blocks);
+}
+
+static int __maybe_unused nvme_block_device_flush(struct block_device *blk)
+{
+ struct nvme_ns *ns = to_nvme_ns(blk);
+ struct nvme_command cmnd = { };
+
+ nvme_setup_flush(ns, &cmnd);
+
+ return __nvme_submit_sync_cmd(ns->ctrl, &cmnd, NULL, NULL,
+ 0, 0, NVME_QID_IO);
+}
+
+static struct block_device_ops nvme_block_device_ops = {
+ .read = nvme_block_device_read,
+#ifdef CONFIG_BLOCK_WRITE
+ .write = nvme_block_device_write,
+ .flush = nvme_block_device_flush,
+#endif
+};
+
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+{
+ struct nvme_ns *ns;
+ struct nvme_id_ns *id;
+ char disk_name[DISK_NAME_LEN];
+ int ret, flags;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return;
+
+ ns->ctrl = ctrl;
+ ns->lba_shift = 9; /* set to a default value for 512 until
+ * disk is validated */
+
+ id = nvme_identify_ns(ctrl, nsid);
+ if (!id)
+ goto out_free_ns;
+
+ if (id->ncap == 0)
+ goto out_free_id;
+
+ if (nvme_init_ns_head(ns, nsid, id))
+ goto out_free_id;
+
+ nvme_set_disk_name(disk_name, ns, ctrl, &flags);
+
+ ns->blk.dev = ctrl->dev;
+ ns->blk.ops = &nvme_block_device_ops;
+ ns->blk.cdev.name = strdup(disk_name);
+
+ __nvme_revalidate_disk(&ns->blk, id);
+ kfree(id);
+
+ ret = blockdevice_register(&ns->blk);
+ if (ret) {
+ dev_err(ctrl->dev, "Cannot register block device (%d)\n", ret);
+ goto out_free_id;
+ }
+
+ return;
+out_free_id:
+ kfree(id);
+out_free_ns:
+ kfree(ns);
+}
+
+static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
+{
+ __le32 *ns_list;
+ unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
+ int ret = 0;
+
+ ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+ if (!ns_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_lists; i++) {
+ ret = nvme_identify_ns_list(ctrl, prev, ns_list);
+ if (ret)
+ goto out;
+
+ for (j = 0; j < min(nn, 1024U); j++) {
+ nsid = le32_to_cpu(ns_list[j]);
+ if (!nsid)
+ goto out;
+
+ nvme_alloc_ns(ctrl, nsid);
+ }
+ nn -= j;
+ }
+ out:
+ kfree(ns_list);
+ return ret;
+}
+
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
+{
+ unsigned i;
+
+ for (i = 1; i <= nn; i++)
+ nvme_alloc_ns(ctrl, i);
+}
+
+static void nvme_scan_work(struct nvme_ctrl *ctrl)
+{
+ struct nvme_id_ctrl *id;
+ unsigned nn;
+
+ if (nvme_identify_ctrl(ctrl, &id))
+ return;
+
+ nn = le32_to_cpu(id->nn);
+ if (ctrl->vs >= NVME_VS(1, 1, 0)) {
+ if (!nvme_scan_ns_list(ctrl, nn))
+ goto out_free_id;
+ }
+ nvme_scan_ns_sequential(ctrl, nn);
+out_free_id:
+ kfree(id);
+}
+
+void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->queue_count > 1)
+ nvme_scan_work(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvme_start_ctrl);
+
+/*
+ * If the device has been passed off to us in an enabled state, just clear
+ * the enabled bit. The spec says we should set the 'shutdown notification
+ * bits', but doing so may cause the device to complete commands to the
+ * admin queue ... and we don't know what memory that might be pointing at!
+ */
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
+{
+ int ret;
+
+ ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
+ ctrl->ctrl_config &= ~NVME_CC_ENABLE;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+
+ return nvme_wait_ready(ctrl, cap, false);
+}
+EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
+
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
+{
+ /*
+ * Default to a 4K page size, with the intention to update this
+ * path in the future to accommodate architectures with differing
+ * kernel and IO page sizes.
+ */
+ unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
+ int ret;
+
+ if (page_shift < dev_page_min) {
+ dev_err(ctrl->dev,
+ "Minimum device page size %u too large for host (%u)\n",
+ 1 << dev_page_min, 1 << page_shift);
+ return -ENODEV;
+ }
+
+ ctrl->page_size = 1 << page_shift;
+
+ ctrl->ctrl_config = NVME_CC_CSS_NVM;
+ ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
+ ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
+ ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+ ctrl->ctrl_config |= NVME_CC_ENABLE;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+ return nvme_wait_ready(ctrl, cap, true);
+}
+EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
+
+int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
+{
+ uint64_t start = get_time_ns();
+ unsigned long timeout = SHUTDOWN_TIMEOUT;
+ u32 csts;
+ int ret;
+
+ ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
+ ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+
+ while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+ if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
+ break;
+
+ mdelay(100);
+
+ if (is_timeout(start, timeout)) {
+ dev_err(ctrl->dev,
+ "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
+
+#define NVME_ID_MAX_LEN 41
+
+static void nvme_print(struct nvme_ctrl *ctrl, const char *prefix,
+ const char *_string, size_t _length)
+{
+ char string[NVME_ID_MAX_LEN];
+ const size_t length = min(_length, sizeof(string) - 1);
+
+ memcpy(string, _string, length);
+ string[length - 1] = '\0';
+
+ dev_info(ctrl->dev, "%s: %s\n", prefix, string);
+}
+
+static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ nvme_print(ctrl, "serial", id->sn, sizeof(id->sn));
+ nvme_print(ctrl, "model", id->mn, sizeof(id->mn));
+ nvme_print(ctrl, "firmware", id->fr, sizeof(id->fr));
+
+ return 0;
+}
+
+/*
+ * Initialize the cached copies of the Identify data and various controller
+ * registers in our nvme_ctrl structure. This should be called as soon as
+ * the admin queue is fully up and running.
+ */
+int nvme_init_identify(struct nvme_ctrl *ctrl)
+{
+ struct nvme_id_ctrl *id;
+ u64 cap;
+ int ret, page_shift;
+ u32 max_hw_sectors;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
+ if (ret) {
+ dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
+ if (ret) {
+ dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+ return ret;
+ }
+ page_shift = NVME_CAP_MPSMIN(cap) + 12;
+
+ ret = nvme_identify_ctrl(ctrl, &id);
+ if (ret) {
+ dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+ return -EIO;
+ }
+
+ ret = nvme_init_subsystem(ctrl, id);
+ if (ret)
+ return ret;
+
+ if (id->mdts)
+ max_hw_sectors = 1 << (id->mdts + page_shift - 9);
+ else
+ max_hw_sectors = UINT_MAX;
+ ctrl->max_hw_sectors =
+ min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
+
+ kfree(id);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_init_identify);
+
+
+/*
+ * Initialize an NVMe controller structure. This needs to be called during the
+ * earliest initialization so that we have the initialized structure around
+ * during probing.
+ */
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device_d *dev,
+ const struct nvme_ctrl_ops *ops)
+{
+ static int instance = 0;
+
+ ctrl->dev = dev;
+ ctrl->ops = ops;
+ ctrl->instance = instance++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
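core.c above splits controller bring-up into separate exported helpers: nvme_disable_ctrl()/nvme_enable_ctrl() toggle CC.EN and wait for CSTS.RDY, nvme_init_identify() caches the Identify Controller data, and nvme_start_ctrl() scans and registers the namespaces. A hedged sketch of the order a transport driver is expected to call them in (the PCI driver later in this merge follows this sequence; example_bringup() and the placeholder comments are assumptions, not part of the patch):

    static int example_bringup(struct nvme_ctrl *ctrl, u64 cap)
    {
    	int ret;

    	ret = nvme_disable_ctrl(ctrl, cap);	/* clear CC.EN, wait for RDY to drop */
    	if (ret)
    		return ret;

    	/* transport-specific admin queue setup goes here */

    	ret = nvme_enable_ctrl(ctrl, cap);	/* set CC.EN, wait for RDY */
    	if (ret)
    		return ret;

    	ret = nvme_init_identify(ctrl);		/* cache Identify Controller data */
    	if (ret)
    		return ret;

    	/* transport-specific I/O queue setup (see nvme_set_queue_count()) goes here */

    	nvme_start_ctrl(ctrl);			/* scan namespaces, register block devices */

    	return 0;
    }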
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
new file mode 100644
index 0000000000..4ec4aef972
--- /dev/null
+++ b/drivers/nvme/host/nvme.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2011-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _NVME_H
+#define _NVME_H
+
+#include <linux/nvme.h>
+#include <dma.h>
+#include <block.h>
+
+#define ADMIN_TIMEOUT (60 * HZ)
+#define SHUTDOWN_TIMEOUT ( 5 * HZ)
+
+/*
+ * Common request structure for NVMe passthrough. All drivers must have
+ * this structure as the first member of their request-private data.
+ */
+struct nvme_request {
+ struct nvme_command *cmd;
+ union nvme_result result;
+ u16 status;
+
+ void *buffer;
+ unsigned int buffer_len;
+ dma_addr_t buffer_dma_addr;
+ enum dma_data_direction dma_dir;
+};
+
+struct nvme_ctrl {
+ const struct nvme_ctrl_ops *ops;
+ struct device_d *dev;
+ int instance;
+
+ u32 ctrl_config;
+ u32 queue_count;
+ u64 cap;
+ u32 page_size;
+ u32 max_hw_sectors;
+ u32 vs;
+};
+
+/*
+ * Anchor structure for namespaces. There is one for each namespace in a
+ * NVMe subsystem that any of our controllers can see, and the namespace
+ * structure for each controller is chained of it. For private namespaces
+ * there is a 1:1 relation to our namespace structures, that is ->list
+ * only ever has a single entry for private namespaces.
+ */
+struct nvme_ns_head {
+ unsigned ns_id;
+ int instance;
+};
+
+struct nvme_ns {
+ struct nvme_ctrl *ctrl;
+ struct nvme_ns_head *head;
+ struct block_device blk;
+
+ int lba_shift;
+ bool readonly;
+};
+
+static inline struct nvme_ns *to_nvme_ns(struct block_device *blk)
+{
+ return container_of(blk, struct nvme_ns, blk);
+}
+
+struct nvme_ctrl_ops {
+ int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+ int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
+ int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+
+ int (*submit_sync_cmd)(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd,
+ union nvme_result *result,
+ void *buffer,
+ unsigned bufflen,
+ unsigned timeout, int qid);
+};
+
+static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
+{
+ u32 val = 0;
+
+ if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+ return false;
+ return val & NVME_CSTS_RDY;
+}
+
+static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+{
+ return (sector >> (ns->lba_shift - 9));
+}
+
+static inline void nvme_end_request(struct nvme_request *rq, __le16 status,
+ union nvme_result result)
+{
+ rq->status = le16_to_cpu(status) >> 1;
+ rq->result = result;
+}
+
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device_d *dev,
+ const struct nvme_ctrl_ops *ops);
+void nvme_start_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_identify(struct nvme_ctrl *ctrl);
+
+enum nvme_queue_id {
+ NVME_QID_ADMIN,
+ NVME_QID_IO,
+ NVME_QID_NUM,
+ NVME_QID_ANY = -1,
+};
+
+int __nvme_submit_sync_cmd(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd,
+ union nvme_result *result,
+ void *buffer, unsigned bufflen,
+ unsigned timeout, int qid);
+int nvme_submit_sync_cmd(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd,
+ void *buffer, unsigned bufflen);
+
+
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+/*
+ * Without the multipath code enabled, multiple controller per subsystems are
+ * visible as devices and thus we cannot use the subsystem instance.
+ */
+static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags)
+{
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+}
+
+#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
new file mode 100644
index 0000000000..387bc45a7b
--- /dev/null
+++ b/drivers/nvme/host/pci.c
@@ -0,0 +1,697 @@
+
+#include <common.h>
+#include <init.h>
+#include <io.h>
+#include <io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
+
+#include <dma.h>
+
+#include "nvme.h"
+
+#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
+#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
+
+#define NVME_MAX_KB_SZ 4096
+
+static int io_queue_depth = 2;
+
+struct nvme_dev;
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+ struct nvme_dev *dev;
+ struct nvme_request *req;
+ struct nvme_command *sq_cmds;
+ volatile struct nvme_completion *cqes;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ u32 __iomem *q_db;
+ u16 q_depth;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 qid;
+ u8 cq_phase;
+
+ u16 counter;
+};
+
+/*
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+ struct nvme_queue queues[NVME_QID_NUM];
+ u32 __iomem *dbs;
+ struct device_d *dev;
+ unsigned online_queues;
+ unsigned max_qid;
+ int q_depth;
+ u32 db_stride;
+ void __iomem *bar;
+ bool subsystem;
+ struct nvme_ctrl ctrl;
+ __le64 *prp_pool;
+ unsigned int prp_pool_size;
+ dma_addr_t prp_dma;
+};
+
+static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_dev, ctrl);
+}
+
+static int nvme_pci_setup_prps(struct nvme_dev *dev,
+ const struct nvme_request *req,
+ struct nvme_rw_command *cmnd)
+{
+ int length = req->buffer_len;
+ const int page_size = dev->ctrl.page_size;
+ dma_addr_t dma_addr = req->buffer_dma_addr;
+ u32 offset = dma_addr & (page_size - 1);
+ u64 prp1 = dma_addr;
+ __le64 *prp_list;
+ int i, nprps;
+ dma_addr_t prp_dma;
+
+
+ length -= (page_size - offset);
+ if (length <= 0) {
+ prp_dma = 0;
+ goto done;
+ }
+
+ dma_addr += (page_size - offset);
+
+ if (length <= page_size) {
+ prp_dma = dma_addr;
+ goto done;
+ }
+
+ nprps = DIV_ROUND_UP(length, page_size);
+ if (nprps > dev->prp_pool_size) {
+ dma_free_coherent(dev->prp_pool, dev->prp_dma,
+ dev->prp_pool_size * sizeof(u64));
+ dev->prp_pool_size = nprps;
+ dev->prp_pool = dma_alloc_coherent(nprps * sizeof(u64),
+ &dev->prp_dma);
+ }
+
+ prp_list = dev->prp_pool;
+ prp_dma = dev->prp_dma;
+
+ i = 0;
+ for (;;) {
+ if (i == page_size >> 3) {
+ __le64 *old_prp_list = prp_list;
+ prp_list = &prp_list[i];
+ prp_dma += page_size;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_addr += page_size;
+ length -= page_size;
+ if (length <= 0)
+ break;
+ }
+
+done:
+ cmnd->dptr.prp1 = cpu_to_le64(prp1);
+ cmnd->dptr.prp2 = cpu_to_le64(prp_dma);
+
+ return 0;
+}
+
+static int nvme_map_data(struct nvme_dev *dev, struct nvme_request *req)
+{
+ if (!req->buffer || !req->buffer_len)
+ return 0;
+
+ req->buffer_dma_addr = dma_map_single(dev->dev, req->buffer,
+ req->buffer_len, req->dma_dir);
+ if (dma_mapping_error(dev->dev, req->buffer_dma_addr))
+ return -EFAULT;
+
+ return nvme_pci_setup_prps(dev, req, &req->cmd->rw);
+}
+
+static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_request *req)
+{
+ if (!req->buffer || !req->buffer_len)
+ return;
+
+ dma_unmap_single(dev->dev, req->buffer_dma_addr, req->buffer_len,
+ req->dma_dir);
+}
+
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
+{
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+
+ if (dev->ctrl.queue_count > qid)
+ return 0;
+
+ nvmeq->cqes = dma_alloc_coherent(CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr);
+ if (!nvmeq->cqes)
+ goto free_nvmeq;
+
+ nvmeq->sq_cmds = dma_alloc_coherent(SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr);
+ if (!nvmeq->sq_cmds)
+ goto free_cqdma;
+
+ nvmeq->dev = dev;
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
+ nvmeq->q_depth = depth;
+ nvmeq->qid = qid;
+ dev->ctrl.queue_count++;
+
+ return 0;
+
+ free_cqdma:
+ dma_free_coherent((void *)nvmeq->cqes, nvmeq->cq_dma_addr,
+ CQ_SIZE(depth));
+ free_nvmeq:
+ return -ENOMEM;
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(id);
+
+ return nvme_submit_sync_cmd(&dev->ctrl, &c, NULL, 0);
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq, s16 vector)
+{
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ memset(&c, 0, sizeof(c));
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(qid);
+ c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_cq.cq_flags = cpu_to_le16(flags);
+ c.create_cq.irq_vector = cpu_to_le16(vector);
+
+ return nvme_submit_sync_cmd(&dev->ctrl, &c, NULL, 0);
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG;
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ memset(&c, 0, sizeof(c));
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(qid);
+ c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_sq.sq_flags = cpu_to_le16(flags);
+ c.create_sq.cqid = cpu_to_le16(qid);
+
+ return nvme_submit_sync_cmd(&dev->ctrl, &c, NULL, 0);
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
+{
+ struct nvme_dev *dev = nvmeq->dev;
+
+ nvmeq->sq_tail = 0;
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
+ dev->online_queues++;
+}
+
+static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
+{
+ struct nvme_dev *dev = nvmeq->dev;
+ int result;
+ s16 vector;
+
+ vector = 0;
+ result = adapter_alloc_cq(dev, qid, nvmeq, vector);
+ if (result)
+ return result;
+
+ result = adapter_alloc_sq(dev, qid, nvmeq);
+ if (result < 0)
+ return result;
+ else if (result)
+ goto release_cq;
+
+ nvme_init_queue(nvmeq, qid);
+
+ return result;
+
+release_cq:
+ adapter_delete_cq(dev, qid);
+ return result;
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ */
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+ memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+}
+
+/* We read the CQE phase first to check if the rest of the entry is valid */
+static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
+{
+ return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
+ nvmeq->cq_phase;
+}
+
+static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
+{
+ u16 head = nvmeq->cq_head;
+
+ writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+}
+
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
+{
+ volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ struct nvme_request *req = nvmeq->req;
+
+ if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
+ dev_warn(nvmeq->dev->ctrl.dev,
+ "invalid id %d completed on queue %d\n",
+ cqe->command_id, le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ if (WARN_ON(cqe->command_id != req->cmd->common.command_id))
+ return;
+
+ nvme_end_request(req, cqe->status, cqe->result);
+}
+
+static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
+{
+ while (start != end) {
+ nvme_handle_cqe(nvmeq, start);
+ if (++start == nvmeq->q_depth)
+ start = 0;
+ }
+}
+
+static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
+{
+ if (++nvmeq->cq_head == nvmeq->q_depth) {
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = !nvmeq->cq_phase;
+ }
+}
+
+static inline bool nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
+ u16 *end, int tag)
+{
+ bool found = false;
+
+ *start = nvmeq->cq_head;
+ while (!found && nvme_cqe_pending(nvmeq)) {
+ if (nvmeq->cqes[nvmeq->cq_head].command_id == tag)
+ found = true;
+ nvme_update_cq_head(nvmeq);
+ }
+ *end = nvmeq->cq_head;
+
+ if (*start != *end)
+ nvme_ring_cq_doorbell(nvmeq);
+ return found;
+}
+
+static bool nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
+{
+ u16 start, end;
+ bool found;
+
+ if (!nvme_cqe_pending(nvmeq))
+ return false;
+
+ found = nvme_process_cq(nvmeq, &start, &end, tag);
+
+ nvme_complete_cqes(nvmeq, start, end);
+ return found;
+}
+
+static int nvme_pci_submit_sync_cmd(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd,
+ union nvme_result *result,
+ void *buffer,
+ unsigned int buffer_len,
+ unsigned timeout, int qid)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+ struct nvme_request req = { };
+ const u16 tag = nvmeq->counter++ & (nvmeq->q_depth - 1);
+ enum dma_data_direction dma_dir;
+ int ret;
+
+ switch (qid) {
+ case NVME_QID_ADMIN:
+ switch (cmd->common.opcode) {
+ case nvme_admin_create_sq:
+ case nvme_admin_create_cq:
+ case nvme_admin_delete_sq:
+ case nvme_admin_delete_cq:
+ case nvme_admin_set_features:
+ dma_dir = DMA_TO_DEVICE;
+ break;
+ case nvme_admin_identify:
+ dma_dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case NVME_QID_IO:
+ switch (cmd->rw.opcode) {
+ case nvme_cmd_write:
+ dma_dir = DMA_TO_DEVICE;
+ break;
+ case nvme_cmd_read:
+ dma_dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd->common.command_id = tag;
+
+ timeout = timeout ?: ADMIN_TIMEOUT;
+
+ req.cmd = cmd;
+ req.buffer = buffer;
+ req.buffer_len = buffer_len;
+ req.dma_dir = dma_dir;
+
+ ret = nvme_map_data(dev, &req);
+ if (ret) {
+ dev_err(dev->dev, "Failed to map request data\n");
+ return ret;
+ }
+
+ nvme_submit_cmd(nvmeq, cmd);
+
+ nvmeq->req = &req;
+ ret = wait_on_timeout(timeout, nvme_poll(nvmeq, tag));
+ nvmeq->req = NULL;
+
+ nvme_unmap_data(dev, &req);
+
+ if (result)
+ *result = req.result;
+
+ return ret ?: req.status;
+}
+
+static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
+{
+ int result;
+ u32 aqa;
+ struct nvme_queue *nvmeq;
+
+ dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
+ NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
+
+ if (dev->subsystem &&
+ (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
+ writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
+
+ result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
+ if (result < 0)
+ return result;
+
+ result = nvme_alloc_queue(dev, NVME_QID_ADMIN, NVME_AQ_DEPTH);
+ if (result)
+ return result;
+
+ nvmeq = &dev->queues[NVME_QID_ADMIN];
+ aqa = nvmeq->q_depth - 1;
+ aqa |= aqa << 16;
+
+ writel(aqa, dev->bar + NVME_REG_AQA);
+ writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
+ writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
+
+ result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
+ if (result)
+ return result;
+
+ nvme_init_queue(nvmeq, NVME_QID_ADMIN);
+
+ return result;
+}
+
+static int nvme_create_io_queues(struct nvme_dev *dev)
+{
+ unsigned i, max;
+ int ret = 0;
+
+ for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
+ if (nvme_alloc_queue(dev, i, dev->q_depth)) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ max = min(dev->max_qid, dev->ctrl.queue_count - 1);
+ for (i = dev->online_queues; i <= max; i++) {
+ ret = nvme_create_queue(&dev->queues[i], i);
+ if (ret)
+ break;
+ }
+
+ /*
+ * Ignore failing Create SQ/CQ commands, we can continue with less
+ * than the desired amount of queues, and even a controller without
+ * I/O queues can still be used to issue admin commands. This might
+ * be useful to upgrade a buggy firmware for example.
+ */
+ return ret >= 0 ? 0 : ret;
+}
+
+static int nvme_setup_io_queues(struct nvme_dev *dev)
+{
+ int result, nr_io_queues;
+
+ nr_io_queues = NVME_QID_NUM - 1;
+ result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
+ if (result < 0)
+ return result;
+
+ dev->max_qid = nr_io_queues;
+
+ return nvme_create_io_queues(dev);
+}
+
+static int nvme_pci_enable(struct nvme_dev *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (pci_enable_device(pdev))
+ return -ENOMEM;
+
+ pci_set_master(pdev);
+
+ if (readl(dev->bar + NVME_REG_CSTS) == -1)
+ return -ENODEV;
+
+ dev->ctrl.cap = readq(dev->bar + NVME_REG_CAP);
+
+ dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
+ io_queue_depth);
+ dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
+ dev->dbs = dev->bar + 4096;
+
+ return 0;
+}
+
+static void nvme_reset_work(struct nvme_dev *dev)
+{
+ int result = -ENODEV;
+
+ result = nvme_pci_enable(dev);
+ if (result)
+ goto out;
+
+ result = nvme_pci_configure_admin_queue(dev);
+ if (result)
+ goto out;
+
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+
+ result = nvme_init_identify(&dev->ctrl);
+ if (result)
+ goto out;
+
+ result = nvme_setup_io_queues(dev);
+ if (result) {
+ dev_err(dev->ctrl.dev, "IO queues not created\n");
+ goto out;
+ }
+
+ nvme_start_ctrl(&dev->ctrl);
+out:
+ return;
+}
+
+static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ *val = readl(to_nvme_dev(ctrl)->bar + off);
+ return 0;
+}
+
+static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ writel(val, to_nvme_dev(ctrl)->bar + off);
+ return 0;
+}
+
+static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ *val = readq(to_nvme_dev(ctrl)->bar + off);
+ return 0;
+}
+
+static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .reg_read32 = nvme_pci_reg_read32,
+ .reg_write32 = nvme_pci_reg_write32,
+ .reg_read64 = nvme_pci_reg_read64,
+ .submit_sync_cmd = nvme_pci_submit_sync_cmd,
+};
+
+static void nvme_dev_map(struct nvme_dev *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ dev->bar = pci_iomap(pdev, 0);
+}
+
+static void nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
+{
+ int ret;
+ ret = adapter_delete_queue(nvmeq->dev, opcode, nvmeq->qid);
+ if (ret < 0)
+ dev_err(nvmeq->dev->dev, "%s: %s\n", __func__,
+ strerror(-ret));
+ else if (ret)
+ dev_err(nvmeq->dev->dev,
+ "%s: status code type: %xh, status code %02xh\n",
+ __func__, (ret >> 8) & 0xf, ret & 0xff);
+}
+
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+ int i, queues = dev->online_queues - 1;
+
+ for (i = queues; i > 0; i--) {
+ nvme_delete_queue(&dev->queues[i], nvme_admin_delete_sq);
+ nvme_delete_queue(&dev->queues[i], nvme_admin_delete_cq);
+ }
+}
+
+static void nvme_disable_admin_queue(struct nvme_dev *dev)
+{
+ struct nvme_queue *nvmeq = &dev->queues[0];
+ u16 start, end;
+
+ nvme_shutdown_ctrl(&dev->ctrl);
+ nvme_process_cq(nvmeq, &start, &end, -1);
+ nvme_complete_cqes(nvmeq, start, end);
+}
+
+static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct nvme_dev *dev;
+ int result;
+
+ dev = xzalloc(sizeof(*dev));
+ dev->dev = &pdev->dev;
+ pdev->dev.priv = dev;
+
+ nvme_dev_map(dev);
+ result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops);
+ if (result)
+ return result;
+
+ nvme_reset_work(dev);
+
+ return 0;
+}
+
+static void nvme_remove(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pdev->dev.priv;
+ bool dead = true;
+
+ u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
+ dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY));
+
+ if (!dead && dev->ctrl.queue_count > 0) {
+ nvme_disable_io_queues(dev);
+ nvme_disable_admin_queue(dev);
+ }
+}
+
+static const struct pci_device_id nvme_id_table[] = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, PCI_ANY_ID) },
+ { 0, },
+};
+
+static struct pci_driver nvme_driver = {
+ .name = "nvme",
+ .id_table = nvme_id_table,
+ .probe = nvme_probe,
+ .remove = nvme_remove,
+};
+device_pci_driver(nvme_driver);
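nvme_pci_setup_prps() above encodes a request's data buffer into the command's two PRP fields. A worked example with made-up addresses and the driver's default 4 KiB controller page size: a 10 KiB buffer mapped at DMA address 0x1000200 (offset 0x200 into its page) is described as

    prp1        = 0x1000200      (covers the first 4096 - 512 = 3584 bytes)
    prp_list[0] = 0x1001000      (next full page)
    prp_list[1] = 0x1002000      (remaining 2560 bytes)
    prp2        = DMA address of the PRP list

Had at most one page remained after the first entry, prp2 would carry that page address directly instead of pointing to a list (the length <= page_size branch).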
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index a140b1dd07..7a9315a0b6 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -37,9 +37,4 @@ static struct pci_driver xhci_pci_driver = {
.id_table = xhci_pci_tbl,
.probe = xhci_pci_probe,
};
-
-static int xhci_pci_init(void)
-{
- return pci_register_driver(&xhci_pci_driver);
-}
-device_initcall(xhci_pci_init);
+device_pci_driver(xhci_pci_driver);