Thomas Heijligen has uploaded this change for review.

View Change

libpayload: nvme driver

Change-Id: Ie75b1dc743dac3426c230c57ee23b771ba3a6e0c
Signed-off-by: Thomas Heijligen <thomas.heijligen@secunet.com>
---
M payloads/libpayload/drivers/Makefile.inc
M payloads/libpayload/drivers/storage/Kconfig
A payloads/libpayload/drivers/storage/nvme.c
M payloads/libpayload/drivers/storage/storage.c
A payloads/libpayload/include/storage/nvme.h
M payloads/libpayload/include/storage/storage.h
M payloads/libpayload/include/x86/arch/io.h
A payloads/libpayload/sample/nvme_test.c
8 files changed, 429 insertions(+), 0 deletions(-)

git pull ssh://review.coreboot.org:29418/coreboot refs/changes/82/33582/1
diff --git a/payloads/libpayload/drivers/Makefile.inc b/payloads/libpayload/drivers/Makefile.inc
index 40e587c..676dbd3 100644
--- a/payloads/libpayload/drivers/Makefile.inc
+++ b/payloads/libpayload/drivers/Makefile.inc
@@ -77,6 +77,7 @@
libc-$(CONFIG_LP_STORAGE) += storage/storage.c
libc-$(CONFIG_LP_STORAGE_AHCI) += storage/ahci.c
libc-$(CONFIG_LP_STORAGE_AHCI) += storage/ahci_common.c
+libc-$(CONFIG_LP_STORAGE_NVME) += storage/nvme.c
ifeq ($(CONFIG_LP_STORAGE_ATA),y)
libc-$(CONFIG_LP_STORAGE_ATA) += storage/ata.c
libc-$(CONFIG_LP_STORAGE_ATA) += storage/ahci_ata.c
diff --git a/payloads/libpayload/drivers/storage/Kconfig b/payloads/libpayload/drivers/storage/Kconfig
index 04e9a29..3eabf6a 100644
--- a/payloads/libpayload/drivers/storage/Kconfig
+++ b/payloads/libpayload/drivers/storage/Kconfig
@@ -57,3 +57,10 @@
help
If this option is selected only AHCI controllers which are known
to work will be used.
+
+config STORAGE_NVME
+ bool "Support for NVMe devices"
+ depends on STORAGE && PCI
+ default y
+ help
+ Select this option to include support for NVMe storage devices.
diff --git a/payloads/libpayload/drivers/storage/nvme.c b/payloads/libpayload/drivers/storage/nvme.c
new file mode 100644
index 0000000..871759d
--- /dev/null
+++ b/payloads/libpayload/drivers/storage/nvme.c
@@ -0,0 +1,331 @@
+#include <libpayload.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <pci.h>
+#include <pci/pci.h>
+#include <storage/nvme.h>
+#include <storage/storage.h>
+
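+/* PCI class 0x01 (mass storage), subclass 0x08 (non-volatile memory controller) */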
+#define PCI_CLASS_CODE_NVME 0x0108
+
+// NVME Controller Configuration
+#define NVME_CC_EN (1 << 0)
+#define NVME_CC_CSS (0 << 4)
+#define NVME_CC_MPS (0 << 7)
+#define NVME_CC_AMS (0 << 11)
+#define NVME_CC_SHN (0 << 14)
+#define NVME_CC_IOSQES (6 << 16)
+#define NVME_CC_IOCQES (4 << 20)
+
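+/* Minimal two-entry queues; SQ entries are 64 bytes (2^IOSQES), CQ entries 16 bytes (2^IOCQES) */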
+#define NVME_QUEUE_SIZE 2
+#define NVME_SQ_ENTRY_SIZE 64
+#define NVME_CQ_ENTRY_SIZE 16
+
+struct nvme_s_queue_entry {
+ uint32_t dw[16];
+};
+
+struct nvme_c_queue_entry {
+ uint32_t dw[4];
+};
+
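+/* Indices into nvme_dev.queue[]: ads/adc are the admin SQ/CQ, ios/ioc the I/O SQ/CQ */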
+enum nvme_queue {
+ NVME_ADMIN_QUEUE = 0,
+ NVME_IO_QUEUE = 2,
+ ads = 0,
+ adc = 1,
+ ios = 2,
+ ioc = 3,
+};
+
+static storage_poll_t nvme_poll(struct storage_dev *dev);
+static void nvme_detach_device(struct storage_dev *dev);
+static ssize_t nvme_read_blocks512(struct storage_dev *dev, lba_t start, size_t count, unsigned char *buf);
+
+static int create_admin_queues(struct nvme_dev *nvme);
+static int create_io_submission_queue(struct nvme_dev *nvme);
+static int create_io_completion_queue(struct nvme_dev *nvme);
+static void delete_admin_queues(struct nvme_dev *nvme);
+static void delete_io_submission_queue(struct nvme_dev *nvme);
+static void delete_io_completion_queue(struct nvme_dev *nvme);
+static int nvme_cmd(struct nvme_dev *nvme, enum nvme_queue q, const struct nvme_s_queue_entry *cmd);
+static int nvme_read(struct nvme_dev *nvme, void *buffer, uint64_t base, uint16_t count);
+
+
+static storage_poll_t nvme_poll(struct storage_dev *dev)
+{
+ return POLL_MEDIUM_PRESENT;
+}
+
+static void nvme_detach_device(struct storage_dev *dev)
+{
+ //nvme_free(dev->driver_struct);
+ // FIXME remove from list
+}
+
+static ssize_t nvme_read_blocks512(struct storage_dev *dev, lba_t start, size_t count, unsigned char *buf)
+{
+ void *buffer = memalign(0x1000, count * 512);
+ if (!buffer)
+ return 0;
+
+ for (int i = 0; i < count; i++) {
+ if (nvme_read((struct nvme_dev *)dev, buffer + (i * 512), start + i, 1)) {
+ free(buffer);
+ return 0;
+ }
+ }
+
+ memcpy(buf, buffer, count * 512);
+ free(buffer);
+ return count;
+}
+
+static int nvme_read(struct nvme_dev *nvme, void *buffer, uint64_t base, uint16_t count)
+{
+ if (count == 0)
+ return -1;
+
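+ /* NVM Read (opcode 0x02), namespace 1: PRP1 = buffer, SLBA = base, NLB = count - 1 (0-based). */
+ /* Assumes the buffer lies below 4 GiB, so only the low PRP1 dword (dw[6]) is filled in. */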
+ struct nvme_s_queue_entry e = {
+ .dw[0] = 0x02,
+ .dw[1] = 0x1,
+ .dw[6] = virt_to_phys(buffer),
+ .dw[10] = base,
+ .dw[11] = base >> 32,
+ .dw[12] = count - 1,
+ };
+ return nvme_cmd(nvme, ios, &e);
+}
+
+static void delete_io_submission_queue(struct nvme_dev *nvme)
+{
+ // TODO
+}
+
+static int create_io_submission_queue(struct nvme_dev *nvme)
+{
+ void *sq_buffer = memalign(0x1000, NVME_SQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
+ if (!sq_buffer) {
+ printf("NVMe ERROR: Faild to allocate memory for io submission queue.\n");
+ return -1;
+ }
+ memset(sq_buffer, 0, NVME_SQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
+
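+ /* Create I/O Submission Queue (opcode 0x01): CDW10 = queue size | QID, CDW11 = CQID | physically contiguous. */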
+ struct nvme_s_queue_entry e = {
+ .dw[0] = 0x01,
+ .dw[6] = virt_to_phys(sq_buffer),
+ .dw[10] = (NVME_QUEUE_SIZE << 16) | ios >> 1,
+ .dw[11] = (1 << 16) | 1,
+ };
+
+ int res = nvme_cmd(nvme, NVME_ADMIN_QUEUE, &e);
+ if (res) {
+ printf("NVMe ERROR: nvme_cmd returned with %i.\n", res);
+ free(sq_buffer);
+ return res;
+ }
+
+ uint8_t cap_dstrd = (read64(nvme->config) >> 32) & 0xf;
+ nvme->queue[ios].base = sq_buffer;
+ nvme->queue[ios].bell = nvme->config + 0x1000 + (ios * (4 << cap_dstrd));
+ nvme->queue[ios].idx = 0;
+ return 0;
+}
+
+static void delete_io_completion_queue(struct nvme_dev *nvme)
+{
+ // TODO
+}
+
+static int create_io_completion_queue(struct nvme_dev *nvme)
+{
+ void *const cq_buffer = memalign(0x1000, NVME_CQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
+ if (!cq_buffer) {
+ printf("NVMe ERROR: Faild to allocate memory for io competion queue.\n");
+ return -1;
+ }
+ memset(cq_buffer, 0, NVME_CQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
+
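+ /* Create I/O Completion Queue (opcode 0x05): CDW10 = queue size | QID, CDW11 = physically contiguous, no interrupts. */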
+ const struct nvme_s_queue_entry e = {
+ .dw[0] = 0x05,
+ .dw[6] = virt_to_phys(cq_buffer),
+ .dw[10] = (NVME_QUEUE_SIZE << 16) | ioc >> 1,
+ .dw[11] = 1,
+ };
+
+ int res = nvme_cmd(nvme, NVME_ADMIN_QUEUE, &e);
+ if (res) {
+ printf("NVMe ERROR: nvme_cmd returned with %i.\n", res);
+ free(cq_buffer);
+ return res;
+ }
+
+ uint8_t cap_dstrd = (read64(nvme->config) >> 32) & 0xf;
+ nvme->queue[ioc].base = cq_buffer;
+ nvme->queue[ioc].bell = nvme->config + 0x1000 + (ioc * (4 << cap_dstrd));
+ nvme->queue[ioc].idx = 0;
+ nvme->queue[ioc].round = 0;
+
+ return 0;
+}
+
+static int nvme_cmd(struct nvme_dev *nvme, enum nvme_queue q, const struct nvme_s_queue_entry *cmd)
+{
+ int sq = q, cq = q+1;
+
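+ /* Place the command in the next SQ slot and ring the SQ tail doorbell, then poll the */
+ /* corresponding CQ entry until its phase tag flips, i.e. the controller completed it. */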
+ void *s_entry = nvme->queue[sq].base + (nvme->queue[sq].idx * NVME_SQ_ENTRY_SIZE);
+ memcpy(s_entry, cmd, NVME_SQ_ENTRY_SIZE);
+ nvme->queue[sq].idx = (nvme->queue[sq].idx + 1) % NVME_QUEUE_SIZE;
+ write32(nvme->queue[sq].bell, nvme->queue[sq].idx);
+
+ struct nvme_c_queue_entry *c_entry = nvme->queue[cq].base + (nvme->queue[cq].idx * NVME_CQ_ENTRY_SIZE);
+ while (((c_entry->dw[3] >> 16 ) & 0x1) == nvme->queue[cq].round)
+ ; // FIXME timeout
+ nvme->queue[cq].idx = (nvme->queue[cq].idx + 1) % NVME_QUEUE_SIZE;
+ write32(nvme->queue[cq].bell, nvme->queue[cq].idx);
+ if (nvme->queue[cq].idx == 0)
+ nvme->queue[cq].round ^= 1;
+ return c_entry->dw[3] >> 17;
+}
+
+static void delete_admin_queues(struct nvme_dev *nvme)
+{
+ free(nvme->queue[ads].base);
+ free(nvme->queue[adc].base);
+ // TODO clean nvme admin queue struct ???
+}
+
+static int create_admin_queues(struct nvme_dev *nvme)
+{
+ uint8_t cap_dstrd = (read64(nvme->config) >> 32) & 0xf;
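+ /* AQA (0x24) holds the admin SQ/CQ sizes; ASQ (0x28) and ACQ (0x30) their base addresses. */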
+ write32(nvme->config + 0x24, ((NVME_QUEUE_SIZE - 1) << 16) | (NVME_QUEUE_SIZE - 1)); /* ACQS/ASQS are 0-based */
+
+ void *sq_buffer = memalign(0x1000, NVME_SQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
+ if (!sq_buffer) {
+ printf("NVMe ERROR: faild to allocated memory for admin submission queue\n");
+ return -1;
+ }
+ memset(sq_buffer, 0, NVME_SQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
+ write64(nvme->config + 0x28, virt_to_phys(sq_buffer));
+
+ nvme->queue[ads].base = sq_buffer;
+ nvme->queue[ads].bell = nvme->config + 0x1000 + (ads * (4 << cap_dstrd));
+ nvme->queue[ads].idx = 0;
+
+ void *cq_buffer = memalign(0x1000, NVME_CQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
+ if (!cq_buffer) {
+ printf("NVMe ERROR: Faild to allocate memory for admin completion queue\n");
+ free(cq_buffer);
+ return -1;
+ }
+ memset(cq_buffer, 0, NVME_CQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
+ write64(nvme->config + 0x30, virt_to_phys(cq_buffer));
+
+ nvme->queue[adc].base = cq_buffer;
+ nvme->queue[adc].bell = nvme->config + 0x1000 + (adc * (4 << cap_dstrd));
+ nvme->queue[adc].idx = 0;
+ nvme->queue[adc].round = 0;
+
+ return 0;
+}
+
+static void nvme_init(pcidev_t dev)
+{
+ printf("NVMe init (Device %02x:%02x.%02x)\n", PCI_BUS(dev), PCI_SLOT(dev), PCI_FUNC(dev));
+
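+ /* MLBAR (BAR0); assumes a 32-bit BAR, the upper half in BAR1 is ignored. */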
+ void *pci_bar0 = phys_to_virt(pci_read_config32(dev, 0x10) & ~0xf);
+
+ if (!((read64(pci_bar0) >> 37) & 0x01)) { /* CAP.CSS bit 0: NVM command set */
+ printf("NVMe ERROR: PCIe device does not support the NVMe command set\n");
+ return;
+ }
+
+ struct nvme_dev *nvme = malloc(sizeof(*nvme));
+ if (!nvme) {
+ printf("NVMe ERROR: Failed to allocate nvme driver struct\n");
+ return;
+ }
+ memset(nvme, 0, sizeof(*nvme)); /* the abort path frees queue pointers, so start from a zeroed struct */
+ nvme->storage_dev.port_type = PORT_TYPE_NVME;
+ nvme->storage_dev.poll = nvme_poll;
+ nvme->storage_dev.read_blocks512 = nvme_read_blocks512;
+ nvme->storage_dev.write_blocks512 = NULL; // not implemented
+ nvme->storage_dev.detach_device = nvme_detach_device;
+ nvme->config = pci_bar0;
+
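+ /* Disable the controller by clearing CC.EN (0x14) and wait for CSTS.RDY (0x1c) to clear. */
+ /* CAP.TO gives the worst-case ready transition time in 500 ms units. */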
+ uint32_t cc = 0;
+ write32(nvme->config + 0x14, cc);
+
+ int status, timeout = (read64(nvme->config) >> 24 & 0xff) * 500;
+ do {
+ status = read32(nvme->config + 0x1c) & 0x3;
+ if (status == 0x2) {
+ printf("NVMe ERROR: Faild to disable controller. FATAL ERROR\n");
+ goto abort;
+ }
+ if (timeout < 0) {
+ printf("NVMe ERROR: Faild to disable controller. Timeout.\n");
+ goto abort;
+ }
+ timeout -= 10;
+ mdelay(10);
+ } while (status != 0x0);
+
+ if (create_admin_queues(nvme))
+ goto abort;
+
+ cc = NVME_CC_EN | NVME_CC_CSS | NVME_CC_MPS | NVME_CC_AMS | NVME_CC_SHN
+ | NVME_CC_IOSQES | NVME_CC_IOCQES;
+ write32(nvme->config + 0x14, cc);
+
+ timeout = (read64(nvme->config) >> 24 & 0xff) * 500;
+ do {
+ status = read32(nvme->config + 0x1c) & 0x3;
+ if (status == 0x2) {
+ printf("NVMe ERROR: Faild to disable controller. FATAL ERROR\n");
+ goto abort;
+ }
+ if (timeout < 0) {
+ printf("NVMe ERROR: Faild to disable controller. Timeout.\n");
+ goto abort;
+ }
+ timeout -= 10;
+ mdelay(10);
+ } while (status != 0x1);
+
+ uint16_t command = pci_read_config16(dev, PCI_COMMAND);
+ pci_write_config16(dev, PCI_COMMAND, command | PCI_COMMAND_MASTER);
+
+ if (create_io_completion_queue(nvme))
+ goto abort;
+ if (create_io_submission_queue(nvme))
+ goto abort;
+
+ storage_attach_device((storage_dev_t*)nvme);
+ printf("NVMe init done.\n");
+ return;
+
+abort:
+ delete_io_submission_queue(nvme);
+ delete_io_completion_queue(nvme);
+ delete_admin_queues(nvme);
+ free(nvme);
+ printf("failed\n");
+ return;
+}
+
+void nvme_initialize(void)
+{
+ int bus, dev, func;
+ uint16_t class;
+
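+ /* Brute-force scan of all PCI bus/device/function numbers for NVMe class devices. */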
+ for (bus = 0; bus < 256; ++bus) {
+ for (dev = 0; dev < 32; ++dev) {
+ for (func = 0; func < 8; ++func) {
+ class = pci_read_config16(PCI_DEV(bus, dev, func), 0xa);
+ if (class == PCI_CLASS_CODE_NVME)
+ nvme_init(PCI_DEV(bus, dev, func));
+ }
+ }
+ }
+}
diff --git a/payloads/libpayload/drivers/storage/storage.c b/payloads/libpayload/drivers/storage/storage.c
index a7141ee..55cb60d 100644
--- a/payloads/libpayload/drivers/storage/storage.c
+++ b/payloads/libpayload/drivers/storage/storage.c
@@ -31,6 +31,9 @@
#if CONFIG(LP_STORAGE_AHCI)
# include <storage/ahci.h>
#endif
+#if CONFIG(LP_STORAGE_NVME)
+#include <storage/nvme.h>
+#endif
#include <storage/storage.h>


@@ -113,4 +116,7 @@
#if CONFIG(LP_STORAGE_AHCI)
ahci_initialize();
#endif
+#if CONFIG(LP_STORAGE_NVME)
+ nvme_initialize();
+#endif
}
diff --git a/payloads/libpayload/include/storage/nvme.h b/payloads/libpayload/include/storage/nvme.h
new file mode 100644
index 0000000..090c6d0
--- /dev/null
+++ b/payloads/libpayload/include/storage/nvme.h
@@ -0,0 +1,22 @@
+#ifndef _STORAGE_NVME_H
+#define _STORAGE_NVME_H
+
+#include <stdint.h>
+#include "storage.h"
+
+struct nvme_dev {
+ storage_dev_t storage_dev;
+
+ void *config;
+ void *admin_s_queue;
+ struct {
+ void *base;
+ uint32_t *bell;
+ uint16_t idx; // current queue slot (0 or 1)
+ uint16_t round; // expected completion phase tag (0 or 1)
+ } queue[4];
+};
+
+void nvme_initialize(void);
+
+#endif /* _STORAGE_NVME_H */
diff --git a/payloads/libpayload/include/storage/storage.h b/payloads/libpayload/include/storage/storage.h
index 2dc70b0..d1f998e 100644
--- a/payloads/libpayload/include/storage/storage.h
+++ b/payloads/libpayload/include/storage/storage.h
@@ -45,6 +45,7 @@
PORT_TYPE_IDE = (1 << 0),
PORT_TYPE_SATA = (1 << 1),
PORT_TYPE_USB = (1 << 2),
+ PORT_TYPE_NVME = (1 << 3),
} storage_port_t;

typedef enum {
diff --git a/payloads/libpayload/include/x86/arch/io.h b/payloads/libpayload/include/x86/arch/io.h
index c417ce0..46836d9 100644
--- a/payloads/libpayload/include/x86/arch/io.h
+++ b/payloads/libpayload/include/x86/arch/io.h
@@ -64,6 +64,11 @@
return *((volatile uint32_t *)(addr));
}

+static inline __attribute__((always_inline)) uint64_t read64(const volatile void *addr)
+{
+ return *((volatile uint64_t *)(addr));
+}
+
static inline __attribute__((always_inline)) void write8(volatile void *addr, uint8_t value)
{
*((volatile uint8_t *)(addr)) = value;
@@ -79,6 +84,11 @@
*((volatile uint32_t *)(addr)) = value;
}

+static inline __attribute__((always_inline)) void write64(volatile void *addr, uint64_t value)
+{
+ *((volatile uint64_t *)(addr)) = value;
+}
+
static inline unsigned int inl(int port)
{
unsigned long val;
diff --git a/payloads/libpayload/sample/nvme_test.c b/payloads/libpayload/sample/nvme_test.c
new file mode 100644
index 0000000..d2bdf10
--- /dev/null
+++ b/payloads/libpayload/sample/nvme_test.c
@@ -0,0 +1,51 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Example file for libpayload. */
+
+#include <libpayload-config.h>
+#include <libpayload.h>
+#include <storage/storage.h>
+
+#define STORAGE_ID 0
+
+int main(void)
+{
+ printf("---------- TEST PROGRAM BEGIN ----------\n");
+ storage_initialize();
+
+ void *buffer = memalign(0x1000, 0x2000);
+ storage_read_blocks512(STORAGE_ID, 0, 15, buffer);
+ printf("\nbuffer content:\n");
+ hexdump(buffer, 0x2000);
+
+ printf("----------- TEST PROGRAM END -----------\n");
+ halt();
+ return 0;
+}

To view, visit change 33582. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: coreboot
Gerrit-Branch: master
Gerrit-Change-Id: Ie75b1dc743dac3426c230c57ee23b771ba3a6e0c
Gerrit-Change-Number: 33582
Gerrit-PatchSet: 1
Gerrit-Owner: Thomas Heijligen <src@posteo.de>
Gerrit-MessageType: newchange