Merge branch 'master' into kqueue

BuildTools 2019-02-23 04:45:28 -05:00
commit 193691897c
17 changed files with 738 additions and 196 deletions

View File

@ -53,7 +53,14 @@ __FBSDID("$FreeBSD$");
#define MAX_FW_SLOTS (7)
SET_CONCAT_DEF(logpage, struct logpage_function);
static SLIST_HEAD(,logpage_function) logpages;
void
logpage_register(struct logpage_function *p)
{
SLIST_INSERT_HEAD(&logpages, p, link);
}
const char *
kv_lookup(const struct kv_name *kv, size_t kv_count, uint32_t key)
@ -326,15 +333,15 @@ NVME_LOGPAGE(fw,
static void
logpage_help(void)
{
const struct logpage_function * const *f;
const struct logpage_function *f;
const char *v;
fprintf(stderr, "\n");
fprintf(stderr, "%-8s %-10s %s\n", "Page", "Vendor","Page Name");
fprintf(stderr, "-------- ---------- ----------\n");
for (f = logpage_begin(); f < logpage_limit(); f++) {
v = (*f)->vendor == NULL ? "-" : (*f)->vendor;
fprintf(stderr, "0x%02x %-10s %s\n", (*f)->log_page, v, (*f)->name);
SLIST_FOREACH(f, &logpages, link) {
v = f->vendor == NULL ? "-" : f->vendor;
fprintf(stderr, "0x%02x %-10s %s\n", f->log_page, v, f->name);
}
exit(1);
@ -352,7 +359,7 @@ logpage(const struct nvme_function *nf, int argc, char *argv[])
uint32_t nsid, size;
void *buf;
const char *vendor = NULL;
const struct logpage_function * const *f;
const struct logpage_function *f;
struct nvme_controller_data cdata;
print_fn_t print_fn;
uint8_t ns_smart;
@ -438,14 +445,14 @@ logpage(const struct nvme_function *nf, int argc, char *argv[])
* the page is vendor specific, don't match the print function
* unless the vendors match.
*/
for (f = logpage_begin(); f < logpage_limit(); f++) {
if ((*f)->vendor != NULL && vendor != NULL &&
strcmp((*f)->vendor, vendor) != 0)
SLIST_FOREACH(f, &logpages, link) {
if (f->vendor != NULL && vendor != NULL &&
strcmp(f->vendor, vendor) != 0)
continue;
if (log_page != (*f)->log_page)
if (log_page != f->log_page)
continue;
print_fn = (*f)->print_fn;
size = (*f)->size;
print_fn = f->print_fn;
size = f->size;
break;
}
}

View File

@ -312,19 +312,17 @@ load_dir(const char *dir)
warnx("Can't load %s: %s", path, dlerror());
else {
/*
* Add in the top (for cli commands) and logpage (for
* logpage parsing) linker sets. We have to do this by
* hand because linker sets aren't automatically merged.
Add in the top (for cli commands) linker sets. We have
* to do this by hand because linker sets aren't
* automatically merged.
*/
void *begin, *limit;
begin = dlsym(h, "__start_set_top");
limit = dlsym(h, "__stop_set_top");
if (begin)
add_to_top(begin, limit);
begin = dlsym(h, "__start_set_logpage");
limit = dlsym(h, "__stop_set_logpage");
if (begin)
add_to_logpage(begin, limit);
/* log pages use constructors so are done on load */
}
free(path);
path = NULL;
@ -337,7 +335,6 @@ main(int argc, char *argv[])
{
add_to_top(NVME_CMD_BEGIN(top), NVME_CMD_LIMIT(top));
add_to_logpage(NVME_LOGPAGE_BEGIN, NVME_LOGPAGE_LIMIT);
load_dir("/lib/nvmecontrol");
load_dir("/usr/local/lib/nvmecontrol");

View File

@ -32,6 +32,7 @@
#define __NVMECONTROL_H__
#include <sys/linker_set.h>
#include <sys/queue.h>
#include <dev/nvme/nvme.h>
struct nvme_function;
@ -56,6 +57,7 @@ struct nvme_function {
typedef void (*print_fn_t)(const struct nvme_controller_data *cdata, void *buf, uint32_t size);
struct logpage_function {
SLIST_ENTRY(logpage_function) link;
uint8_t log_page;
const char *vendor;
const char *name;
@ -64,7 +66,6 @@ struct logpage_function {
};
#define NVME_LOGPAGESET(sym) DATA_SET(NVME_SETNAME(logpage), sym)
#define NVME_LOGPAGE(unique, lp, vend, nam, fn, sz) \
static struct logpage_function unique ## _lpf = { \
.log_page = lp, \
@ -73,10 +74,8 @@ struct logpage_function {
.print_fn = fn, \
.size = sz, \
} ; \
NVME_LOGPAGESET(unique ## _lpf)
#define NVME_LOGPAGE_BEGIN SET_BEGIN(NVME_SETNAME(logpage))
#define NVME_LOGPAGE_LIMIT SET_LIMIT(NVME_SETNAME(logpage))
#define NVME_LOGPAGE_DECLARE(t) SET_DECLARE(NVME_SETNAME(logpage), t)
static void logpage_reg_##unique(void) __attribute__((constructor)); \
static void logpage_reg_##unique(void) { logpage_register(&unique##_lpf); }
#define DEFAULT_SIZE (4096)
struct kv_name {
@ -87,7 +86,7 @@ struct kv_name {
const char *kv_lookup(const struct kv_name *kv, size_t kv_count, uint32_t key);
NVME_CMD_DECLARE(top, struct nvme_function);
NVME_LOGPAGE_DECLARE(struct logpage_function);
void logpage_register(struct logpage_function *p);
struct set_concat {
void **begin;
@ -105,7 +104,6 @@ void add_to_ ## set(t **b, t **e) \
#define SET_CONCAT_DECL(set, t) \
void add_to_ ## set(t **b, t **e)
SET_CONCAT_DECL(top, struct nvme_function);
SET_CONCAT_DECL(logpage, struct logpage_function);
#define NVME_CTRLR_PREFIX "nvme"
#define NVME_NS_PREFIX "ns"
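The header change above is the heart of the nvmecontrol part of this merge: instead of walking a linker set, each NVME_LOGPAGE() user now defines a constructor that calls logpage_register() to put itself on an SLIST, which also covers plugins loaded with dlopen(), since their constructors run at load time. A minimal, self-contained sketch of that pattern, assuming nothing from nvmecontrol (handler, DECLARE_HANDLER and handler_register are illustrative names, not the real API):

#include <sys/queue.h>
#include <stdio.h>

struct handler {
	SLIST_ENTRY(handler) link;
	const char *name;
};

static SLIST_HEAD(, handler) handlers = SLIST_HEAD_INITIALIZER(handlers);

static void
handler_register(struct handler *h)
{
	SLIST_INSERT_HEAD(&handlers, h, link);
}

/* Each translation unit (or dlopen()ed plugin) self-registers at load time. */
#define DECLARE_HANDLER(unique, nam)					\
	static struct handler unique ## _h = { .name = (nam) };	\
	static void unique ## _ctor(void) __attribute__((constructor)); \
	static void unique ## _ctor(void) { handler_register(&unique ## _h); }

DECLARE_HANDLER(foo, "foo");
DECLARE_HANDLER(bar, "bar");

int
main(void)
{
	struct handler *h;

	/* The constructors already ran, so the list is populated. */
	SLIST_FOREACH(h, &handlers, link)
		printf("%s\n", h->name);
	return (0);
}

Note that registration is LIFO here (SLIST_INSERT_HEAD), so iteration order is not declaration order; the diff's logpage_help() likewise makes no ordering promise.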

View File

@ -38,6 +38,7 @@
.Cd "options SC_ALT_MOUSE_IMAGE"
.Cd "options SC_CUT_SEPCHARS=_characters_"
.Cd "options SC_CUT_SPACES2TABS"
.Cd "options SC_DFLT_TERM"
.Cd "options SC_DISABLE_KDBKEY"
.Cd "options SC_DISABLE_REBOOT"
.Cd "options SC_HISTORY_SIZE=N"
@ -48,6 +49,9 @@
.Cd "options SC_NO_PALETTE_LOADING"
.Cd "options SC_NO_SUSPEND_VTYSWITCH"
.Cd "options SC_NO_SYSMOUSE"
.Cd "options SC_NO_TERM_DUMB"
.Cd "options SC_NO_TERM_SC"
.Cd "options SC_NO_TERM_SCTEKEN"
.Cd "options SC_PIXEL_MODE"
.Cd "options SC_TWOBUTTON_MOUSE"
.Cd "options SC_NORM_ATTR=_attribute_"
@ -285,6 +289,8 @@ This option instructs the driver to convert leading spaces into tabs
when copying data into the cut buffer.
This might be useful to preserve
indentation when copying tab-indented text.
.It Dv SC_DFLT_TERM=_name_
This option specifies the name of the preferred terminal emulator.
.It Dv SC_DISABLE_KDBKEY
This option disables the ``debug'' key combination (by default, it is
.Dv Alt-Esc ,
@ -409,6 +415,15 @@ will fail if this option is defined.
This option implies the
.Dv SC_NO_CUTPASTE
option too.
.It Dv SC_NO_TERM_DUMB
.It Dv SC_NO_TERM_SC
.It Dv SC_NO_TERM_SCTEKEN
These options remove the
.Qq dumb ,
.Qq sc ,
and
.Qq scteken
terminal emulators, respectively.
.El
.Ss Driver Flags
The following driver flags can be used to control the

View File

@ -195,8 +195,9 @@ MALLOC_DEFINE(M_AXP8XX_REG, "AXP8xx regulator", "AXP8xx power regulator");
#define AXP_BAT_COULOMB_LO 0xe3
#define AXP_BAT_CAP_WARN 0xe6
#define AXP_BAT_CAP_WARN_LV1 0xf0 /* Bits 4, 5, 6, 7 */
#define AXP_BAT_CAP_WARN_LV2 0xf /* Bits 0, 1, 2, 3 */
#define AXP_BAT_CAP_WARN_LV1 0xf0 /* Bits 4, 5, 6, 7 */
#define AXP_BAP_CAP_WARN_LV1BASE 5 /* 5-20%, 1% per step */
#define AXP_BAT_CAP_WARN_LV2 0xf /* Bits 0, 1, 2, 3 */
/* Sensor conversion macros */
#define AXP_SENSOR_BAT_H(hi) ((hi) << 4)
@ -1527,6 +1528,7 @@ axp8xx_attach(device_t dev)
/* Get thresholds */
if (axp8xx_read(dev, AXP_BAT_CAP_WARN, &val, 1) == 0) {
sc->warn_thres = (val & AXP_BAT_CAP_WARN_LV1) >> 4;
sc->warn_thres += AXP_BAP_CAP_WARN_LV1BASE;
sc->shut_thres = (val & AXP_BAT_CAP_WARN_LV2);
if (bootverbose) {
device_printf(dev,
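To make the arithmetic in the hunk above concrete: the level-1 warning field lives in the upper nibble of AXP_BAT_CAP_WARN and is biased by AXP_BAP_CAP_WARN_LV1BASE, so an illustrative register value of 0xa0 decodes as (0xa0 & 0xf0) >> 4 = 10, giving warn_thres = 10 + 5 = 15%; the nibble range 0-15 therefore maps onto the "5-20%, 1% per step" range noted in the comment.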

View File

@ -1488,6 +1488,7 @@ options MAXCONS=16 # number of virtual consoles
options SC_ALT_MOUSE_IMAGE # simplified mouse cursor in text mode
options SC_DFLT_FONT # compile font in
makeoptions SC_DFLT_FONT=cp850
options SC_DFLT_TERM=\"sc\" # default terminal emulator
options SC_DISABLE_KDBKEY # disable `debug' key
options SC_DISABLE_REBOOT # disable reboot key sequence
options SC_HISTORY_SIZE=200 # number of history buffer lines
@ -1518,6 +1519,9 @@ options SC_NO_HISTORY
options SC_NO_MODE_CHANGE
options SC_NO_SYSMOUSE
options SC_NO_SUSPEND_VTYSWITCH
#!options SC_NO_TERM_DUMB
#!options SC_NO_TERM_SC
#!options SC_NO_TERM_SCTEKEN
# `flags' for sc
# 0x80 Put the video card in the VESA 800x600 dots, 16 color mode
@ -2736,7 +2740,7 @@ options U3G_DEBUG
# options for ukbd:
options UKBD_DFLT_KEYMAP # specify the built-in keymap
makeoptions UKBD_DFLT_KEYMAP=jp
makeoptions UKBD_DFLT_KEYMAP=jp.106
# options for uplcom:
options UPLCOM_INTR_INTERVAL=100 # interrupt pipe interval

View File

@ -3133,7 +3133,9 @@ dev/syscons/rain/rain_saver.c optional rain_saver
dev/syscons/schistory.c optional sc
dev/syscons/scmouse.c optional sc
dev/syscons/scterm.c optional sc
dev/syscons/scterm-teken.c optional sc
dev/syscons/scterm-dumb.c optional sc !SC_NO_TERM_DUMB
dev/syscons/scterm-sc.c optional sc !SC_NO_TERM_SC
dev/syscons/scterm-teken.c optional sc !SC_NO_TERM_TEKEN
dev/syscons/scvidctl.c optional sc
dev/syscons/scvtb.c optional sc
dev/syscons/snake/snake_saver.c optional snake_saver
@ -4883,7 +4885,7 @@ security/mac_veriexec/mac_veriexec_sha1.c optional mac_veriexec_sha1
security/mac_veriexec/mac_veriexec_sha256.c optional mac_veriexec_sha256
security/mac_veriexec/mac_veriexec_sha384.c optional mac_veriexec_sha384
security/mac_veriexec/mac_veriexec_sha512.c optional mac_veriexec_sha512
teken/teken.c optional sc | vt
teken/teken.c optional sc !SC_NO_TERM_TEKEN | vt
ufs/ffs/ffs_alloc.c optional ffs
ufs/ffs/ffs_balloc.c optional ffs
ufs/ffs/ffs_inode.c optional ffs

View File

@ -767,6 +767,7 @@ SC_CUT_SPACES2TABS opt_syscons.h
SC_CUT_SEPCHARS opt_syscons.h
SC_DEBUG_LEVEL opt_syscons.h
SC_DFLT_FONT opt_syscons.h
SC_DFLT_TERM opt_syscons.h
SC_DISABLE_KDBKEY opt_syscons.h
SC_DISABLE_REBOOT opt_syscons.h
SC_HISTORY_SIZE opt_syscons.h
@ -780,6 +781,9 @@ SC_NO_HISTORY opt_syscons.h
SC_NO_MODE_CHANGE opt_syscons.h
SC_NO_SUSPEND_VTYSWITCH opt_syscons.h
SC_NO_SYSMOUSE opt_syscons.h
SC_NO_TERM_DUMB opt_syscons.h
SC_NO_TERM_SC opt_syscons.h
SC_NO_TERM_TEKEN opt_syscons.h
SC_NORM_ATTR opt_syscons.h
SC_NORM_REV_ATTR opt_syscons.h
SC_PIXEL_MODE opt_syscons.h

View File

@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
@ -51,10 +52,240 @@ __FBSDID("$FreeBSD$");
#define _COMPONENT ACPI_OEM
ACPI_MODULE_NAME("NVDIMM")
static struct uuid intel_nvdimm_dsm_uuid =
{0x4309AC30,0x0D11,0x11E4,0x91,0x91,{0x08,0x00,0x20,0x0C,0x9A,0x66}};
#define INTEL_NVDIMM_DSM_REV 1
#define INTEL_NVDIMM_DSM_GET_LABEL_SIZE 4
#define INTEL_NVDIMM_DSM_GET_LABEL_DATA 5
static devclass_t nvdimm_devclass;
static devclass_t nvdimm_root_devclass;
MALLOC_DEFINE(M_NVDIMM, "nvdimm", "NVDIMM driver memory");
static int
read_label_area_size(struct nvdimm_dev *nv)
{
ACPI_OBJECT *result_buffer;
ACPI_HANDLE handle;
ACPI_STATUS status;
ACPI_BUFFER result;
uint32_t *out;
int error;
handle = nvdimm_root_get_acpi_handle(nv->nv_dev);
if (handle == NULL)
return (ENODEV);
result.Length = ACPI_ALLOCATE_BUFFER;
result.Pointer = NULL;
status = acpi_EvaluateDSM(handle, (uint8_t *)&intel_nvdimm_dsm_uuid,
INTEL_NVDIMM_DSM_REV, INTEL_NVDIMM_DSM_GET_LABEL_SIZE, NULL,
&result);
error = ENXIO;
if (ACPI_SUCCESS(status) && result.Pointer != NULL &&
result.Length >= sizeof(ACPI_OBJECT)) {
result_buffer = result.Pointer;
if (result_buffer->Type == ACPI_TYPE_BUFFER &&
result_buffer->Buffer.Length >= 12) {
out = (uint32_t *)result_buffer->Buffer.Pointer;
nv->label_area_size = out[1];
nv->max_label_xfer = out[2];
error = 0;
}
}
if (result.Pointer != NULL)
AcpiOsFree(result.Pointer);
return (error);
}
static int
read_label_area(struct nvdimm_dev *nv, uint8_t *dest, off_t offset,
off_t length)
{
ACPI_BUFFER result;
ACPI_HANDLE handle;
ACPI_OBJECT params_pkg, params_buf, *result_buf;
ACPI_STATUS status;
uint32_t params[2];
off_t to_read;
int error;
error = 0;
handle = nvdimm_root_get_acpi_handle(nv->nv_dev);
if (offset < 0 || length <= 0 ||
offset + length > nv->label_area_size ||
handle == NULL)
return (ENODEV);
params_pkg.Type = ACPI_TYPE_PACKAGE;
params_pkg.Package.Count = 1;
params_pkg.Package.Elements = &params_buf;
params_buf.Type = ACPI_TYPE_BUFFER;
params_buf.Buffer.Length = sizeof(params);
params_buf.Buffer.Pointer = (UINT8 *)params;
while (length > 0) {
to_read = MIN(length, nv->max_label_xfer);
params[0] = offset;
params[1] = to_read;
result.Length = ACPI_ALLOCATE_BUFFER;
result.Pointer = NULL;
status = acpi_EvaluateDSM(handle,
(uint8_t *)&intel_nvdimm_dsm_uuid, INTEL_NVDIMM_DSM_REV,
INTEL_NVDIMM_DSM_GET_LABEL_DATA, &params_pkg, &result);
if (ACPI_FAILURE(status) ||
result.Length < sizeof(ACPI_OBJECT) ||
result.Pointer == NULL) {
error = ENXIO;
break;
}
result_buf = (ACPI_OBJECT *)result.Pointer;
if (result_buf->Type != ACPI_TYPE_BUFFER ||
result_buf->Buffer.Pointer == NULL ||
result_buf->Buffer.Length != 4 + to_read ||
((uint16_t *)result_buf->Buffer.Pointer)[0] != 0) {
error = ENXIO;
break;
}
bcopy(result_buf->Buffer.Pointer + 4, dest, to_read);
dest += to_read;
offset += to_read;
length -= to_read;
if (result.Pointer != NULL) {
AcpiOsFree(result.Pointer);
result.Pointer = NULL;
}
}
if (result.Pointer != NULL)
AcpiOsFree(result.Pointer);
return (error);
}
static uint64_t
fletcher64(const void *data, size_t length)
{
size_t i;
uint32_t a, b;
const uint32_t *d;
a = 0;
b = 0;
d = (const uint32_t *)data;
length = length / sizeof(uint32_t);
for (i = 0; i < length; i++) {
a += d[i];
b += a;
}
return ((uint64_t)b << 32 | a);
}
static bool
label_index_is_valid(struct nvdimm_label_index *index, uint32_t max_labels,
size_t size, size_t offset)
{
uint64_t checksum;
index = (struct nvdimm_label_index *)((uint8_t *)index + offset);
if (strcmp(index->signature, NVDIMM_INDEX_BLOCK_SIGNATURE) != 0)
return false;
checksum = index->checksum;
index->checksum = 0;
if (checksum != fletcher64(index, size) ||
index->this_offset != size * offset || index->this_size != size ||
index->other_offset != size * (offset == 0 ? 1 : 0) ||
index->seq == 0 || index->seq > 3 || index->slot_cnt > max_labels ||
index->label_size != 1)
return false;
return true;
}
static int
read_label(struct nvdimm_dev *nv, int num)
{
struct nvdimm_label_entry *entry, *i, *next;
uint64_t checksum;
off_t offset;
int error;
offset = nv->label_index->label_offset +
num * (128 << nv->label_index->label_size);
entry = malloc(sizeof(*entry), M_NVDIMM, M_WAITOK);
error = read_label_area(nv, (uint8_t *)&entry->label, offset,
sizeof(struct nvdimm_label));
if (error != 0) {
free(entry, M_NVDIMM);
return (error);
}
checksum = entry->label.checksum;
entry->label.checksum = 0;
if (checksum != fletcher64(&entry->label, sizeof(entry->label)) ||
entry->label.slot != num) {
free(entry, M_NVDIMM);
return (ENXIO);
}
/* Insertion ordered by dimm_phys_addr */
if (SLIST_EMPTY(&nv->labels) ||
entry->label.dimm_phys_addr <=
SLIST_FIRST(&nv->labels)->label.dimm_phys_addr) {
SLIST_INSERT_HEAD(&nv->labels, entry, link);
return (0);
}
SLIST_FOREACH_SAFE(i, &nv->labels, link, next) {
if (next == NULL ||
entry->label.dimm_phys_addr <= next->label.dimm_phys_addr) {
SLIST_INSERT_AFTER(i, entry, link);
return (0);
}
}
__unreachable();
}
static int
read_labels(struct nvdimm_dev *nv)
{
struct nvdimm_label_index *indices;
size_t bitfield_size, index_size, num_labels;
int error, n;
bool index_0_valid, index_1_valid;
for (index_size = 256; ; index_size += 256) {
num_labels = 8 * (index_size -
sizeof(struct nvdimm_label_index));
if (index_size + num_labels * sizeof(struct nvdimm_label) >=
nv->label_area_size)
break;
}
num_labels = (nv->label_area_size - index_size) /
sizeof(struct nvdimm_label);
bitfield_size = roundup2(num_labels, 8) / 8;
indices = malloc(2 * index_size, M_NVDIMM, M_WAITOK);
error = read_label_area(nv, (void *)indices, 0, 2 * index_size);
if (error != 0) {
free(indices, M_NVDIMM);
return (error);
}
index_0_valid = label_index_is_valid(indices, num_labels, index_size,
0);
index_1_valid = label_index_is_valid(indices, num_labels, index_size,
1);
if (!index_0_valid && !index_1_valid) {
free(indices, M_NVDIMM);
return (ENXIO);
}
if (index_0_valid && index_1_valid &&
(indices[1].seq > indices[0].seq ||
(indices[1].seq == 1 && indices[0].seq == 3)))
index_0_valid = false;
nv->label_index = malloc(index_size, M_NVDIMM, M_WAITOK);
bcopy(indices + (index_0_valid ? 0 : 1), nv->label_index, index_size);
free(indices, M_NVDIMM);
for (bit_ffc_at((bitstr_t *)nv->label_index->free, 0, num_labels, &n);
n >= 0;
bit_ffc_at((bitstr_t *)nv->label_index->free, n + 1, num_labels,
&n)) {
read_label(nv, n);
}
return (0);
}
struct nvdimm_dev *
nvdimm_find_by_handle(nfit_handle_t nv_handle)
{
@ -90,6 +321,7 @@ nvdimm_attach(device_t dev)
ACPI_TABLE_NFIT *nfitbl;
ACPI_HANDLE handle;
ACPI_STATUS status;
int error;
nv = device_get_softc(dev);
handle = nvdimm_root_get_acpi_handle(dev);
@ -107,6 +339,14 @@ nvdimm_attach(device_t dev)
acpi_nfit_get_flush_addrs(nfitbl, nv->nv_handle, &nv->nv_flush_addr,
&nv->nv_flush_addr_cnt);
AcpiPutTable(&nfitbl->Header);
error = read_label_area_size(nv);
if (error == 0) {
/*
* Ignoring errors reading labels. Not all NVDIMMs
* support labels and namespaces.
*/
read_labels(nv);
}
return (0);
}
@ -114,9 +354,15 @@ static int
nvdimm_detach(device_t dev)
{
struct nvdimm_dev *nv;
struct nvdimm_label_entry *label, *next;
nv = device_get_softc(dev);
free(nv->nv_flush_addr, M_NVDIMM);
free(nv->label_index, M_NVDIMM);
SLIST_FOREACH_SAFE(label, &nv->labels, link, next) {
SLIST_REMOVE_HEAD(&nv->labels, link);
free(label, M_NVDIMM);
}
return (0);
}
@ -216,6 +462,7 @@ nvdimm_root_create_spas(struct nvdimm_root_dev *dev, ACPI_TABLE_NFIT *nfitbl)
free(spa, M_NVDIMM);
break;
}
nvdimm_create_namespaces(spa_mapping, nfitbl);
SLIST_INSERT_HEAD(&dev->spas, spa_mapping, link);
}
free(spas, M_NVDIMM);
@ -273,6 +520,7 @@ nvdimm_root_detach(device_t dev)
root = device_get_softc(dev);
SLIST_FOREACH_SAFE(spa, &root->spas, link, next) {
nvdimm_destroy_namespaces(spa);
nvdimm_spa_fini(spa);
SLIST_REMOVE_HEAD(&root->spas, link);
free(spa, M_NVDIMM);
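Both label_index_is_valid() and read_label() above validate on-media structures the same way: save the stored checksum, zero the checksum field, recompute Fletcher-64 over the whole structure, and compare. Below is a small stand-alone illustration of that convention; the 256-byte buffer, its trailing 8-byte checksum field, and the writer half are made up for the example, and only the zero-then-recompute step mirrors the driver:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same Fletcher-64 as the driver: 32-bit data words, 32-bit running sums
 * a and b, folded into (b << 32 | a). */
static uint64_t
fletcher64(const void *data, size_t length)
{
	const uint32_t *d = data;
	uint32_t a = 0, b = 0;
	size_t i;

	for (i = 0; i < length / sizeof(uint32_t); i++) {
		a += d[i];
		b += a;
	}
	return ((uint64_t)b << 32 | a);
}

int
main(void)
{
	/* Toy 256-byte "label" whose last 8 bytes hold the checksum. */
	_Alignas(uint64_t) uint8_t label[256] = { 1, 2, 3, 4 };
	uint64_t sum, stored;

	/* Writer side: the checksum is computed with its own field zeroed. */
	memset(label + 248, 0, 8);
	sum = fletcher64(label, sizeof(label));
	memcpy(label + 248, &sum, sizeof(sum));

	/* Reader side, as in read_label(): save, zero, recompute, compare. */
	memcpy(&stored, label + 248, sizeof(stored));
	memset(label + 248, 0, 8);
	printf("checksum %s\n",
	    stored == fletcher64(label, sizeof(label)) ? "ok" : "mismatch");
	return (0);
}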

View File

@ -0,0 +1,97 @@
/*-
* Copyright (c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/uuid.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#include <dev/nvdimm/nvdimm_var.h>
int
nvdimm_create_namespaces(struct SPA_mapping *spa, ACPI_TABLE_NFIT *nfitbl)
{
ACPI_NFIT_MEMORY_MAP **regions;
struct nvdimm_dev *nv;
struct nvdimm_label_entry *e;
struct nvdimm_namespace *ns;
nfit_handle_t dimm_handle;
char *name;
int i, error, num_regions;
acpi_nfit_get_region_mappings_by_spa_range(nfitbl, spa->spa_nfit_idx,
&regions, &num_regions);
if (num_regions == 0 || num_regions != regions[0]->InterleaveWays) {
free(regions, M_NVDIMM);
return (ENXIO);
}
dimm_handle = regions[0]->DeviceHandle;
nv = nvdimm_find_by_handle(dimm_handle);
if (nv == NULL) {
free(regions, M_NVDIMM);
return (ENXIO);
}
i = 0;
error = 0;
SLIST_FOREACH(e, &nv->labels, link) {
ns = malloc(sizeof(struct nvdimm_namespace), M_NVDIMM,
M_WAITOK | M_ZERO);
ns->dev.spa_domain = spa->dev.spa_domain;
ns->dev.spa_phys_base = spa->dev.spa_phys_base +
regions[0]->RegionOffset +
num_regions *
(e->label.dimm_phys_addr - regions[0]->Address);
ns->dev.spa_len = num_regions * e->label.raw_size;
ns->dev.spa_efi_mem_flags = spa->dev.spa_efi_mem_flags;
asprintf(&name, M_NVDIMM, "spa%dns%d", spa->spa_nfit_idx, i);
error = nvdimm_spa_dev_init(&ns->dev, name);
free(name, M_NVDIMM);
if (error != 0)
break;
SLIST_INSERT_HEAD(&spa->namespaces, ns, link);
i++;
}
free(regions, M_NVDIMM);
return (error);
}
void
nvdimm_destroy_namespaces(struct SPA_mapping *spa)
{
struct nvdimm_namespace *ns, *next;
SLIST_FOREACH_SAFE(ns, &spa->namespaces, link, next) {
SLIST_REMOVE_HEAD(&spa->namespaces, link);
nvdimm_spa_dev_fini(&ns->dev);
free(ns, M_NVDIMM);
}
}

View File

@ -143,31 +143,31 @@ nvdimm_spa_type_from_uuid(struct uuid *uuid)
}
static vm_memattr_t
nvdimm_spa_memattr(struct SPA_mapping *spa)
nvdimm_spa_memattr(struct nvdimm_spa_dev *dev)
{
vm_memattr_t mode;
if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_WB) != 0)
if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WB) != 0)
mode = VM_MEMATTR_WRITE_BACK;
else if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_WT) != 0)
else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WT) != 0)
mode = VM_MEMATTR_WRITE_THROUGH;
else if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_WC) != 0)
else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WC) != 0)
mode = VM_MEMATTR_WRITE_COMBINING;
else if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_WP) != 0)
else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WP) != 0)
mode = VM_MEMATTR_WRITE_PROTECTED;
else if ((spa->spa_efi_mem_flags & EFI_MD_ATTR_UC) != 0)
else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_UC) != 0)
mode = VM_MEMATTR_UNCACHEABLE;
else {
if (bootverbose)
printf("SPA%d mapping attr unsupported\n",
spa->spa_nfit_idx);
printf("SPA mapping attr %#lx unsupported\n",
dev->spa_efi_mem_flags);
mode = VM_MEMATTR_UNCACHEABLE;
}
return (mode);
}
static int
nvdimm_spa_uio(struct SPA_mapping *spa, struct uio *uio)
nvdimm_spa_uio(struct nvdimm_spa_dev *dev, struct uio *uio)
{
struct vm_page m, *ma;
off_t off;
@ -175,14 +175,14 @@ nvdimm_spa_uio(struct SPA_mapping *spa, struct uio *uio)
int error, n;
error = 0;
if (spa->spa_kva == NULL) {
mattr = nvdimm_spa_memattr(spa);
if (dev->spa_kva == NULL) {
mattr = nvdimm_spa_memattr(dev);
vm_page_initfake(&m, 0, mattr);
ma = &m;
while (uio->uio_resid > 0) {
if (uio->uio_offset >= spa->spa_len)
if (uio->uio_offset >= dev->spa_len)
break;
off = spa->spa_phys_base + uio->uio_offset;
off = dev->spa_phys_base + uio->uio_offset;
vm_page_updatefake(&m, trunc_page(off), mattr);
n = PAGE_SIZE;
if (n > uio->uio_resid)
@ -193,14 +193,14 @@ nvdimm_spa_uio(struct SPA_mapping *spa, struct uio *uio)
}
} else {
while (uio->uio_resid > 0) {
if (uio->uio_offset >= spa->spa_len)
if (uio->uio_offset >= dev->spa_len)
break;
n = INT_MAX;
if (n > uio->uio_resid)
n = uio->uio_resid;
if (uio->uio_offset + n > spa->spa_len)
n = spa->spa_len - uio->uio_offset;
error = uiomove((char *)spa->spa_kva + uio->uio_offset,
if (uio->uio_offset + n > dev->spa_len)
n = dev->spa_len - uio->uio_offset;
error = uiomove((char *)dev->spa_kva + uio->uio_offset,
n, uio);
if (error != 0)
break;
@ -217,20 +217,20 @@ nvdimm_spa_rw(struct cdev *dev, struct uio *uio, int ioflag)
}
static int
nvdimm_spa_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
nvdimm_spa_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
struct thread *td)
{
struct SPA_mapping *spa;
struct nvdimm_spa_dev *dev;
int error;
spa = dev->si_drv1;
dev = cdev->si_drv1;
error = 0;
switch (cmd) {
case DIOCGSECTORSIZE:
*(u_int *)data = DEV_BSIZE;
break;
case DIOCGMEDIASIZE:
*(off_t *)data = spa->spa_len;
*(off_t *)data = dev->spa_len;
break;
default:
error = ENOTTY;
@ -240,19 +240,19 @@ nvdimm_spa_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
}
static int
nvdimm_spa_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
nvdimm_spa_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
vm_object_t *objp, int nprot)
{
struct SPA_mapping *spa;
struct nvdimm_spa_dev *dev;
spa = dev->si_drv1;
if (spa->spa_obj == NULL)
dev = cdev->si_drv1;
if (dev->spa_obj == NULL)
return (ENXIO);
if (*offset >= spa->spa_len || *offset + size < *offset ||
*offset + size > spa->spa_len)
if (*offset >= dev->spa_len || *offset + size < *offset ||
*offset + size > dev->spa_len)
return (EINVAL);
vm_object_reference(spa->spa_obj);
*objp = spa->spa_obj;
vm_object_reference(dev->spa_obj);
*objp = dev->spa_obj;
return (0);
}
@ -267,18 +267,17 @@ static struct cdevsw spa_cdevsw = {
};
static void
nvdimm_spa_g_all_unmapped(struct SPA_mapping *spa, struct bio *bp,
int rw)
nvdimm_spa_g_all_unmapped(struct nvdimm_spa_dev *dev, struct bio *bp, int rw)
{
struct vm_page maa[bp->bio_ma_n];
vm_page_t ma[bp->bio_ma_n];
vm_memattr_t mattr;
int i;
mattr = nvdimm_spa_memattr(spa);
mattr = nvdimm_spa_memattr(dev);
for (i = 0; i < nitems(ma); i++) {
maa[i].flags = 0;
vm_page_initfake(&maa[i], spa->spa_phys_base +
vm_page_initfake(&maa[i], dev->spa_phys_base +
trunc_page(bp->bio_offset) + PAGE_SIZE * i, mattr);
ma[i] = &maa[i];
}
@ -293,30 +292,30 @@ nvdimm_spa_g_all_unmapped(struct SPA_mapping *spa, struct bio *bp,
static void
nvdimm_spa_g_thread(void *arg)
{
struct SPA_mapping *spa;
struct g_spa *sc;
struct bio *bp;
struct uio auio;
struct iovec aiovec;
int error;
spa = arg;
sc = arg;
for (;;) {
mtx_lock(&spa->spa_g_mtx);
mtx_lock(&sc->spa_g_mtx);
for (;;) {
bp = bioq_takefirst(&spa->spa_g_queue);
bp = bioq_takefirst(&sc->spa_g_queue);
if (bp != NULL)
break;
msleep(&spa->spa_g_queue, &spa->spa_g_mtx, PRIBIO,
msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO,
"spa_g", 0);
if (!spa->spa_g_proc_run) {
spa->spa_g_proc_exiting = true;
wakeup(&spa->spa_g_queue);
mtx_unlock(&spa->spa_g_mtx);
if (!sc->spa_g_proc_run) {
sc->spa_g_proc_exiting = true;
wakeup(&sc->spa_g_queue);
mtx_unlock(&sc->spa_g_mtx);
kproc_exit(0);
}
continue;
}
mtx_unlock(&spa->spa_g_mtx);
mtx_unlock(&sc->spa_g_mtx);
if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE &&
bp->bio_cmd != BIO_FLUSH) {
error = EOPNOTSUPP;
@ -325,13 +324,15 @@ nvdimm_spa_g_thread(void *arg)
error = 0;
if (bp->bio_cmd == BIO_FLUSH) {
if (spa->spa_kva != NULL) {
pmap_large_map_wb(spa->spa_kva, spa->spa_len);
if (sc->dev->spa_kva != NULL) {
pmap_large_map_wb(sc->dev->spa_kva,
sc->dev->spa_len);
} else {
pmap_flush_cache_phys_range(
(vm_paddr_t)spa->spa_phys_base,
(vm_paddr_t)spa->spa_phys_base +
spa->spa_len, nvdimm_spa_memattr(spa));
(vm_paddr_t)sc->dev->spa_phys_base,
(vm_paddr_t)sc->dev->spa_phys_base +
sc->dev->spa_len,
nvdimm_spa_memattr(sc->dev));
}
/*
* XXX flush IMC
@ -340,8 +341,8 @@ nvdimm_spa_g_thread(void *arg)
}
if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
if (spa->spa_kva != NULL) {
aiovec.iov_base = (char *)spa->spa_kva +
if (sc->dev->spa_kva != NULL) {
aiovec.iov_base = (char *)sc->dev->spa_kva +
bp->bio_offset;
aiovec.iov_len = bp->bio_length;
auio.uio_iov = &aiovec;
@ -356,7 +357,8 @@ nvdimm_spa_g_thread(void *arg)
bp->bio_ma_offset, bp->bio_length, &auio);
bp->bio_resid = auio.uio_resid;
} else {
nvdimm_spa_g_all_unmapped(spa, bp, bp->bio_cmd);
nvdimm_spa_g_all_unmapped(sc->dev, bp,
bp->bio_cmd);
bp->bio_resid = bp->bio_length;
error = 0;
}
@ -371,11 +373,11 @@ nvdimm_spa_g_thread(void *arg)
auio.uio_rw = bp->bio_cmd == BIO_READ ? UIO_READ :
UIO_WRITE;
auio.uio_td = curthread;
error = nvdimm_spa_uio(spa, &auio);
error = nvdimm_spa_uio(sc->dev, &auio);
bp->bio_resid = auio.uio_resid;
}
bp->bio_bcount = bp->bio_length;
devstat_end_transaction_bio(spa->spa_g_devstat, bp);
devstat_end_transaction_bio(sc->spa_g_devstat, bp);
completed:
bp->bio_completed = bp->bio_length;
g_io_deliver(bp, error);
@ -385,18 +387,18 @@ nvdimm_spa_g_thread(void *arg)
static void
nvdimm_spa_g_start(struct bio *bp)
{
struct SPA_mapping *spa;
struct g_spa *sc;
spa = bp->bio_to->geom->softc;
sc = bp->bio_to->geom->softc;
if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
mtx_lock(&spa->spa_g_stat_mtx);
devstat_start_transaction_bio(spa->spa_g_devstat, bp);
mtx_unlock(&spa->spa_g_stat_mtx);
mtx_lock(&sc->spa_g_stat_mtx);
devstat_start_transaction_bio(sc->spa_g_devstat, bp);
mtx_unlock(&sc->spa_g_stat_mtx);
}
mtx_lock(&spa->spa_g_mtx);
bioq_disksort(&spa->spa_g_queue, bp);
wakeup(&spa->spa_g_queue);
mtx_unlock(&spa->spa_g_mtx);
mtx_lock(&sc->spa_g_mtx);
bioq_disksort(&sc->spa_g_queue, bp);
wakeup(&sc->spa_g_queue);
mtx_unlock(&sc->spa_g_mtx);
}
static int
@ -406,11 +408,16 @@ nvdimm_spa_g_access(struct g_provider *pp, int r, int w, int e)
return (0);
}
static struct g_geom * nvdimm_spa_g_create(struct nvdimm_spa_dev *dev,
const char *name);
static g_ctl_destroy_geom_t nvdimm_spa_g_destroy_geom;
struct g_class nvdimm_spa_g_class = {
.name = "SPA",
.version = G_VERSION,
.start = nvdimm_spa_g_start,
.access = nvdimm_spa_g_access,
.destroy_geom = nvdimm_spa_g_destroy_geom,
};
DECLARE_GEOM_CLASS(nvdimm_spa_g_class, g_spa);
@ -418,49 +425,63 @@ int
nvdimm_spa_init(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
enum SPA_mapping_type spa_type)
{
struct make_dev_args mda;
struct sglist *spa_sg;
int error, error1;
char *name;
int error;
spa->spa_type = spa_type;
spa->spa_domain = ((nfitaddr->Flags & ACPI_NFIT_PROXIMITY_VALID) != 0) ?
nfitaddr->ProximityDomain : -1;
spa->spa_nfit_idx = nfitaddr->RangeIndex;
spa->spa_phys_base = nfitaddr->Address;
spa->spa_len = nfitaddr->Length;
spa->spa_efi_mem_flags = nfitaddr->MemoryMapping;
spa->dev.spa_domain =
((nfitaddr->Flags & ACPI_NFIT_PROXIMITY_VALID) != 0) ?
nfitaddr->ProximityDomain : -1;
spa->dev.spa_phys_base = nfitaddr->Address;
spa->dev.spa_len = nfitaddr->Length;
spa->dev.spa_efi_mem_flags = nfitaddr->MemoryMapping;
if (bootverbose) {
printf("NVDIMM SPA%d base %#016jx len %#016jx %s fl %#jx\n",
spa->spa_nfit_idx,
(uintmax_t)spa->spa_phys_base, (uintmax_t)spa->spa_len,
(uintmax_t)spa->dev.spa_phys_base,
(uintmax_t)spa->dev.spa_len,
nvdimm_SPA_uuid_list[spa_type].u_name,
spa->spa_efi_mem_flags);
spa->dev.spa_efi_mem_flags);
}
if (!nvdimm_SPA_uuid_list[spa_type].u_usr_acc)
return (0);
error1 = pmap_large_map(spa->spa_phys_base, spa->spa_len,
&spa->spa_kva, nvdimm_spa_memattr(spa));
asprintf(&name, M_NVDIMM, "spa%d", spa->spa_nfit_idx);
error = nvdimm_spa_dev_init(&spa->dev, name);
free(name, M_NVDIMM);
return (error);
}
int
nvdimm_spa_dev_init(struct nvdimm_spa_dev *dev, const char *name)
{
struct make_dev_args mda;
struct sglist *spa_sg;
char *devname;
int error, error1;
error1 = pmap_large_map(dev->spa_phys_base, dev->spa_len,
&dev->spa_kva, nvdimm_spa_memattr(dev));
if (error1 != 0) {
printf("NVDIMM SPA%d cannot map into KVA, error %d\n",
spa->spa_nfit_idx, error1);
spa->spa_kva = NULL;
printf("NVDIMM %s cannot map into KVA, error %d\n", name,
error1);
dev->spa_kva = NULL;
}
spa_sg = sglist_alloc(1, M_WAITOK);
error = sglist_append_phys(spa_sg, spa->spa_phys_base,
spa->spa_len);
error = sglist_append_phys(spa_sg, dev->spa_phys_base,
dev->spa_len);
if (error == 0) {
spa->spa_obj = vm_pager_allocate(OBJT_SG, spa_sg, spa->spa_len,
dev->spa_obj = vm_pager_allocate(OBJT_SG, spa_sg, dev->spa_len,
VM_PROT_ALL, 0, NULL);
if (spa->spa_obj == NULL) {
printf("NVDIMM SPA%d failed to alloc vm object",
spa->spa_nfit_idx);
if (dev->spa_obj == NULL) {
printf("NVDIMM %s failed to alloc vm object", name);
sglist_free(spa_sg);
}
} else {
printf("NVDIMM SPA%d failed to init sglist, error %d",
spa->spa_nfit_idx, error);
printf("NVDIMM %s failed to init sglist, error %d", name,
error);
sglist_free(spa_sg);
}
@ -471,78 +492,112 @@ nvdimm_spa_init(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
mda.mda_uid = UID_ROOT;
mda.mda_gid = GID_OPERATOR;
mda.mda_mode = 0660;
mda.mda_si_drv1 = spa;
error = make_dev_s(&mda, &spa->spa_dev, "nvdimm_spa%d",
spa->spa_nfit_idx);
mda.mda_si_drv1 = dev;
asprintf(&devname, M_NVDIMM, "nvdimm_%s", name);
error = make_dev_s(&mda, &dev->spa_dev, "%s", devname);
free(devname, M_NVDIMM);
if (error != 0) {
printf("NVDIMM SPA%d cannot create devfs node, error %d\n",
spa->spa_nfit_idx, error);
printf("NVDIMM %s cannot create devfs node, error %d\n", name,
error);
if (error1 == 0)
error1 = error;
}
dev->spa_g = nvdimm_spa_g_create(dev, name);
if (dev->spa_g == NULL && error1 == 0)
error1 = ENXIO;
return (error1);
}
bioq_init(&spa->spa_g_queue);
mtx_init(&spa->spa_g_mtx, "spag", NULL, MTX_DEF);
mtx_init(&spa->spa_g_stat_mtx, "spagst", NULL, MTX_DEF);
spa->spa_g_proc_run = true;
spa->spa_g_proc_exiting = false;
error = kproc_create(nvdimm_spa_g_thread, spa, &spa->spa_g_proc, 0, 0,
"g_spa%d", spa->spa_nfit_idx);
static struct g_geom *
nvdimm_spa_g_create(struct nvdimm_spa_dev *dev, const char *name)
{
struct g_geom *gp;
struct g_spa *sc;
int error;
gp = NULL;
sc = malloc(sizeof(struct g_spa), M_NVDIMM, M_WAITOK | M_ZERO);
sc->dev = dev;
bioq_init(&sc->spa_g_queue);
mtx_init(&sc->spa_g_mtx, "spag", NULL, MTX_DEF);
mtx_init(&sc->spa_g_stat_mtx, "spagst", NULL, MTX_DEF);
sc->spa_g_proc_run = true;
sc->spa_g_proc_exiting = false;
error = kproc_create(nvdimm_spa_g_thread, sc, &sc->spa_g_proc, 0, 0,
"g_spa");
if (error != 0) {
printf("NVDIMM SPA%d cannot create geom worker, error %d\n",
spa->spa_nfit_idx, error);
if (error1 == 0)
error1 = error;
mtx_destroy(&sc->spa_g_mtx);
mtx_destroy(&sc->spa_g_stat_mtx);
free(sc, M_NVDIMM);
printf("NVDIMM %s cannot create geom worker, error %d\n", name,
error);
} else {
g_topology_lock();
spa->spa_g = g_new_geomf(&nvdimm_spa_g_class, "spa%d",
spa->spa_nfit_idx);
spa->spa_g->softc = spa;
spa->spa_p = g_new_providerf(spa->spa_g, "spa%d",
spa->spa_nfit_idx);
spa->spa_p->mediasize = spa->spa_len;
spa->spa_p->sectorsize = DEV_BSIZE;
spa->spa_p->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE |
gp = g_new_geomf(&nvdimm_spa_g_class, "%s", name);
gp->softc = sc;
sc->spa_p = g_new_providerf(gp, "%s", name);
sc->spa_p->mediasize = dev->spa_len;
sc->spa_p->sectorsize = DEV_BSIZE;
sc->spa_p->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE |
G_PF_ACCEPT_UNMAPPED;
g_error_provider(spa->spa_p, 0);
spa->spa_g_devstat = devstat_new_entry("spa", spa->spa_nfit_idx,
DEV_BSIZE, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
g_error_provider(sc->spa_p, 0);
sc->spa_g_devstat = devstat_new_entry("spa", -1, DEV_BSIZE,
DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
DEVSTAT_PRIORITY_MAX);
g_topology_unlock();
}
return (error1);
return (gp);
}
void
nvdimm_spa_fini(struct SPA_mapping *spa)
{
mtx_lock(&spa->spa_g_mtx);
spa->spa_g_proc_run = false;
wakeup(&spa->spa_g_queue);
while (!spa->spa_g_proc_exiting)
msleep(&spa->spa_g_queue, &spa->spa_g_mtx, PRIBIO, "spa_e", 0);
mtx_unlock(&spa->spa_g_mtx);
if (spa->spa_g != NULL) {
g_topology_lock();
g_wither_geom(spa->spa_g, ENXIO);
g_topology_unlock();
spa->spa_g = NULL;
spa->spa_p = NULL;
}
if (spa->spa_g_devstat != NULL) {
devstat_remove_entry(spa->spa_g_devstat);
spa->spa_g_devstat = NULL;
}
if (spa->spa_dev != NULL) {
destroy_dev(spa->spa_dev);
spa->spa_dev = NULL;
}
vm_object_deallocate(spa->spa_obj);
if (spa->spa_kva != NULL) {
pmap_large_unmap(spa->spa_kva, spa->spa_len);
spa->spa_kva = NULL;
}
mtx_destroy(&spa->spa_g_mtx);
mtx_destroy(&spa->spa_g_stat_mtx);
nvdimm_spa_dev_fini(&spa->dev);
}
void
nvdimm_spa_dev_fini(struct nvdimm_spa_dev *dev)
{
if (dev->spa_g != NULL) {
g_topology_lock();
nvdimm_spa_g_destroy_geom(NULL, dev->spa_g->class, dev->spa_g);
g_topology_unlock();
}
if (dev->spa_dev != NULL) {
destroy_dev(dev->spa_dev);
dev->spa_dev = NULL;
}
vm_object_deallocate(dev->spa_obj);
if (dev->spa_kva != NULL) {
pmap_large_unmap(dev->spa_kva, dev->spa_len);
dev->spa_kva = NULL;
}
}
static int
nvdimm_spa_g_destroy_geom(struct gctl_req *req, struct g_class *cp,
struct g_geom *gp)
{
struct g_spa *sc;
sc = gp->softc;
mtx_lock(&sc->spa_g_mtx);
sc->spa_g_proc_run = false;
wakeup(&sc->spa_g_queue);
while (!sc->spa_g_proc_exiting)
msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO, "spa_e", 0);
mtx_unlock(&sc->spa_g_mtx);
g_topology_assert();
g_wither_geom(gp, ENXIO);
sc->spa_p = NULL;
if (sc->spa_g_devstat != NULL) {
devstat_remove_entry(sc->spa_g_devstat);
sc->spa_g_devstat = NULL;
}
mtx_destroy(&sc->spa_g_mtx);
mtx_destroy(&sc->spa_g_stat_mtx);
free(sc, M_NVDIMM);
return (0);
}

View File

@ -33,6 +33,51 @@
#ifndef __DEV_NVDIMM_VAR_H__
#define __DEV_NVDIMM_VAR_H__
#define NVDIMM_INDEX_BLOCK_SIGNATURE "NAMESPACE_INDEX"
struct nvdimm_label_index {
char signature[16];
uint8_t flags[3];
uint8_t label_size;
uint32_t seq;
uint64_t this_offset;
uint64_t this_size;
uint64_t other_offset;
uint64_t label_offset;
uint32_t slot_cnt;
uint16_t rev_major;
uint16_t rev_minor;
uint64_t checksum;
uint8_t free[0];
};
struct nvdimm_label {
struct uuid uuid;
char name[64];
uint32_t flags;
uint16_t nlabel;
uint16_t position;
uint64_t set_cookie;
uint64_t lba_size;
uint64_t dimm_phys_addr;
uint64_t raw_size;
uint32_t slot;
uint8_t alignment;
uint8_t reserved[3];
struct uuid type_guid;
struct uuid address_abstraction_guid;
uint8_t reserved1[88];
uint64_t checksum;
};
struct nvdimm_label_entry {
SLIST_ENTRY(nvdimm_label_entry) link;
struct nvdimm_label label;
};
_Static_assert(sizeof(struct nvdimm_label_index) == 72, "Incorrect layout");
_Static_assert(sizeof(struct nvdimm_label) == 256, "Incorrect layout");
typedef uint32_t nfit_handle_t;
enum nvdimm_root_ivar {
@ -53,6 +98,10 @@ struct nvdimm_dev {
nfit_handle_t nv_handle;
uint64_t **nv_flush_addr;
int nv_flush_addr_cnt;
uint32_t label_area_size;
uint32_t max_label_xfer;
struct nvdimm_label_index *label_index;
SLIST_HEAD(, nvdimm_label_entry) labels;
};
enum SPA_mapping_type {
@ -67,28 +116,43 @@ enum SPA_mapping_type {
SPA_TYPE_UNKNOWN = 127,
};
struct SPA_mapping {
SLIST_ENTRY(SPA_mapping) link;
enum SPA_mapping_type spa_type;
struct nvdimm_spa_dev {
int spa_domain;
int spa_nfit_idx;
uint64_t spa_phys_base;
uint64_t spa_len;
uint64_t spa_efi_mem_flags;
void *spa_kva;
struct vm_object *spa_obj;
struct cdev *spa_dev;
struct g_geom *spa_g;
};
struct g_spa {
struct nvdimm_spa_dev *dev;
struct g_provider *spa_p;
struct bio_queue_head spa_g_queue;
struct mtx spa_g_mtx;
struct mtx spa_g_stat_mtx;
struct devstat *spa_g_devstat;
struct proc *spa_g_proc;
struct vm_object *spa_obj;
bool spa_g_proc_run;
bool spa_g_proc_exiting;
};
struct nvdimm_namespace {
SLIST_ENTRY(nvdimm_namespace) link;
struct SPA_mapping *spa;
struct nvdimm_spa_dev dev;
};
struct SPA_mapping {
SLIST_ENTRY(SPA_mapping) link;
enum SPA_mapping_type spa_type;
int spa_nfit_idx;
struct nvdimm_spa_dev dev;
SLIST_HEAD(, nvdimm_namespace) namespaces;
};
MALLOC_DECLARE(M_NVDIMM);
void acpi_nfit_get_dimm_ids(ACPI_TABLE_NFIT *nfitbl, nfit_handle_t **listp,
@ -108,5 +172,9 @@ struct nvdimm_dev *nvdimm_find_by_handle(nfit_handle_t nv_handle);
int nvdimm_spa_init(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
enum SPA_mapping_type spa_type);
void nvdimm_spa_fini(struct SPA_mapping *spa);
int nvdimm_spa_dev_init(struct nvdimm_spa_dev *dev, const char *name);
void nvdimm_spa_dev_fini(struct nvdimm_spa_dev *dev);
int nvdimm_create_namespaces(struct SPA_mapping *spa, ACPI_TABLE_NFIT *nfitbl);
void nvdimm_destroy_namespaces(struct SPA_mapping *spa);
#endif /* __DEV_NVDIMM_VAR_H__ */
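A quick cross-check on the layout above: read_label() in nvdimm.c locates slot n at label_offset + n * (128 << label_size), and label_index_is_valid() only accepts label_size == 1, so each slot is 128 << 1 = 256 bytes — exactly the size the _Static_assert pins on struct nvdimm_label, with the trailing uint64_t checksum at byte offset 248.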

View File

@ -1820,12 +1820,6 @@ tiNumOfLunIOCTLreq(
break;
}
tdIORequestBody = (tdIORequestBody_t *)tiRequestBody;
if(tdIORequestBody == agNULL)
{
status = IOCTL_CALL_FAIL;
break;
}
tdIORequestBody->tiIORequest = tiIORequest;
/* save context if we need to abort later */

View File

@ -5,6 +5,7 @@
KMOD= nvdimm
SRCS= nvdimm.c \
nvdimm_nfit.c \
nvdimm_ns.c \
nvdimm_spa.c
SRCS+= acpi_if.h bus_if.h device_if.h

View File

@ -374,8 +374,6 @@ main(int argc, char **argv)
while (wait(NULL) > 0 || errno == EINTR)
;
free(timefnamefmt);
free(requestor);
return (0);
}
@ -793,9 +791,6 @@ usage(void)
fprintf(stderr,
"usage: newsyslog [-CFNPnrsv] [-a directory] [-d directory] [-f config_file]\n"
" [-S pidfile] [-t timefmt] [[-R tagname] file ...]\n");
/* Free global dynamically-allocated storage. */
free(timefnamefmt);
free(requestor);
exit(1);
}

View File

@ -21,9 +21,10 @@
.Nd system console control and configuration utility
.Sh SYNOPSIS
.Nm
.Op Fl CdLHPpx
.Op Fl CdHLPpx
.Op Fl b Ar color
.Op Fl c Ar appearance
.Op Fl E Ar emulator
.Oo
.Fl f
.Oo
@ -282,6 +283,11 @@ Show the current changes.
.El
.It Fl d
Print out current output screen map.
.It Fl E Ar emulator
Set the terminal emulator to
.Ar emulator .
.It Fl e
Show the active and available terminal emulators.
.It Xo
.Fl f
.Oo

View File

@ -204,11 +204,11 @@ usage(void)
" [foreground [background]] [show]");
else
fprintf(stderr, "%s\n%s\n%s\n%s\n%s\n%s\n",
"usage: vidcontrol [-CdHLPpx] [-b color] [-c appearance] [-f [size] file]",
" [-g geometry] [-h size] [-i active | adapter | mode]",
" [-l screen_map] [-M char] [-m on | off]",
" [-r foreground background] [-S on | off] [-s number]",
" [-T xterm | cons25] [-t N | off] [mode]",
"usage: vidcontrol [-CdHLPpx] [-b color] [-c appearance] [-E emulator]",
" [-f [[size] file]] [-g geometry] [-h size]",
" [-i active | adapter | mode] [-l screen_map] [-M char]",
" [-m on | off] [-r foreground background] [-S on | off]",
" [-s number] [-T xterm | cons25] [-t N | off] [mode]",
" [foreground [background]] [show]");
exit(1);
}
@ -1384,6 +1384,45 @@ clear_history(void)
}
}
static int
get_terminal_emulator(int i, struct term_info *tip)
{
tip->ti_index = i;
if (ioctl(0, CONS_GETTERM, tip) == 0)
return (1);
strlcpy((char *)tip->ti_name, "unknown", sizeof(tip->ti_name));
strlcpy((char *)tip->ti_desc, "unknown", sizeof(tip->ti_desc));
return (0);
}
static void
get_terminal_emulators(void)
{
struct term_info ti;
int i;
for (i = 0; i < 10; i++) {
if (get_terminal_emulator(i, &ti) == 0)
break;
printf("%d: %s (%s)%s\n", i, ti.ti_name, ti.ti_desc,
i == 0 ? " (active)" : "");
}
}
static void
set_terminal_emulator(const char *name)
{
struct term_info old_ti, ti;
get_terminal_emulator(0, &old_ti);
strlcpy((char *)ti.ti_name, name, sizeof(ti.ti_name));
if (ioctl(0, CONS_SETTERM, &ti) != 0)
warn("SETTERM '%s'", name);
get_terminal_emulator(0, &ti);
printf("%s (%s) -> %s (%s)\n", old_ti.ti_name, old_ti.ti_desc,
ti.ti_name, ti.ti_desc);
}
static void
set_terminal_mode(char *arg)
{
@ -1412,7 +1451,7 @@ main(int argc, char **argv)
if (vt4_mode)
opts = "b:Cc:fg:h:Hi:M:m:pPr:S:s:T:t:x";
else
opts = "b:Cc:dfg:h:Hi:l:LM:m:pPr:S:s:T:t:x";
opts = "b:Cc:deE:fg:h:Hi:l:LM:m:pPr:S:s:T:t:x";
while ((opt = getopt(argc, argv, opts)) != -1)
switch(opt) {
@ -1430,6 +1469,16 @@ main(int argc, char **argv)
break;
print_scrnmap();
break;
case 'E':
if (vt4_mode)
break;
set_terminal_emulator(optarg);
break;
case 'e':
if (vt4_mode)
break;
get_terminal_emulators();
break;
case 'f':
optarg = nextarg(argc, argv, &optind, 'f', 0);
if (optarg != NULL) {