Clean up the pci_host_generic driver

- Support prefetchable memory.
- Use the correct rman when allocating memory and I/O ports.
- Translate PCI addresses in bus_alloc_resource to allow physical
  addresses that differ from PCI addresses (see the translation sketch
  below).

Reviewed by:	Robert Crowston <crowston_protonmail.com>
Sponsored by:	Innovate UK
Differential Revision:	https://reviews.freebsd.org/D25121
Author:	Andrew Turner
Date:	2020-06-17 19:56:17 +00:00
Commit:	9a7053ce96 (parent 3a6413d81e)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=362285

4 changed files with 118 additions and 104 deletions
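
The address translation added to bus_alloc_resource (third bullet above) boils down to offsetting a request by the difference between a range's PCI base and its CPU physical base. Below is a minimal standalone sketch of that arithmetic; the range values and the translate() helper are illustrative, not code from the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the driver's pcie_range tuples. */
struct range {
	uint64_t pci_base;	/* address as seen by PCI devices */
	uint64_t phys_base;	/* address as seen by the CPU */
	uint64_t size;
};

/* Translate a PCI window [start, end] into CPU physical addresses. */
static bool
translate(const struct range *r, size_t n, uint64_t *start, uint64_t *end)
{
	for (size_t i = 0; i < n; i++) {
		if (*start < r[i].pci_base ||
		    *start >= r[i].pci_base + r[i].size)
			continue;
		*start = *start - r[i].pci_base + r[i].phys_base;
		*end = *end - r[i].pci_base + r[i].phys_base;
		return (true);
	}
	return (false);
}

int
main(void)
{
	/* Made-up window: PCI 0x0-0xffffff decoded at CPU 0x3eff000000. */
	struct range r[] = {
		{ .pci_base = 0x0, .phys_base = 0x3eff000000, .size = 0x1000000 },
	};
	uint64_t start = 0x10000, end = 0x1ffff;

	if (translate(r, 1, &start, &end))
		printf("CPU range: %#jx-%#jx\n", (uintmax_t)start,
		    (uintmax_t)end);
	return (0);
}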

sys/dev/pci/pci_host_generic.c

@@ -70,8 +70,11 @@ int
pci_host_generic_core_attach(device_t dev)
{
struct generic_pcie_core_softc *sc;
uint64_t phys_base;
uint64_t pci_base;
uint64_t size;
int error;
int rid;
int rid, tuple;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -101,12 +104,23 @@ pci_host_generic_core_attach(device_t dev)
sc->bst = rman_get_bustag(sc->res);
sc->bsh = rman_get_bushandle(sc->res);
sc->has_pmem = false;
sc->pmem_rman.rm_type = RMAN_ARRAY;
sc->pmem_rman.rm_descr = "PCIe Prefetch Memory";
sc->mem_rman.rm_type = RMAN_ARRAY;
sc->mem_rman.rm_descr = "PCIe Memory";
sc->io_rman.rm_type = RMAN_ARRAY;
sc->io_rman.rm_descr = "PCIe IO window";
/* Initialize rman and allocate memory regions */
error = rman_init(&sc->pmem_rman);
if (error) {
device_printf(dev, "rman_init() failed. error = %d\n", error);
return (error);
}
error = rman_init(&sc->mem_rman);
if (error) {
device_printf(dev, "rman_init() failed. error = %d\n", error);
@@ -119,6 +133,39 @@ pci_host_generic_core_attach(device_t dev)
return (error);
}
for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
phys_base = sc->ranges[tuple].phys_base;
pci_base = sc->ranges[tuple].pci_base;
size = sc->ranges[tuple].size;
if (phys_base == 0 || size == 0)
continue; /* empty range element */
switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
case FLAG_TYPE_PMEM:
sc->has_pmem = true;
error = rman_manage_region(&sc->pmem_rman,
phys_base, phys_base + size - 1);
break;
case FLAG_TYPE_MEM:
error = rman_manage_region(&sc->mem_rman,
phys_base, phys_base + size - 1);
break;
case FLAG_TYPE_IO:
error = rman_manage_region(&sc->io_rman,
phys_base, phys_base + size - 1);
break;
default:
continue;
}
if (error) {
device_printf(dev, "rman_manage_region() failed."
"error = %d\n", error);
rman_fini(&sc->pmem_rman);
rman_fini(&sc->mem_rman);
rman_fini(&sc->io_rman);
return (error);
}
}
return (0);
}
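
The new attach path initializes one rman per window type, seeds it with rman_manage_region() for every decoded range, and unwinds with rman_fini() on failure. A compressed kernel-context sketch of that pattern; setup_window_rman() is a hypothetical helper, not a function in the driver:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>

/*
 * Hypothetical helper showing the rman life cycle used above:
 * initialize, publish one window, clean up if publishing fails.
 */
static int
setup_window_rman(struct rman *rm, const char *descr, uint64_t base,
    uint64_t size)
{
	int error;

	rm->rm_type = RMAN_ARRAY;
	rm->rm_descr = descr;
	error = rman_init(rm);
	if (error != 0)
		return (error);
	error = rman_manage_region(rm, base, base + size - 1);
	if (error != 0)
		rman_fini(rm);
	return (error);
}
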
@@ -236,13 +283,15 @@ generic_pcie_write_ivar(device_t dev, device_t child, int index,
}
static struct rman *
generic_pcie_rman(struct generic_pcie_core_softc *sc, int type)
generic_pcie_rman(struct generic_pcie_core_softc *sc, int type, int flags)
{
switch (type) {
case SYS_RES_IOPORT:
return (&sc->io_rman);
case SYS_RES_MEMORY:
if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0)
return (&sc->pmem_rman);
return (&sc->mem_rman);
default:
break;
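
generic_pcie_rman() now inspects the allocation flags, so a prefetchable request only lands in pmem_rman when the bridge actually decodes a prefetchable window (sc->has_pmem). For illustration, a sketch of how such a request is expressed; alloc_prefetchable_bar() is hypothetical, and the PCI bus code normally sets RF_PREFETCHABLE on prefetchable BARs by itself:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>

/* Sketch only: request BAR 0 as active, prefetchable memory. */
static struct resource *
alloc_prefetchable_bar(device_t dev)
{
	int rid = PCIR_BAR(0);

	return (bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | RF_PREFETCHABLE));
}
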
@@ -266,7 +315,7 @@ pci_host_generic_core_release_resource(device_t dev, device_t child, int type,
}
#endif
rm = generic_pcie_rman(sc, type);
rm = generic_pcie_rman(sc, type, rman_get_flags(res));
if (rm != NULL) {
KASSERT(rman_is_region_manager(res, rm), ("rman mismatch"));
rman_release_resource(res);
@@ -282,6 +331,11 @@ pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
struct generic_pcie_core_softc *sc;
struct resource *res;
struct rman *rm;
uint64_t phys_base;
uint64_t pci_base;
uint64_t size;
int i, space;
bool found;
sc = device_get_softc(dev);
@@ -292,11 +346,56 @@ pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
}
#endif
rm = generic_pcie_rman(sc, type);
rm = generic_pcie_rman(sc, type, flags);
if (rm == NULL)
return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
type, rid, start, end, count, flags));
/* Translate the address from a PCI address to a physical address */
switch (type) {
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
found = false;
for (i = 0; i < MAX_RANGES_TUPLES; i++) {
pci_base = sc->ranges[i].pci_base;
phys_base = sc->ranges[i].phys_base;
size = sc->ranges[i].size;
if (start < pci_base || start >= pci_base + size)
continue;
switch (FLAG_TYPE(sc->ranges[i].flags)) {
case FLAG_TYPE_MEM:
case FLAG_TYPE_PMEM:
space = SYS_RES_MEMORY;
break;
case FLAG_TYPE_IO:
space = SYS_RES_IOPORT;
break;
default:
space = -1;
continue;
}
if (type == space) {
start = start - pci_base + phys_base;
end = end - pci_base + phys_base;
found = true;
break;
}
}
if (!found) {
device_printf(dev,
"Failed to allocate %s resource %lx-%lx for %s\n",
type == SYS_RES_IOPORT ? "IOPORT" : "MEMORY",
start, end, device_get_nameunit(child));
return (NULL);
}
break;
default:
break;
}
if (bootverbose) {
device_printf(dev,
"rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n",
@@ -330,53 +429,15 @@ generic_pcie_activate_resource(device_t dev, device_t child, int type,
int rid, struct resource *r)
{
struct generic_pcie_core_softc *sc;
uint64_t phys_base;
uint64_t pci_base;
uint64_t size;
int found;
int res;
int i;
sc = device_get_softc(dev);
if ((res = rman_activate_resource(r)) != 0)
return (res);
switch (type) {
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
found = 0;
for (i = 0; i < MAX_RANGES_TUPLES; i++) {
pci_base = sc->ranges[i].pci_base;
phys_base = sc->ranges[i].phys_base;
size = sc->ranges[i].size;
if ((rman_get_start(r) >= pci_base) && (rman_get_start(r) < (pci_base + size))) {
found = 1;
break;
}
}
if (found) {
rman_set_start(r, rman_get_start(r) - pci_base + phys_base);
rman_set_end(r, rman_get_end(r) - pci_base + phys_base);
res = BUS_ACTIVATE_RESOURCE(device_get_parent(dev),
child, type, rid, r);
} else {
device_printf(dev,
"Failed to activate %s resource\n",
type == SYS_RES_IOPORT ? "IOPORT" : "MEMORY");
res = ENXIO;
}
break;
case SYS_RES_IRQ:
res = BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child,
type, rid, r);
break;
default:
break;
}
return (res);
return (BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type,
rid, r));
}
static int
@@ -416,7 +477,7 @@ generic_pcie_adjust_resource(device_t dev, device_t child, int type,
end));
#endif
rm = generic_pcie_rman(sc, type);
rm = generic_pcie_rman(sc, type, rman_get_flags(res));
if (rm != NULL)
return (rman_adjust_resource(res, start, end));
return (bus_generic_adjust_resource(dev, child, type, res, start, end));

sys/dev/pci/pci_host_generic.h

@@ -59,14 +59,20 @@ struct pcie_range {
uint64_t phys_base;
uint64_t size;
uint64_t flags;
#define FLAG_IO (1 << 0)
#define FLAG_MEM (1 << 1)
#define FLAG_TYPE(x) ((x) & FLAG_TYPE_MASK)
#define FLAG_TYPE_MASK 0x3
#define FLAG_TYPE_INVALID 0x0
#define FLAG_TYPE_IO 0x1
#define FLAG_TYPE_MEM 0x2
#define FLAG_TYPE_PMEM 0x3
};
struct generic_pcie_core_softc {
struct pcie_range ranges[MAX_RANGES_TUPLES];
int nranges;
int coherent;
bool has_pmem;
struct rman pmem_rman;
struct rman mem_rman;
struct rman io_rman;
struct resource *res;
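
The single-bit FLAG_IO/FLAG_MEM scheme had no room for a third, prefetchable variant, so the low two bits of the flags word now hold an enumerated type extracted with FLAG_TYPE(). A standalone illustration of that encoding; the macro values are copied from the header above, the rest is just a sanity check:

#include <assert.h>
#include <stdint.h>

#define	FLAG_TYPE(x)		((x) & FLAG_TYPE_MASK)
#define	FLAG_TYPE_MASK		0x3
#define	FLAG_TYPE_INVALID	0x0
#define	FLAG_TYPE_IO		0x1
#define	FLAG_TYPE_MEM		0x2
#define	FLAG_TYPE_PMEM		0x3

int
main(void)
{
	uint64_t flags = 0;

	/* A range starts out invalid and is tagged once parsed. */
	assert(FLAG_TYPE(flags) == FLAG_TYPE_INVALID);

	/* The two bits form one value, not independent IO/MEM/PMEM bits. */
	flags |= FLAG_TYPE_PMEM;
	assert(FLAG_TYPE(flags) == FLAG_TYPE_PMEM);
	return (0);
}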

sys/dev/pci/pci_host_generic_acpi.c

@@ -159,9 +159,9 @@ pci_host_generic_acpi_parse_resource(ACPI_RESOURCE *res, void *arg)
sc->base.ranges[r].phys_base = min + off;
sc->base.ranges[r].size = max - min + 1;
if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE)
sc->base.ranges[r].flags |= FLAG_MEM;
sc->base.ranges[r].flags |= FLAG_TYPE_MEM;
else if (res->Data.Address.ResourceType == ACPI_IO_RANGE)
sc->base.ranges[r].flags |= FLAG_IO;
sc->base.ranges[r].flags |= FLAG_TYPE_IO;
sc->base.nranges++;
} else if (res->Data.Address.ResourceType == ACPI_BUS_NUMBER_RANGE) {
sc->base.bus_start = min;
@@ -234,12 +234,8 @@ pci_host_generic_acpi_init(device_t dev)
{
struct generic_pcie_acpi_softc *sc;
ACPI_HANDLE handle;
uint64_t phys_base;
uint64_t pci_base;
uint64_t size;
ACPI_STATUS status;
int error;
int tuple;
sc = device_get_softc(dev);
handle = acpi_get_handle(dev);
@@ -279,29 +275,6 @@ pci_host_generic_acpi_init(device_t dev)
if (error != 0)
return (error);
for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
phys_base = sc->base.ranges[tuple].phys_base;
pci_base = sc->base.ranges[tuple].pci_base;
size = sc->base.ranges[tuple].size;
if (phys_base == 0 || size == 0)
continue; /* empty range element */
if (sc->base.ranges[tuple].flags & FLAG_MEM) {
error = rman_manage_region(&sc->base.mem_rman,
pci_base, pci_base + size - 1);
} else if (sc->base.ranges[tuple].flags & FLAG_IO) {
error = rman_manage_region(&sc->base.io_rman,
pci_base + PCI_IO_WINDOW_OFFSET,
pci_base + PCI_IO_WINDOW_OFFSET + size - 1);
} else
continue;
if (error) {
device_printf(dev, "rman_manage_region() failed."
"error = %d\n", error);
rman_fini(&sc->base.mem_rman);
return (error);
}
}
return (0);
}

sys/dev/pci/pci_host_generic_fdt.c

@@ -123,12 +123,8 @@ int
pci_host_generic_attach(device_t dev)
{
struct generic_pcie_fdt_softc *sc;
uint64_t phys_base;
uint64_t pci_base;
uint64_t size;
phandle_t node;
int error;
int tuple;
sc = device_get_softc(dev);
@@ -157,28 +153,6 @@ pci_host_generic_attach(device_t dev)
if (error != 0)
return (error);
for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
phys_base = sc->base.ranges[tuple].phys_base;
pci_base = sc->base.ranges[tuple].pci_base;
size = sc->base.ranges[tuple].size;
if (phys_base == 0 || size == 0)
continue; /* empty range element */
if (sc->base.ranges[tuple].flags & FLAG_MEM) {
error = rman_manage_region(&sc->base.mem_rman,
pci_base, pci_base + size - 1);
} else if (sc->base.ranges[tuple].flags & FLAG_IO) {
error = rman_manage_region(&sc->base.io_rman,
pci_base, pci_base + size - 1);
} else
continue;
if (error) {
device_printf(dev, "rman_manage_region() failed."
"error = %d\n", error);
rman_fini(&sc->base.mem_rman);
return (error);
}
}
ofw_bus_setup_iinfo(node, &sc->pci_iinfo, sizeof(cell_t));
device_add_child(dev, "pci", -1);
@@ -221,9 +195,9 @@ parse_pci_mem_ranges(device_t dev, struct generic_pcie_core_softc *sc)
attributes = (base_ranges[j++] >> SPACE_CODE_SHIFT) & \
SPACE_CODE_MASK;
if (attributes == SPACE_CODE_IO_SPACE) {
sc->ranges[i].flags |= FLAG_IO;
sc->ranges[i].flags |= FLAG_TYPE_IO;
} else {
sc->ranges[i].flags |= FLAG_MEM;
sc->ranges[i].flags |= FLAG_TYPE_MEM;
}
sc->ranges[i].pci_base = 0;
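
The attributes check above follows the IEEE 1275 PCI bus binding, where the upper address cell (phys.hi) of a ranges entry carries a 2-bit space code and a prefetchable bit. A small standalone decoder for that cell; the macro names and the sample value are illustrative, not the driver's SPACE_CODE_* definitions:

#include <stdint.h>
#include <stdio.h>

/* IEEE 1275 PCI bus binding: fields of the phys.hi cell in "ranges". */
#define	OFW_PCI_PHYS_HI_SPACE(x)	(((x) >> 24) & 0x3)
#define	OFW_PCI_PHYS_HI_PREFETCH(x)	(((x) >> 30) & 0x1)

#define	OFW_PCI_SPACE_CONFIG	0x0
#define	OFW_PCI_SPACE_IO	0x1
#define	OFW_PCI_SPACE_MEM32	0x2
#define	OFW_PCI_SPACE_MEM64	0x3

int
main(void)
{
	/* Made-up phys.hi: 64-bit memory space with the prefetchable bit. */
	uint32_t phys_hi = 0x43000000;

	printf("space=%u prefetchable=%u\n",
	    OFW_PCI_PHYS_HI_SPACE(phys_hi),
	    OFW_PCI_PHYS_HI_PREFETCH(phys_hi));
	return (0);
}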