First cut at MI support for PCI Message Signalled Interrupts (MSI):

- Add 3 new functions to the pci_if interface along with suitable wrappers
  to provide the device driver visible API:
  - pci_alloc_msi(dev, int *count) backed by PCI_ALLOC_MSI().  '*count'
    here is an in and out parameter.  The driver stores the desired number
    of messages in '*count' before calling the function.  On success,
    '*count' holds the number of messages allocated to the device.  Also on
    success, the driver can access the messages as SYS_RES_IRQ resources
    starting at rid 1.  Note that the legacy INTx interrupt resource will
    not be available when using MSI.  Note that this function will allocate
    either MSI or MSI-X messages depending on the device's capabilities and
    the 'hw.pci.enable_msix' and 'hw.pci.enable_msi' tunables.  Also note
    that the driver should activate the memory resource that holds the
    MSI-X table and pending bit array (PBA) before calling this function
    if the device supports MSI-X (see the driver-side sketch after this
    list).
  - pci_release_msi(dev) backed by PCI_RELEASE_MSI().  This function
    releases the messages allocated for this device.  All of the
    SYS_RES_IRQ resources need to be released for this function to succeed.
  - pci_msi_count(dev) backed by PCI_MSI_COUNT().  This function returns
    the maximum number of MSI or MSI-X messages supported by this device.
    MSI-X is preferred if present, but this function will honor the
    'hw.pci.enable_msix' and 'hw.pci.enable_msi' tunables.  This function
    should return the largest value that pci_alloc_msi() can return
    (assuming the MD code is able to allocate sufficient backing resources
    for all of the messages).
- Add default implementations for these 3 methods to the pci_driver generic
  PCI bus driver.  (The various other PCI bus drivers such as for ACPI and
  OFW will inherit these default implementations.)  These default
  implementations depend on 4 new pcib_if methods that bubble up through
  the PCI bridges to the MD code to allocate IRQ values and perform any
  needed MD setup:
  - PCIB_ALLOC_MSI() attempts to allocate a group of MSI messages.
  - PCIB_RELEASE_MSI() releases a group of MSI messages.
  - PCIB_ALLOC_MSIX() attempts to allocate a single MSI-X message.
  - PCIB_RELEASE_MSIX() releases a single MSI-X message.
- Add default implementations for these 4 methods that just pass the
  request up to the parent bus's parent bridge driver, and use these
  default implementations in the various MI PCI bridge drivers.
- Add MI functions for use by MD code when managing MSI and MSI-X
  interrupts (see the MD-side sketch after this list):
  - pci_enable_msi(dev, address, data) programs the MSI capability address
    and data registers for a group of MSI messages.
  - pci_enable_msix(dev, index, address, data) initializes a single MSI-X
    message in the MSI-X table.
  - pci_mask_msix(dev, index) masks a single MSI-X message.
  - pci_unmask_msix(dev, index) unmasks a single MSI-X message.
  - pci_pending_msix(dev, index) returns true if the specified MSI-X
    message is currently pending.
- Save the MSI capability address and data registers in the pci_cfgreg
  block in a PCI device's ivars and restore the values when a device is
  resumed.  Note that the MSI-X table is not currently restored during
  resume.
- Add constants for MSI-X register offsets and fields.
- Record interesting data about any MSI-X capability blocks we come
  across in the pci_cfgreg block in the ivars for PCI devices.
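
As a rough driver-side sketch of the new driver-visible API (not part of
this change): the foo_softc structure and foo_setup_interrupt() below are
placeholder names, and it is assumed that any BAR holding an MSI-X
table/PBA has already been mapped and activated as noted above.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>

struct foo_softc {
	struct resource	*irq_res;	/* Interrupt resource (rid 0 or 1). */
	void		*irq_cookie;
	int		 irq_rid;
	int		 msi_count;	/* 0 when using legacy INTx. */
};

static int
foo_setup_interrupt(device_t dev, struct foo_softc *sc, driver_intr_t *handler)
{
	int count, error;

	sc->irq_rid = 0;	/* Legacy INTx interrupt by default. */
	sc->msi_count = 0;

	/*
	 * Ask how many MSI/MSI-X messages this device could provide,
	 * subject to the hw.pci.enable_msi(x) tunables.
	 */
	count = pci_msi_count(dev);
	if (count > 0) {
		/* This driver only wants a single message. */
		count = 1;
		if (pci_alloc_msi(dev, &count) == 0) {
			/* Messages appear as SYS_RES_IRQ rids 1..count. */
			sc->irq_rid = 1;
			sc->msi_count = count;
		}
	}

	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE | (sc->msi_count != 0 ? 0 : RF_SHAREABLE));
	if (sc->irq_res == NULL) {
		error = ENXIO;
		goto fail;
	}
	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    handler, sc, &sc->irq_cookie);
	if (error == 0)
		return (0);

fail:
	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);
	/* All SYS_RES_IRQ resources must be released before this. */
	if (sc->msi_count != 0)
		pci_release_msi(dev);
	return (error);
}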

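On the MD side, a rough sketch of how interrupt-setup code might use the
new MSI-X helpers follows.  The foo_ names and the x86-style local APIC
address/data encoding are illustrative assumptions only; a real
implementation would derive the vector and destination from the IRQ
handed out by PCIB_ALLOC_MSIX().

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

/* Illustrative x86 local APIC MSI address window. */
#define	FOO_MSI_ADDR_BASE	0xfee00000ULL

static void
foo_md_program_msix(device_t dev, u_int index, u_int apic_id, u_int vector)
{
	uint64_t addr;
	uint32_t data;

	/* Encode the message the device will write for this vector. */
	addr = FOO_MSI_ADDR_BASE | ((uint64_t)apic_id << 12);
	data = vector;

	/* Write the MSI-X table entry, then clear its mask bit. */
	pci_enable_msix(dev, index, addr, data);
	pci_unmask_msix(dev, index);
}
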
Tested on:	em (i386, MSI), bce (amd64/i386, MSI), mpt (amd64, MSI-X)
Reviewed by:	scottl, grehan, jfv
MFC after:	2 months
Author:	John Baldwin
Date:	2006-11-13 21:47:30 +00:00
Parent:	818b0b4bdf
Commit:	9bf4c9c1b0
10 changed files with 613 additions and 8 deletions

@ -103,6 +103,10 @@ static device_method_t acpi_pcib_acpi_methods[] = {
DEVMETHOD(pcib_read_config, acpi_pcib_read_config),
DEVMETHOD(pcib_write_config, acpi_pcib_write_config),
DEVMETHOD(pcib_route_interrupt, acpi_pcib_acpi_route_interrupt),
DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi),
DEVMETHOD(pcib_release_msi, pcib_release_msi),
DEVMETHOD(pcib_alloc_msix, pcib_alloc_msix),
DEVMETHOD(pcib_release_msix, pcib_release_msix),
{0, 0}
};

@ -93,6 +93,10 @@ static device_method_t acpi_pcib_pci_methods[] = {
DEVMETHOD(pcib_read_config, pcib_read_config),
DEVMETHOD(pcib_write_config, pcib_write_config),
DEVMETHOD(pcib_route_interrupt, acpi_pcib_pci_route_interrupt),
DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi),
DEVMETHOD(pcib_release_msi, pcib_release_msi),
DEVMETHOD(pcib_alloc_msix, pcib_alloc_msix),
DEVMETHOD(pcib_release_msix, pcib_release_msix),
{0, 0}
};

@ -140,6 +140,9 @@ static device_method_t pci_methods[] = {
DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
DEVMETHOD(pci_release_msi, pci_release_msi_method),
DEVMETHOD(pci_msi_count, pci_msi_count_method),
{ 0, 0 }
};
@ -207,6 +210,16 @@ SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
&pci_do_power_resume, 1,
"Transition from D3 -> D0 on resume.");
static int pci_do_msi = 1;
TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
"Enable support for MSI interrupts");
static int pci_do_msix = 1;
TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
"Enable support for MSI-X interrupts");
/* Find a device_t by bus/slot/function */
device_t
@ -429,6 +442,7 @@ static void
pci_read_extcap(device_t pcib, pcicfgregs *cfg)
{
#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
uint32_t val;
int ptr, nextptr, ptrptr;
switch (cfg->hdrtype & PCIM_HDRTYPE) {
@ -469,14 +483,25 @@ pci_read_extcap(device_t pcib, pcicfgregs *cfg)
}
break;
case PCIY_MSI: /* PCI MSI */
cfg->msi.msi_location = ptr;
cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT)
cfg->msi.msi_data = PCIR_MSI_DATA_64BIT;
else
cfg->msi.msi_data = PCIR_MSI_DATA;
cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
PCIM_MSICTRL_MMC_MASK)>>1);
break;
case PCIY_MSIX: /* PCI MSI-X */
cfg->msix.msix_location = ptr;
cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
PCIM_MSIXCTRL_TABLE_SIZE) + 1;
val = REG(ptr + PCIR_MSIX_TABLE, 4);
cfg->msix.msix_table_bar = PCIR_BAR(val &
PCIM_MSIX_BIR_MASK);
cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
val = REG(ptr + PCIR_MSIX_PBA, 4);
cfg->msix.msix_pba_bar = PCIR_BAR(val &
PCIM_MSIX_BIR_MASK);
cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
break;
case PCIY_VPD: /* PCI Vital Product Data */
cfg->vpd.vpd_reg = ptr;
pci_read_vpd(pcib, cfg);
@ -870,6 +895,369 @@ pci_find_extcap_method(device_t dev, device_t child, int capability,
return (ENOENT);
}
/*
* Support for MSI-X message interrupts.
*/
void
pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
{
struct pci_devinfo *dinfo = device_get_ivars(dev);
pcicfgregs *cfg = &dinfo->cfg;
uint32_t offset;
KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
offset = cfg->msix.msix_table_offset + index * 16;
bus_write_4(cfg->msix.msix_table_res, offset, address & 0xffffffff);
bus_write_4(cfg->msix.msix_table_res, offset + 4, address >> 32);
bus_write_4(cfg->msix.msix_table_res, offset + 8, data);
}
void
pci_mask_msix(device_t dev, u_int index)
{
struct pci_devinfo *dinfo = device_get_ivars(dev);
pcicfgregs *cfg = &dinfo->cfg;
uint32_t offset, val;
KASSERT(cfg->msix.msix_msgnum > index, ("bogus index"));
offset = cfg->msix.msix_table_offset + index * 16 + 12;
val = bus_read_4(cfg->msix.msix_table_res, offset);
if (!(val & PCIM_MSIX_VCTRL_MASK)) {
val |= PCIM_MSIX_VCTRL_MASK;
bus_write_4(cfg->msix.msix_table_res, offset, val);
}
}
void
pci_unmask_msix(device_t dev, u_int index)
{
struct pci_devinfo *dinfo = device_get_ivars(dev);
pcicfgregs *cfg = &dinfo->cfg;
uint32_t offset, val;
KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
offset = cfg->msix.msix_table_offset + index * 16 + 12;
val = bus_read_4(cfg->msix.msix_table_res, offset);
if (val & PCIM_MSIX_VCTRL_MASK) {
val &= ~PCIM_MSIX_VCTRL_MASK;
bus_write_4(cfg->msix.msix_table_res, offset, val);
}
}
int
pci_pending_msix(device_t dev, u_int index)
{
struct pci_devinfo *dinfo = device_get_ivars(dev);
pcicfgregs *cfg = &dinfo->cfg;
uint32_t offset, bit;
KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
offset = cfg->msix.msix_pba_offset + (index / 4) * 4;
bit = 1 << index % 32;
return (bus_read_4(cfg->msix.msix_pba_res, offset) & bit);
}
static int
pci_alloc_msix(device_t dev, device_t child, int *count)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
struct resource_list_entry *rle;
int actual, error, i, irq, max;
/* MSI-X capability present? */
if (cfg->msix.msix_location == 0 || !pci_do_msix)
return (ENODEV);
/* Make sure the appropriate BARs are mapped. */
rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
cfg->msix.msix_table_bar);
if (rle == NULL || rle->res == NULL ||
!(rman_get_flags(rle->res) & RF_ACTIVE))
return (ENXIO);
cfg->msix.msix_table_res = rle->res;
if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
cfg->msix.msix_pba_bar);
if (rle == NULL || rle->res == NULL ||
!(rman_get_flags(rle->res) & RF_ACTIVE))
return (ENXIO);
}
cfg->msix.msix_pba_res = rle->res;
/* Already have allocated messages? */
if (cfg->msix.msix_alloc != 0)
return (ENXIO);
max = min(*count, cfg->msix.msix_msgnum);
for (i = 0; i < max; i++) {
/* Allocate a message. */
error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, i,
&irq);
if (error)
break;
resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
irq, 1);
}
actual = i;
/* Mask all vectors. */
for (i = 0; i < cfg->msix.msix_msgnum; i++)
pci_mask_msix(child, i);
/* Update control register to enable MSI-X. */
cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
cfg->msix.msix_ctrl, 2);
/* Update counts of alloc'd messages. */
cfg->msix.msix_alloc = actual;
*count = actual;
return (0);
}
static int
pci_release_msix(device_t dev, device_t child)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
struct resource_list_entry *rle;
int i;
/* Do we have any messages to release? */
if (cfg->msix.msix_alloc == 0)
return (ENODEV);
/* Make sure none of the resources are allocated. */
for (i = 0; i < cfg->msix.msix_alloc; i++) {
rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
KASSERT(rle != NULL, ("missing MSI resource"));
if (rle->res != NULL)
return (EBUSY);
}
/* Update control register to disable MSI-X. */
cfg->msix.msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
cfg->msix.msix_ctrl, 2);
/* Release the messages. */
for (i = 0; i < cfg->msix.msix_alloc; i++) {
rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
PCIB_RELEASE_MSIX(device_get_parent(dev), child,
rle->start);
resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
}
/* Update alloc count. */
cfg->msix.msix_alloc = 0;
return (0);
}
/*
* Support for MSI message signalled interrupts.
*/
void
pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
{
struct pci_devinfo *dinfo = device_get_ivars(dev);
pcicfgregs *cfg = &dinfo->cfg;
/* Write data and address values. */
cfg->msi.msi_addr = address;
cfg->msi.msi_data = data;
pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
address & 0xffffffff, 4);
if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
pci_write_config(dev, cfg->msi.msi_location +
PCIR_MSI_ADDR_HIGH, address >> 32, 4);
pci_write_config(dev, cfg->msi.msi_location +
PCIR_MSI_DATA_64BIT, data, 2);
} else
pci_write_config(dev, cfg->msi.msi_location +
PCIR_MSI_DATA, data, 2);
/* Enable MSI in the control register. */
cfg->msi.msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
cfg->msi.msi_ctrl, 2);
}
/*
* Restore MSI registers during resume. If MSI is enabled then
* restore the data and address registers in addition to the control
* register.
*/
static void
pci_resume_msi(device_t dev)
{
struct pci_devinfo *dinfo = device_get_ivars(dev);
pcicfgregs *cfg = &dinfo->cfg;
uint64_t address;
uint16_t data;
if (cfg->msi.msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
address = cfg->msi.msi_addr;
data = cfg->msi.msi_data;
pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
address & 0xffffffff, 4);
if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
pci_write_config(dev, cfg->msi.msi_location +
PCIR_MSI_ADDR_HIGH, address >> 32, 4);
pci_write_config(dev, cfg->msi.msi_location +
PCIR_MSI_DATA_64BIT, data, 2);
} else
pci_write_config(dev, cfg->msi.msi_location +
PCIR_MSI_DATA, data, 2);
}
pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
cfg->msi.msi_ctrl, 2);
}
/*
* Attempt to allocate *count MSI messages. The actual number allocated is
* returned in *count. After this function returns, each message will be
* available to the driver as SYS_RES_IRQ resources starting at rid 1.
*/
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
struct resource_list_entry *rle;
int actual, error, i, irqs[32];
uint16_t ctrl;
/* Don't let count == 0 get us into trouble. */
if (*count == 0)
return (EINVAL);
/* If rid 0 is allocated, then fail. */
rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
if (rle != NULL && rle->res != NULL)
return (ENXIO);
/* Try MSI-X first. */
error = pci_alloc_msix(dev, child, count);
if (error != ENODEV)
return (error);
/* MSI capability present? */
if (cfg->msi.msi_location == 0 || !pci_do_msi)
return (ENODEV);
/* Already have allocated messages? */
if (cfg->msi.msi_alloc != 0)
return (ENXIO);
/* Don't ask for more than the device supports. */
actual = min(*count, cfg->msi.msi_msgnum);
/* Don't ask for more than 32 messages. */
actual = min(actual, 32);
/* MSI requires power of 2 number of messages. */
if (!powerof2(actual))
return (EINVAL);
for (;;) {
/* Try to allocate N messages. */
error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
cfg->msi.msi_msgnum, irqs);
if (error == 0)
break;
if (actual == 1)
return (error);
/* Try N / 2. */
actual >>= 1;
}
/*
* We now have N actual messages mapped onto SYS_RES_IRQ
* resources in the irqs[] array, so add new resources
* starting at rid 1.
*/
for (i = 0; i < actual; i++)
resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
irqs[i], irqs[i], 1);
/* Update control register with actual count and enable MSI. */
ctrl = cfg->msi.msi_ctrl;
ctrl &= ~PCIM_MSICTRL_MME_MASK;
ctrl |= (ffs(actual) - 1) << 4;
cfg->msi.msi_ctrl = ctrl;
pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
/* Update counts of alloc'd messages. */
cfg->msi.msi_alloc = actual;
*count = actual;
return (0);
}
/* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
struct resource_list_entry *rle;
int error, i, irqs[32];
/* Try MSI-X first. */
error = pci_release_msix(dev, child);
if (error != ENODEV)
return (error);
/* Do we have any messages to release? */
if (cfg->msi.msi_alloc == 0)
return (ENODEV);
KASSERT(cfg->msi.msi_alloc <= 32, ("more than 32 alloc'd messages"));
/* Make sure none of the resources are allocated. */
for (i = 0; i < cfg->msi.msi_alloc; i++) {
rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
KASSERT(rle != NULL, ("missing MSI resource"));
if (rle->res != NULL)
return (EBUSY);
irqs[i] = rle->start;
}
/* Update control register with 0 count and disable MSI. */
cfg->msi.msi_ctrl &= ~(PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE);
pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL,
cfg->msi.msi_ctrl, 2);
/* Release the messages. */
PCIB_RELEASE_MSI(device_get_parent(dev), child, cfg->msi.msi_alloc,
irqs);
for (i = 0; i < cfg->msi.msi_alloc; i++)
resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
/* Update alloc count. */
cfg->msi.msi_alloc = 0;
return (0);
}
/*
* Return the maximum number of MSI or MSI-X messages this device supports.
* Basically, assuming the MD code can alloc messages, this function
* should return the maximum value that pci_alloc_msi() can return. Thus,
* it is subject to the tunables, etc.
*/
int
pci_msi_count_method(device_t dev, device_t child)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
if (pci_do_msix && cfg->msix.msix_location != 0)
return (cfg->msix.msix_msgnum);
if (pci_do_msi && cfg->msi.msi_location != 0)
return (cfg->msi.msi_msgnum);
return (0);
}
/* free pcicfgregs structure and all depending data structures */
int
@ -1175,16 +1563,28 @@ pci_print_verbose(struct pci_devinfo *dinfo)
vwp->start + vwp->len, vwp->value);
}
}
if (cfg->msi.msi_data) {
if (cfg->msi.msi_location) {
int ctrl;
ctrl = cfg->msi.msi_ctrl;
printf("\tMSI supports %d message%s%s%s\n",
cfg->msi.msi_msgnum,
(cfg->msi.msi_msgnum == 1) ? "" : "s",
(ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
(ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
}
if (cfg->msix.msix_location) {
printf("\tMSI-X supports %d message%s ",
cfg->msix.msix_msgnum,
(cfg->msix.msix_msgnum == 1) ? "" : "s");
if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
printf("in map 0x%x\n",
cfg->msix.msix_table_bar);
else
printf("in maps 0x%x and 0x%x\n",
cfg->msix.msix_table_bar,
cfg->msix.msix_pba_bar);
}
}
}
@ -2252,12 +2652,19 @@ pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
if (device_get_parent(child) == dev) {
switch (type) {
case SYS_RES_IRQ:
/*
* Can't alloc legacy interrupt once MSI messages
* have been allocated.
*/
if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
cfg->msix.msix_alloc > 0))
return (NULL);
/*
* If the child device doesn't have an
* interrupt routed and is deserving of an
* interrupt, try to assign it one.
*/
if (!PCI_INTERRUPT_VALID(cfg->intline) &&
if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
(cfg->intpin != 0))
pci_assign_interrupt(dev, child, 0);
break;
@ -2468,6 +2875,13 @@ pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
/*
* Restore MSI configuration if it is present. If MSI is enabled,
* then restore the data and addr registers.
*/
if (dinfo->cfg.msi.msi_location != 0)
pci_resume_msi(dev);
}
void

@ -30,6 +30,15 @@
INTERFACE pci;
CODE {
static int
null_msi_count(device_t dev, device_t child)
{
return (0);
}
};
METHOD u_int32_t read_config {
device_t dev;
device_t child;
@ -102,3 +111,19 @@ METHOD int find_extcap {
int capability;
int *capreg;
};
METHOD int alloc_msi {
device_t dev;
device_t child;
int *count;
};
METHOD int release_msi {
device_t dev;
device_t child;
};
METHOD int msi_count {
device_t dev;
device_t child;
} DEFAULT null_msi_count;

@ -79,6 +79,10 @@ static device_method_t pcib_methods[] = {
DEVMETHOD(pcib_read_config, pcib_read_config),
DEVMETHOD(pcib_write_config, pcib_write_config),
DEVMETHOD(pcib_route_interrupt, pcib_route_interrupt),
DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi),
DEVMETHOD(pcib_release_msi, pcib_release_msi),
DEVMETHOD(pcib_alloc_msix, pcib_alloc_msix),
DEVMETHOD(pcib_release_msix, pcib_release_msix),
{ 0, 0 }
};
@ -539,6 +543,47 @@ pcib_route_interrupt(device_t pcib, device_t dev, int pin)
return(intnum);
}
/* Pass request to alloc MSI messages up to the parent bridge. */
int
pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs)
{
device_t bus;
bus = device_get_parent(pcib);
return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
irqs));
}
/* Pass request to release MSI messages up to the parent bridge. */
int
pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
{
device_t bus;
bus = device_get_parent(pcib);
return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
}
/* Pass request to alloc an MSI-X message up to the parent bridge. */
int
pcib_alloc_msix(device_t pcib, device_t dev, int index, int *irq)
{
device_t bus;
bus = device_get_parent(pcib);
return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, index, irq));
}
/* Pass request to release an MSI-X message up to the parent bridge. */
int
pcib_release_msix(device_t pcib, device_t dev, int irq)
{
device_t bus;
bus = device_get_parent(pcib);
return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
}
/*
* Try to read the bus number of a host-PCI bridge using appropriate config
* registers.

@ -66,6 +66,9 @@ int pci_enable_io_method(device_t dev, device_t child, int space);
int pci_disable_io_method(device_t dev, device_t child, int space);
int pci_find_extcap_method(device_t dev, device_t child,
int capability, int *capreg);
int pci_alloc_msi_method(device_t dev, device_t child, int *count);
int pci_release_msi_method(device_t dev, device_t child);
int pci_msi_count_method(device_t dev, device_t child);
struct resource *pci_alloc_resource(device_t dev, device_t child,
int type, int *rid, u_long start, u_long end, u_long count,
u_int flags);

@ -88,3 +88,48 @@ METHOD int route_interrupt {
device_t dev;
int pin;
} DEFAULT null_route_interrupt;
#
# Allocate 'count' MSI messages mapped onto 'count' IRQs. 'irqs' points
# to an array of at least 'count' ints. The max number of messages this
# device supports is included so that the MD code can take that into
# account when assigning resources so that the proper number of low bits
# are clear in the resulting message data value.
#
METHOD int alloc_msi {
device_t pcib;
device_t dev;
int count;
int maxcount;
int *irqs;
};
#
# Release 'count' MSI messages mapped onto 'count' IRQs stored in the
# array pointed to by 'irqs'.
#
METHOD int release_msi {
device_t pcib;
device_t dev;
int count;
int *irqs;
};
#
# Allocate a single MSI-X message mapped onto '*irq'.
#
METHOD int alloc_msix {
device_t pcib;
device_t dev;
int index;
int *irq;
};
#
# Release a single MSI-X message mapped onto 'irq'.
#
METHOD int release_msix {
device_t pcib;
device_t dev;
int irq;
};

@ -74,5 +74,9 @@ int pcib_maxslots(device_t dev);
uint32_t pcib_read_config(device_t dev, int b, int s, int f, int reg, int width);
void pcib_write_config(device_t dev, int b, int s, int f, int reg, uint32_t val, int width);
int pcib_route_interrupt(device_t pcib, device_t dev, int pin);
int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs);
int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs);
int pcib_alloc_msix(device_t pcib, device_t dev, int index, int *irq);
int pcib_release_msix(device_t pcib, device_t dev, int irq);
#endif

@ -430,3 +430,19 @@
#define PCIXM_STATUS_MAXSPLITS 0x0380 /* Maximum Split Transactions */
#define PCIXM_STATUS_MAXCRDS 0x1C00 /* Maximum Cumulative Read Size */
#define PCIXM_STATUS_RCVDSCEM 0x2000 /* Received a Split Comp w/Error msg */
/* MSI-X definitions */
#define PCIR_MSIX_CTRL 0x2
#define PCIM_MSIXCTRL_MSIX_ENABLE 0x8000
#define PCIM_MSIXCTRL_FUNCTION_MASK 0x4000
#define PCIM_MSIXCTRL_TABLE_SIZE 0x07FF
#define PCIR_MSIX_TABLE 0x4
#define PCIR_MSIX_PBA 0x8
#define PCIM_MSIX_BIR_MASK 0x7
#define PCIM_MSIX_BIR_BAR_10 0
#define PCIM_MSIX_BIR_BAR_14 1
#define PCIM_MSIX_BIR_BAR_18 2
#define PCIM_MSIX_BIR_BAR_1C 3
#define PCIM_MSIX_BIR_BAR_20 4
#define PCIM_MSIX_BIR_BAR_24 5
#define PCIM_MSIX_VCTRL_MASK 0x1

@ -77,8 +77,25 @@ struct pcicfg_vpd {
/* Interesting values for PCI MSI */
struct pcicfg_msi {
uint16_t msi_ctrl; /* Message Control */
uint8_t msi_location; /* Offset of MSI capability registers. */
uint8_t msi_msgnum; /* Number of messages */
uint16_t msi_data; /* Location of MSI data word */
int msi_alloc; /* Number of allocated messages. */
uint64_t msi_addr; /* Contents of address register. */
uint16_t msi_data; /* Contents of data register. */
};
/* Interesting values for PCI MSI-X */
struct pcicfg_msix {
uint16_t msix_ctrl; /* Message Control */
uint8_t msix_location; /* Offset of MSI-X capability registers. */
uint16_t msix_msgnum; /* Number of messages */
int msix_alloc; /* Number of allocated messages. */
uint8_t msix_table_bar; /* BAR containing vector table. */
uint8_t msix_pba_bar; /* BAR containing PBA. */
uint32_t msix_table_offset;
uint32_t msix_pba_offset;
struct resource *msix_table_res; /* Resource containing vector table. */
struct resource *msix_pba_res; /* Resource containing PBA. */
};
/* config header information common to all header types */
@ -120,6 +137,7 @@ typedef struct pcicfg {
struct pcicfg_pp pp; /* pci power management */
struct pcicfg_vpd vpd; /* pci vital product data */
struct pcicfg_msi msi; /* pci msi */
struct pcicfg_msix msix; /* pci msi-x */
} pcicfgregs;
/* additional type 1 device config header information (PCI to PCI bridge) */
@ -371,8 +389,35 @@ pci_find_extcap(device_t dev, int capability, int *capreg)
return PCI_FIND_EXTCAP(device_get_parent(dev), dev, capability, capreg);
}
static __inline int
pci_alloc_msi(device_t dev, int *count)
{
return (PCI_ALLOC_MSI(device_get_parent(dev), dev, count));
}
static __inline int
pci_release_msi(device_t dev)
{
return (PCI_RELEASE_MSI(device_get_parent(dev), dev));
}
static __inline int
pci_msi_count(device_t dev)
{
return (PCI_MSI_COUNT(device_get_parent(dev), dev));
}
device_t pci_find_bsf(uint8_t, uint8_t, uint8_t);
device_t pci_find_device(uint16_t, uint16_t);
/* Used by MD code to program MSI and MSI-X registers. */
void pci_enable_msi(device_t dev, uint64_t address, uint16_t data);
void pci_enable_msix(device_t dev, u_int index, uint64_t address,
uint32_t data);
void pci_mask_msix(device_t dev, u_int index);
int pci_pending_msix(device_t dev, u_int index);
void pci_unmask_msix(device_t dev, u_int index);
#endif /* _SYS_BUS_H_ */
/*