Sponsored by:	The FreeBSD Foundation
This commit is contained in:
Glen Barber 2016-04-06 01:44:21 +00:00
commit 2263fb580e
53 changed files with 2122 additions and 255 deletions

View File

@ -470,7 +470,7 @@ LIBCOMPAT= SOFT
WMAKE= ${WMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 DESTDIR=${WORLDTMP}
IMAKEENV= ${CROSSENV:N_LDSCRIPTROOT=*}
IMAKEENV= ${CROSSENV}
IMAKE= ${IMAKEENV} ${MAKE} -f Makefile.inc1 \
${IMAKE_INSTALL} ${IMAKE_MTREE}
.if empty(.MAKEFLAGS:M-n)

View File

@ -97,7 +97,7 @@ LIBCOMPATWMAKEFLAGS+= CC="${XCC} ${LIBCOMPATCFLAGS}" \
MK_TESTS=no
LIBCOMPATWMAKE+= ${LIBCOMPATWMAKEENV} ${MAKE} ${LIBCOMPATWMAKEFLAGS} \
MK_MAN=no MK_HTML=no
LIBCOMPATIMAKE+= ${LIBCOMPATWMAKE:NINSTALL=*:NDESTDIR=*:N_LDSCRIPTROOT=*} \
LIBCOMPATIMAKE+= ${LIBCOMPATWMAKE:NINSTALL=*:NDESTDIR=*} \
MK_TOOLCHAIN=no ${IMAKE_INSTALL} \
-DLIBRARIES_ONLY

View File

@ -16,7 +16,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd September 26, 2015
.Dd April 4, 2016
.Dt USB_QUIRK 4
.Os
.Sh NAME
@ -234,6 +234,12 @@ device which appears as a USB device on
usbconfig -d ugen0.3 add_quirk UQ_MSC_EJECT_WAIT
.Ed
.Pp
Enable a Holtek/Keep Out F85 gaming keyboard on
.Pa ugen1.4 :
.Bd -literal -offset indent
usbconfig -d ugen1.4 add_quirk UQ_KBD_BOOTPROTO
.Ed
.Pp
To install a quirk at boot time, place one or several lines like the
following in
.Xr loader.conf 5 :

View File

@ -992,3 +992,4 @@ static moduledata_t linux64_elf_mod = {
DECLARE_MODULE_TIED(linux64elf, linux64_elf_mod, SI_SUB_EXEC, SI_ORDER_ANY);
MODULE_DEPEND(linux64elf, linux_common, 1, 1, 1);
FEATURE(linux64, "Linux 64bit support");

View File

@ -1205,3 +1205,4 @@ static moduledata_t linux_elf_mod = {
DECLARE_MODULE_TIED(linuxelf, linux_elf_mod, SI_SUB_EXEC, SI_ORDER_ANY);
MODULE_DEPEND(linuxelf, linux_common, 1, 1, 1);
FEATURE(linux, "Linux 32bit support");

View File

@ -28,6 +28,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
@ -37,10 +39,12 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <dev/gpio/gpiobusvar.h>
#include <dev/ofw/ofw_bus.h>
@ -49,6 +53,10 @@ __FBSDID("$FreeBSD$");
#include "gpio_if.h"
#ifdef ARM_INTRNG
#include "pic_if.h"
#endif
#ifdef DEBUG
#define dprintf(fmt, args...) do { printf("%s(): ", __func__); \
printf(fmt,##args); } while (0)
@ -64,10 +72,10 @@ __FBSDID("$FreeBSD$");
static struct resource_spec bcm_gpio_res_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ SYS_RES_IRQ, 2, RF_ACTIVE },
{ SYS_RES_IRQ, 3, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE }, /* bank 0 interrupt */
{ SYS_RES_IRQ, 1, RF_ACTIVE }, /* bank 1 interrupt */
{ SYS_RES_IRQ, 2, RF_ACTIVE }, /* bank 1 interrupt (mirrored) */
{ SYS_RES_IRQ, 3, RF_ACTIVE }, /* bank 0-1 interrupt (united) */
{ -1, 0, 0 }
};
@ -76,6 +84,15 @@ struct bcm_gpio_sysctl {
uint32_t pin;
};
#ifdef ARM_INTRNG
struct bcm_gpio_irqsrc {
struct intr_irqsrc bgi_isrc;
uint32_t bgi_irq;
uint32_t bgi_reg;
uint32_t bgi_mask;
};
#endif
struct bcm_gpio_softc {
device_t sc_dev;
device_t sc_busdev;
@ -88,10 +105,16 @@ struct bcm_gpio_softc {
int sc_ro_npins;
int sc_ro_pins[BCM_GPIO_PINS];
struct gpio_pin sc_gpio_pins[BCM_GPIO_PINS];
#ifndef ARM_INTRNG
struct intr_event * sc_events[BCM_GPIO_PINS];
#endif
struct bcm_gpio_sysctl sc_sysctl[BCM_GPIO_PINS];
#ifdef ARM_INTRNG
struct bcm_gpio_irqsrc sc_isrcs[BCM_GPIO_PINS];
#else
enum intr_trigger sc_irq_trigger[BCM_GPIO_PINS];
enum intr_polarity sc_irq_polarity[BCM_GPIO_PINS];
#endif
};
enum bcm_gpio_pud {
@ -130,6 +153,13 @@ enum bcm_gpio_pud {
static struct bcm_gpio_softc *bcm_gpio_sc = NULL;
#ifdef ARM_INTRNG
static int bcm_gpio_intr_bank0(void *arg);
static int bcm_gpio_intr_bank1(void *arg);
static int bcm_gpio_pic_attach(struct bcm_gpio_softc *sc);
static int bcm_gpio_pic_detach(struct bcm_gpio_softc *sc);
#endif
static int
bcm_gpio_pin_is_ro(struct bcm_gpio_softc *sc, int pin)
{
@ -661,6 +691,7 @@ bcm_gpio_get_reserved_pins(struct bcm_gpio_softc *sc)
return (0);
}
#ifndef ARM_INTRNG
static int
bcm_gpio_intr(void *arg)
{
@ -694,6 +725,7 @@ bcm_gpio_intr(void *arg)
return (FILTER_HANDLED);
}
#endif
static int
bcm_gpio_probe(device_t dev)
@ -709,6 +741,49 @@ bcm_gpio_probe(device_t dev)
return (BUS_PROBE_DEFAULT);
}
#ifdef ARM_INTRNG
static int
bcm_gpio_intr_attach(device_t dev)
{
	struct bcm_gpio_softc *sc;

	/*
	 * Only the first two interrupt lines are used.  The third line
	 * mirrors the second one and the fourth line is shared by all
	 * banks, so neither needs a dedicated handler here.
	 */
	sc = device_get_softc(dev);
	/* Both per-bank IRQ resources must have been allocated. */
	if (sc->sc_res[1] == NULL || sc->sc_res[2] == NULL)
		return (-1);
	/* Register this driver as an interrupt controller (PIC). */
	if (bcm_gpio_pic_attach(sc) != 0) {
		device_printf(dev, "unable to attach PIC\n");
		return (-1);
	}
	/* Hook the filter routines for bank 0 and bank 1. */
	if (bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_MISC | INTR_MPSAFE,
	    bcm_gpio_intr_bank0, NULL, sc, &sc->sc_intrhand[0]) != 0)
		return (-1);
	if (bus_setup_intr(dev, sc->sc_res[2], INTR_TYPE_MISC | INTR_MPSAFE,
	    bcm_gpio_intr_bank1, NULL, sc, &sc->sc_intrhand[1]) != 0)
		return (-1);
	return (0);
}
/* Undo bcm_gpio_intr_attach(): tear down bank handlers, then the PIC. */
static void
bcm_gpio_intr_detach(device_t dev)
{
	struct bcm_gpio_softc *sc;

	sc = device_get_softc(dev);
	/* Only tear down handlers that were actually installed. */
	if (sc->sc_intrhand[0] != NULL)
		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intrhand[0]);
	if (sc->sc_intrhand[1] != NULL)
		bus_teardown_intr(dev, sc->sc_res[2], sc->sc_intrhand[1]);
	bcm_gpio_pic_detach(sc);
}
#else
static int
bcm_gpio_intr_attach(device_t dev)
{
@ -741,6 +816,7 @@ bcm_gpio_intr_detach(device_t dev)
}
}
}
#endif
static int
bcm_gpio_attach(device_t dev)
@ -786,9 +862,11 @@ bcm_gpio_attach(device_t dev)
sc->sc_gpio_pins[i].gp_pin = j;
sc->sc_gpio_pins[i].gp_caps = BCM_GPIO_DEFAULT_CAPS;
sc->sc_gpio_pins[i].gp_flags = bcm_gpio_func_flag(func);
#ifndef ARM_INTRNG
/* The default is active-low interrupts. */
sc->sc_irq_trigger[i] = INTR_TRIGGER_LEVEL;
sc->sc_irq_polarity[i] = INTR_POLARITY_LOW;
#endif
i++;
}
sc->sc_gpio_npins = i;
@ -814,6 +892,289 @@ bcm_gpio_detach(device_t dev)
return (EBUSY);
}
#ifdef ARM_INTRNG
/* Acknowledge (EOI) one GPIO interrupt source. */
static inline void
bcm_gpio_isrc_eoi(struct bcm_gpio_softc *sc, struct bcm_gpio_irqsrc *bgi)
{
	uint32_t bank;

	/* The event-detect status register (GPEDS) is write-1-to-clear. */
	bank = BCM_GPIO_BANK(bgi->bgi_irq);
	BCM_GPIO_WRITE(sc, BCM_GPIO_GPEDS(bank), bgi->bgi_mask);
}
/*
 * Return true when the source is level-triggered, i.e. configured in
 * the high-level (GPHEN) or low-level (GPLEN) detect-enable register.
 */
static inline bool
bcm_gpio_isrc_is_level(struct bcm_gpio_irqsrc *bgi)
{
	uint32_t bank;

	bank = BCM_GPIO_BANK(bgi->bgi_irq);
	return (bgi->bgi_reg == BCM_GPIO_GPHEN(bank) ||
	    bgi->bgi_reg == BCM_GPIO_GPLEN(bank));
}
/*
 * Mask (disable) an interrupt source by clearing its bit in whichever
 * detect-enable register it is configured in (bgi->bgi_reg).
 */
static inline void
bcm_gpio_isrc_mask(struct bcm_gpio_softc *sc, struct bcm_gpio_irqsrc *bgi)
{

	BCM_GPIO_LOCK(sc);
	BCM_GPIO_CLEAR_BITS(sc, bgi->bgi_reg, bgi->bgi_mask);
	/*
	 * Unlock through the same softc that was locked.  The previous
	 * code unlocked via the global bcm_gpio_sc, which names the
	 * same lock only because this driver is a singleton.
	 */
	BCM_GPIO_UNLOCK(sc);
}
/*
 * Unmask (enable) an interrupt source by setting its bit in whichever
 * detect-enable register it is configured in (bgi->bgi_reg).
 */
static inline void
bcm_gpio_isrc_unmask(struct bcm_gpio_softc *sc, struct bcm_gpio_irqsrc *bgi)
{

	BCM_GPIO_LOCK(sc);
	BCM_GPIO_SET_BITS(sc, bgi->bgi_reg, bgi->bgi_mask);
	BCM_GPIO_UNLOCK(sc);
}
/*
 * Common filter body: dispatch every pending GPIO event in 'bank'.
 * Always returns FILTER_HANDLED.
 */
static int
bcm_gpio_intr_internal(struct bcm_gpio_softc *sc, uint32_t bank)
{
	u_int irq;
	struct bcm_gpio_irqsrc *bgi;
	uint32_t reg;

	/* A spurious interrupt is harmless: zero GPEDS just skips the loop. */
	reg = BCM_GPIO_READ(sc, BCM_GPIO_GPEDS(bank));
	while (reg != 0) {
		/* Lowest pending pin first; ffs() is 1-based. */
		irq = BCM_GPIO_PINS_PER_BANK * bank + ffs(reg) - 1;
		bgi = sc->sc_isrcs + irq;
		/* Edge events are acknowledged before dispatch. */
		if (!bcm_gpio_isrc_is_level(bgi))
			bcm_gpio_isrc_eoi(sc, bgi);
		if (intr_isrc_dispatch(&bgi->bgi_isrc,
		    curthread->td_intr_frame) != 0) {
			/* No handler claimed it: mask it so it cannot storm. */
			bcm_gpio_isrc_mask(sc, bgi);
			if (bcm_gpio_isrc_is_level(bgi))
				bcm_gpio_isrc_eoi(sc, bgi);
			device_printf(sc->sc_dev, "Stray irq %u disabled\n",
			    irq);
		}
		reg &= ~bgi->bgi_mask;
	}
	return (FILTER_HANDLED);
}
/* Filter routine for the bank 0 interrupt line. */
static int
bcm_gpio_intr_bank0(void *arg)
{

	return (bcm_gpio_intr_internal(arg, 0));
}

/* Filter routine for the bank 1 interrupt line. */
static int
bcm_gpio_intr_bank1(void *arg)
{

	return (bcm_gpio_intr_internal(arg, 1));
}
/*
 * Register one interrupt source per GPIO pin with INTRNG and then
 * register this device as a PIC.  Returns 0 or an errno value.
 */
static int
bcm_gpio_pic_attach(struct bcm_gpio_softc *sc)
{
	int error;
	uint32_t irq;
	const char *name;

	name = device_get_nameunit(sc->sc_dev);
	for (irq = 0; irq < BCM_GPIO_PINS; irq++) {
		sc->sc_isrcs[irq].bgi_irq = irq;
		sc->sc_isrcs[irq].bgi_mask = BCM_GPIO_MASK(irq);
		/* bgi_reg == 0 means "no trigger mode configured yet". */
		sc->sc_isrcs[irq].bgi_reg = 0;
		error = intr_isrc_register(&sc->sc_isrcs[irq].bgi_isrc,
		    sc->sc_dev, 0, "%s,%u", name, irq);
		if (error != 0)
			return (error); /* XXX sources registered so far leak */
	}
	return (intr_pic_register(sc->sc_dev,
	    OF_xref_from_node(ofw_bus_get_node(sc->sc_dev))));
}
static int
bcm_gpio_pic_detach(struct bcm_gpio_softc *sc)
{

	/*
	 * No procedure has been established yet for correctly detaching
	 * a PIC from a running system, so refuse the detach.
	 */
	device_printf(sc->sc_dev, "%s: not implemented yet\n", __func__);
	return (EBUSY);
}
/* PIC_DISABLE_INTR method: mask the source. */
static void
bcm_gpio_pic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_gpio_softc *sc = device_get_softc(dev);
	struct bcm_gpio_irqsrc *bgi = (struct bcm_gpio_irqsrc *)isrc;

	bcm_gpio_isrc_mask(sc, bgi);
}

/* PIC_ENABLE_INTR method: unmask the source. */
static void
bcm_gpio_pic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_gpio_softc *sc = device_get_softc(dev);
	struct bcm_gpio_irqsrc *bgi = (struct bcm_gpio_irqsrc *)isrc;

	/* Make earlier device writes visible before the source can fire. */
	arm_irq_memory_barrier(bgi->bgi_irq);
	bcm_gpio_isrc_unmask(sc, bgi);
}
/*
 * Decode an FDT interrupt specifier into a GPIO irq number (*irqp) and,
 * optionally, the detect-enable register implementing the requested
 * trigger mode (*regp).  Returns 0 or EINVAL on a malformed specifier.
 */
static int
bcm_gpio_pic_map_fdt(struct bcm_gpio_softc *sc, u_int ncells, pcell_t *cells,
    u_int *irqp, uint32_t *regp)
{
	u_int irq;
	uint32_t reg, bank;

	/*
	 * The first cell is the interrupt number.
	 * The second cell is used to specify flags:
	 *	bits[3:0] trigger type and level flags:
	 *		1 = low-to-high edge triggered.
	 *		2 = high-to-low edge triggered.
	 *		4 = active high level-sensitive.
	 *		8 = active low level-sensitive.
	 */
	if (ncells != 2)
		return (EINVAL);
	irq = cells[0];
	/* Reject out-of-range pins and pins reserved as read-only. */
	if (irq >= BCM_GPIO_PINS || bcm_gpio_pin_is_ro(sc, irq))
		return (EINVAL);
	/*
	 * The hardware would allow several trigger types to be enabled
	 * for one interrupt at the same time; at least the combination
	 * of both edge types could be meaningful.  However, no such
	 * combination is supported now, so exactly one flag bit is
	 * accepted.
	 */
	bank = BCM_GPIO_BANK(irq);
	if (cells[1] == 1)
		reg = BCM_GPIO_GPREN(bank);
	else if (cells[1] == 2)
		reg = BCM_GPIO_GPFEN(bank);
	else if (cells[1] == 4)
		reg = BCM_GPIO_GPHEN(bank);
	else if (cells[1] == 8)
		reg = BCM_GPIO_GPLEN(bank);
	else
		return (EINVAL);
	*irqp = irq;
	if (regp != NULL)
		*regp = reg;
	return (0);
}
/* PIC_MAP_INTR method: translate map data into an interrupt source. */
static int
bcm_gpio_pic_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	int error;
	u_int irq;
	struct bcm_gpio_softc *sc;

	/* Only FDT-sourced interrupt descriptions are understood. */
	if (data->type != INTR_MAP_DATA_FDT)
		return (ENOTSUP);
	sc = device_get_softc(dev);
	error = bcm_gpio_pic_map_fdt(sc, data->fdt.ncells, data->fdt.cells,
	    &irq, NULL);
	if (error == 0)
		*isrcp = &sc->sc_isrcs[irq].bgi_isrc;
	return (error);
}
/* PIC_POST_FILTER method: acknowledge level sources after filters ran. */
static void
bcm_gpio_pic_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_gpio_softc *sc = device_get_softc(dev);
	struct bcm_gpio_irqsrc *bgi = (struct bcm_gpio_irqsrc *)isrc;

	if (bcm_gpio_isrc_is_level(bgi))
		bcm_gpio_isrc_eoi(sc, bgi);
}

/* PIC_POST_ITHREAD method: re-enable the source when its ithread is done. */
static void
bcm_gpio_pic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	bcm_gpio_pic_enable_intr(dev, isrc);
}

/*
 * PIC_PRE_ITHREAD method: mask the source before its ithread is
 * scheduled; level sources are also acknowledged here.
 */
static void
bcm_gpio_pic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_gpio_softc *sc = device_get_softc(dev);
	struct bcm_gpio_irqsrc *bgi = (struct bcm_gpio_irqsrc *)isrc;

	bcm_gpio_isrc_mask(sc, bgi);
	if (bcm_gpio_isrc_is_level(bgi))
		bcm_gpio_isrc_eoi(sc, bgi);
}
/*
 * PIC_SETUP_INTR method: configure the trigger mode for a source the
 * first time a handler is attached; later attachments may only reuse
 * the already-configured mode.
 */
static int
bcm_gpio_pic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	u_int irq;
	uint32_t bank, reg;
	struct bcm_gpio_softc *sc;
	struct bcm_gpio_irqsrc *bgi;

	if (data == NULL || data->type != INTR_MAP_DATA_FDT)
		return (ENOTSUP);
	sc = device_get_softc(dev);
	bgi = (struct bcm_gpio_irqsrc *)isrc;
	/* Get and check config for an interrupt. */
	if (bcm_gpio_pic_map_fdt(sc, data->fdt.ncells, data->fdt.cells, &irq,
	    &reg) != 0 || bgi->bgi_irq != irq)
		return (EINVAL);
	/*
	 * If this is a setup for another handler,
	 * only check that its configuration match.
	 */
	if (isrc->isrc_handlers != 0)
		return (bgi->bgi_reg == reg ? 0 : EINVAL);
	/*
	 * Atomically clear every trigger mode for this pin and then
	 * enable only the requested one.
	 */
	bank = BCM_GPIO_BANK(irq);
	BCM_GPIO_LOCK(sc);
	BCM_GPIO_CLEAR_BITS(sc, BCM_GPIO_GPREN(bank), bgi->bgi_mask);
	BCM_GPIO_CLEAR_BITS(sc, BCM_GPIO_GPFEN(bank), bgi->bgi_mask);
	BCM_GPIO_CLEAR_BITS(sc, BCM_GPIO_GPHEN(bank), bgi->bgi_mask);
	BCM_GPIO_CLEAR_BITS(sc, BCM_GPIO_GPLEN(bank), bgi->bgi_mask);
	bgi->bgi_reg = reg;
	BCM_GPIO_SET_BITS(sc, reg, bgi->bgi_mask);
	BCM_GPIO_UNLOCK(sc);
	return (0);
}
/*
 * PIC_TEARDOWN_INTR method: once the last handler is gone, disable
 * event detection for the pin and forget its trigger configuration.
 */
static int
bcm_gpio_pic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct bcm_gpio_softc *sc = device_get_softc(dev);
	struct bcm_gpio_irqsrc *bgi = (struct bcm_gpio_irqsrc *)isrc;

	if (isrc->isrc_handlers == 0) {
		BCM_GPIO_LOCK(sc);
		BCM_GPIO_CLEAR_BITS(sc, bgi->bgi_reg, bgi->bgi_mask);
		/* bgi_reg == 0 again marks the pin as unconfigured. */
		bgi->bgi_reg = 0;
		BCM_GPIO_UNLOCK(sc);
	}
	return (0);
}
#else
static uint32_t
bcm_gpio_intr_reg(struct bcm_gpio_softc *sc, unsigned int irq, uint32_t bank)
{
@ -984,6 +1345,7 @@ bcm_gpio_teardown_intr(device_t dev, device_t child, struct resource *ires,
return (err);
}
#endif
static phandle_t
bcm_gpio_get_node(device_t bus, device_t dev)
@ -1010,13 +1372,24 @@ static device_method_t bcm_gpio_methods[] = {
DEVMETHOD(gpio_pin_set, bcm_gpio_pin_set),
DEVMETHOD(gpio_pin_toggle, bcm_gpio_pin_toggle),
#ifdef ARM_INTRNG
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, bcm_gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, bcm_gpio_pic_enable_intr),
DEVMETHOD(pic_map_intr, bcm_gpio_pic_map_intr),
DEVMETHOD(pic_post_filter, bcm_gpio_pic_post_filter),
DEVMETHOD(pic_post_ithread, bcm_gpio_pic_post_ithread),
DEVMETHOD(pic_pre_ithread, bcm_gpio_pic_pre_ithread),
DEVMETHOD(pic_setup_intr, bcm_gpio_pic_setup_intr),
DEVMETHOD(pic_teardown_intr, bcm_gpio_pic_teardown_intr),
#else
/* Bus interface */
DEVMETHOD(bus_activate_resource, bcm_gpio_activate_resource),
DEVMETHOD(bus_deactivate_resource, bcm_gpio_deactivate_resource),
DEVMETHOD(bus_config_intr, bcm_gpio_config_intr),
DEVMETHOD(bus_setup_intr, bcm_gpio_setup_intr),
DEVMETHOD(bus_teardown_intr, bcm_gpio_teardown_intr),
#endif
/* ofw_bus interface */
DEVMETHOD(ofw_bus_get_node, bcm_gpio_get_node),

View File

@ -30,12 +30,15 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/intr.h>
@ -49,6 +52,10 @@ __FBSDID("$FreeBSD$");
#include <arm/broadcom/bcm2835/bcm2836.h>
#endif
#ifdef ARM_INTRNG
#include "pic_if.h"
#endif
#define INTC_PENDING_BASIC 0x00
#define INTC_PENDING_BANK1 0x04
#define INTC_PENDING_BANK2 0x08
@ -60,17 +67,55 @@ __FBSDID("$FreeBSD$");
#define INTC_DISABLE_BANK2 0x20
#define INTC_DISABLE_BASIC 0x24
#define INTC_PENDING_BASIC_ARM 0x0000FF
#define INTC_PENDING_BASIC_GPU1_PEND 0x000100
#define INTC_PENDING_BASIC_GPU2_PEND 0x000200
#define INTC_PENDING_BASIC_GPU1_7 0x000400
#define INTC_PENDING_BASIC_GPU1_9 0x000800
#define INTC_PENDING_BASIC_GPU1_10 0x001000
#define INTC_PENDING_BASIC_GPU1_18 0x002000
#define INTC_PENDING_BASIC_GPU1_19 0x004000
#define INTC_PENDING_BASIC_GPU2_21 0x008000
#define INTC_PENDING_BASIC_GPU2_22 0x010000
#define INTC_PENDING_BASIC_GPU2_23 0x020000
#define INTC_PENDING_BASIC_GPU2_24 0x040000
#define INTC_PENDING_BASIC_GPU2_25 0x080000
#define INTC_PENDING_BASIC_GPU2_30 0x100000
#define INTC_PENDING_BASIC_MASK 0x1FFFFF
#define INTC_PENDING_BASIC_GPU1_MASK (INTC_PENDING_BASIC_GPU1_7 | \
INTC_PENDING_BASIC_GPU1_9 | \
INTC_PENDING_BASIC_GPU1_10 | \
INTC_PENDING_BASIC_GPU1_18 | \
INTC_PENDING_BASIC_GPU1_19)
#define INTC_PENDING_BASIC_GPU2_MASK (INTC_PENDING_BASIC_GPU2_21 | \
INTC_PENDING_BASIC_GPU2_22 | \
INTC_PENDING_BASIC_GPU2_23 | \
INTC_PENDING_BASIC_GPU2_24 | \
INTC_PENDING_BASIC_GPU2_25 | \
INTC_PENDING_BASIC_GPU2_30)
#define INTC_PENDING_BANK1_MASK (~((1 << 7) | (1 << 9) | (1 << 10) | \
(1 << 18) | (1 << 19)))
#define INTC_PENDING_BANK2_MASK (~((1 << 21) | (1 << 22) | (1 << 23) | \
(1 << 24) | (1 << 25) | (1 << 30)))
#define BANK1_START 8
#define BANK1_END (BANK1_START + 32 - 1)
#define BANK2_START (BANK1_START + 32)
#define BANK2_END (BANK2_START + 32 - 1)
#ifndef ARM_INTRNG
#define BANK3_START (BANK2_START + 32)
#define BANK3_END (BANK3_START + 32 - 1)
#endif
#define IS_IRQ_BASIC(n) (((n) >= 0) && ((n) < BANK1_START))
#define IS_IRQ_BANK1(n) (((n) >= BANK1_START) && ((n) <= BANK1_END))
#define IS_IRQ_BANK2(n) (((n) >= BANK2_START) && ((n) <= BANK2_END))
#ifndef ARM_INTRNG
#define ID_IRQ_BCM2836(n) (((n) >= BANK3_START) && ((n) <= BANK3_END))
#endif
#define IRQ_BANK1(n) ((n) - BANK1_START)
#define IRQ_BANK2(n) ((n) - BANK2_START)
@ -80,11 +125,28 @@ __FBSDID("$FreeBSD$");
#define dprintf(fmt, args...)
#endif
#ifdef ARM_INTRNG
#define BCM_INTC_NIRQS 72 /* 8 + 32 + 32 */
struct bcm_intc_irqsrc {
struct intr_irqsrc bii_isrc;
u_int bii_irq;
uint16_t bii_disable_reg;
uint16_t bii_enable_reg;
uint32_t bii_mask;
};
#endif
struct bcm_intc_softc {
device_t sc_dev;
struct resource * intc_res;
bus_space_tag_t intc_bst;
bus_space_handle_t intc_bsh;
#ifdef ARM_INTRNG
struct resource * intc_irq_res;
void * intc_irq_hdl;
struct bcm_intc_irqsrc intc_isrcs[BCM_INTC_NIRQS];
#endif
};
static struct bcm_intc_softc *bcm_intc_sc = NULL;
@ -94,6 +156,192 @@ static struct bcm_intc_softc *bcm_intc_sc = NULL;
#define intc_write_4(_sc, reg, val) \
bus_space_write_4((_sc)->intc_bst, (_sc)->intc_bsh, (reg), (val))
#ifdef ARM_INTRNG
/* Disable a source by writing its bit to the bank's disable register. */
static inline void
bcm_intc_isrc_mask(struct bcm_intc_softc *sc, struct bcm_intc_irqsrc *bii)
{

	intc_write_4(sc, bii->bii_disable_reg, bii->bii_mask);
}

/* Enable a source by writing its bit to the bank's enable register. */
static inline void
bcm_intc_isrc_unmask(struct bcm_intc_softc *sc, struct bcm_intc_irqsrc *bii)
{

	intc_write_4(sc, bii->bii_enable_reg, bii->bii_mask);
}
/*
 * Return the number of the highest-priority pending interrupt, or -1
 * when nothing is pending.  Priority order: ARM (basic) sources first,
 * then the GPU sources short-cut in the basic pending register, and
 * finally anything else found in the bank 1/2 pending registers.
 */
static inline int
bcm2835_intc_active_intr(struct bcm_intc_softc *sc)
{
	uint32_t pending, pending_gpu;

	pending = intc_read_4(sc, INTC_PENDING_BASIC) & INTC_PENDING_BASIC_MASK;
	if (pending == 0)
		return (-1);
	/* ARM-specific sources occupy the low bits of the basic register. */
	if (pending & INTC_PENDING_BASIC_ARM)
		return (ffs(pending) - 1);
	/* Selected GPU sources are mirrored into the basic register. */
	if (pending & INTC_PENDING_BASIC_GPU1_MASK) {
		if (pending & INTC_PENDING_BASIC_GPU1_7)
			return (BANK1_START + 7);
		if (pending & INTC_PENDING_BASIC_GPU1_9)
			return (BANK1_START + 9);
		if (pending & INTC_PENDING_BASIC_GPU1_10)
			return (BANK1_START + 10);
		if (pending & INTC_PENDING_BASIC_GPU1_18)
			return (BANK1_START + 18);
		if (pending & INTC_PENDING_BASIC_GPU1_19)
			return (BANK1_START + 19);
	}
	if (pending & INTC_PENDING_BASIC_GPU2_MASK) {
		if (pending & INTC_PENDING_BASIC_GPU2_21)
			return (BANK2_START + 21);
		if (pending & INTC_PENDING_BASIC_GPU2_22)
			return (BANK2_START + 22);
		if (pending & INTC_PENDING_BASIC_GPU2_23)
			return (BANK2_START + 23);
		if (pending & INTC_PENDING_BASIC_GPU2_24)
			return (BANK2_START + 24);
		if (pending & INTC_PENDING_BASIC_GPU2_25)
			return (BANK2_START + 25);
		if (pending & INTC_PENDING_BASIC_GPU2_30)
			return (BANK2_START + 30);
	}
	/*
	 * Fall back to the full per-bank pending registers, excluding
	 * the bits already handled via the mirrors above.
	 */
	if (pending & INTC_PENDING_BASIC_GPU1_PEND) {
		pending_gpu = intc_read_4(sc, INTC_PENDING_BANK1);
		pending_gpu &= INTC_PENDING_BANK1_MASK;
		if (pending_gpu != 0)
			return (BANK1_START + ffs(pending_gpu) - 1);
	}
	if (pending & INTC_PENDING_BASIC_GPU2_PEND) {
		pending_gpu = intc_read_4(sc, INTC_PENDING_BANK2);
		pending_gpu &= INTC_PENDING_BANK2_MASK;
		if (pending_gpu != 0)
			return (BANK2_START + ffs(pending_gpu) - 1);
	}
	return (-1);	/* It shouldn't end here, but it's hardware. */
}
/*
 * Root interrupt filter: dispatch pending sources until the controller
 * reports none left.  Always returns FILTER_HANDLED.
 */
static int
bcm2835_intc_intr(void *arg)
{
	int irq, num;
	struct bcm_intc_softc *sc = arg;

	for (num = 0; ; num++) {
		irq = bcm2835_intc_active_intr(sc);
		if (irq == -1)
			break;
		if (intr_isrc_dispatch(&sc->intc_isrcs[irq].bii_isrc,
		    curthread->td_intr_frame) != 0) {
			/* No handler claimed it: mask it to avoid a storm. */
			bcm_intc_isrc_mask(sc, &sc->intc_isrcs[irq]);
			device_printf(sc->sc_dev, "Stray irq %u disabled\n",
			    irq);
		}
		arm_irq_memory_barrier(0);	/* XXX */
	}
	if (num == 0)
		device_printf(sc->sc_dev, "Spurious interrupt detected\n");
	return (FILTER_HANDLED);
}
/* PIC_ENABLE_INTR method: unmask the source. */
static void
bcm_intc_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_intc_irqsrc *bii = (struct bcm_intc_irqsrc *)isrc;

	/* Make earlier device writes visible before the source can fire. */
	arm_irq_memory_barrier(bii->bii_irq);
	bcm_intc_isrc_unmask(device_get_softc(dev), bii);
}

/* PIC_DISABLE_INTR method: mask the source. */
static void
bcm_intc_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	bcm_intc_isrc_mask(device_get_softc(dev),
	    (struct bcm_intc_irqsrc *)isrc);
}
/* PIC_MAP_INTR method: translate FDT map data into an interrupt source. */
static int
bcm_intc_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	u_int irq;
	struct bcm_intc_softc *sc;

	if (data->type != INTR_MAP_DATA_FDT)
		return (ENOTSUP);
	/*
	 * One cell: a flat interrupt number.  Two cells: presumably
	 * <bank, number> with 32 sources per bank — TODO confirm
	 * against the device-tree binding.
	 */
	if (data->fdt.ncells == 1)
		irq = data->fdt.cells[0];
	else if (data->fdt.ncells == 2)
		irq = data->fdt.cells[0] * 32 + data->fdt.cells[1];
	else
		return (EINVAL);
	if (irq >= BCM_INTC_NIRQS)
		return (EINVAL);
	sc = device_get_softc(dev);
	*isrcp = &sc->intc_isrcs[irq].bii_isrc;
	return (0);
}
/* PIC_PRE_ITHREAD method: keep the source masked while its ithread runs. */
static void
bcm_intc_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	bcm_intc_disable_intr(dev, isrc);
}

/* PIC_POST_ITHREAD method: re-enable the source after the ithread ran. */
static void
bcm_intc_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	bcm_intc_enable_intr(dev, isrc);
}

/* PIC_POST_FILTER method: nothing to acknowledge on this controller. */
static void
bcm_intc_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
}
/*
 * Describe every interrupt source (disable/enable register and bit
 * mask per bank), register each with INTRNG, and register this device
 * as a PIC under 'xref'.  Returns 0 or an errno value.
 */
static int
bcm_intc_pic_register(struct bcm_intc_softc *sc, intptr_t xref)
{
	struct bcm_intc_irqsrc *bii;
	int error;
	uint32_t irq;
	const char *name;

	name = device_get_nameunit(sc->sc_dev);
	for (irq = 0; irq < BCM_INTC_NIRQS; irq++) {
		bii = &sc->intc_isrcs[irq];
		bii->bii_irq = irq;
		if (IS_IRQ_BASIC(irq)) {
			bii->bii_disable_reg = INTC_DISABLE_BASIC;
			bii->bii_enable_reg = INTC_ENABLE_BASIC;
			bii->bii_mask = 1 << irq;
		} else if (IS_IRQ_BANK1(irq)) {
			bii->bii_disable_reg = INTC_DISABLE_BANK1;
			bii->bii_enable_reg = INTC_ENABLE_BANK1;
			bii->bii_mask = 1 << IRQ_BANK1(irq);
		} else if (IS_IRQ_BANK2(irq)) {
			bii->bii_disable_reg = INTC_DISABLE_BANK2;
			bii->bii_enable_reg = INTC_ENABLE_BANK2;
			bii->bii_mask = 1 << IRQ_BANK2(irq);
		} else
			return (ENXIO);
		error = intr_isrc_register(&bii->bii_isrc, sc->sc_dev, 0,
		    "%s,%u", name, irq);
		if (error != 0)
			return (error);
	}
	return (intr_pic_register(sc->sc_dev, xref));
}
#endif
static int
bcm_intc_probe(device_t dev)
{
@ -112,7 +360,9 @@ bcm_intc_attach(device_t dev)
{
struct bcm_intc_softc *sc = device_get_softc(dev);
int rid = 0;
#ifdef ARM_INTRNG
intptr_t xref;
#endif
sc->sc_dev = dev;
if (bcm_intc_sc)
@ -124,6 +374,32 @@ bcm_intc_attach(device_t dev)
return (ENXIO);
}
#ifdef ARM_INTRNG
xref = OF_xref_from_node(ofw_bus_get_node(dev));
if (bcm_intc_pic_register(sc, xref) != 0) {
bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->intc_res);
device_printf(dev, "could not register PIC\n");
return (ENXIO);
}
rid = 0;
sc->intc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
if (sc->intc_irq_res == NULL) {
if (intr_pic_claim_root(dev, xref, bcm2835_intc_intr, sc, 0) != 0) {
/* XXX clean up */
device_printf(dev, "could not set PIC as a root\n");
return (ENXIO);
}
} else {
if (bus_setup_intr(dev, sc->intc_irq_res, INTR_TYPE_CLK,
bcm2835_intc_intr, NULL, sc, &sc->intc_irq_hdl)) {
/* XXX clean up */
device_printf(dev, "could not setup irq handler\n");
return (ENXIO);
}
}
#endif
sc->intc_bst = rman_get_bustag(sc->intc_res);
sc->intc_bsh = rman_get_bushandle(sc->intc_res);
@ -135,6 +411,16 @@ bcm_intc_attach(device_t dev)
static device_method_t bcm_intc_methods[] = {
DEVMETHOD(device_probe, bcm_intc_probe),
DEVMETHOD(device_attach, bcm_intc_attach),
#ifdef ARM_INTRNG
DEVMETHOD(pic_disable_intr, bcm_intc_disable_intr),
DEVMETHOD(pic_enable_intr, bcm_intc_enable_intr),
DEVMETHOD(pic_map_intr, bcm_intc_map_intr),
DEVMETHOD(pic_post_filter, bcm_intc_post_filter),
DEVMETHOD(pic_post_ithread, bcm_intc_post_ithread),
DEVMETHOD(pic_pre_ithread, bcm_intc_pre_ithread),
#endif
{ 0, 0 }
};
@ -148,6 +434,7 @@ static devclass_t bcm_intc_devclass;
DRIVER_MODULE(intc, simplebus, bcm_intc_driver, bcm_intc_devclass, 0, 0);
#ifndef ARM_INTRNG
int
arm_get_next_irq(int last_irq)
{
@ -247,3 +534,4 @@ intr_pic_init_secondary(void)
{
}
#endif
#endif

View File

@ -1,5 +1,6 @@
/*
* Copyright 2015 Andrew Turner.
* Copyright 2016 Svatopluk Kraus
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -28,19 +29,33 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_bus.h>
#ifdef ARM_INTRNG
#include "pic_if.h"
#else
#include <arm/broadcom/bcm2835/bcm2836.h>
#define ARM_LOCAL_BASE 0x40000000
@ -55,7 +70,703 @@ __FBSDID("$FreeBSD$");
#define INT_PENDING_MASK 0x011f
#define MAILBOX0_IRQ 4
#define MAILBOX0_IRQEN (1 << 0)
#endif
#ifdef ARM_INTRNG
#define BCM_LINTC_CONTROL_REG 0x00
#define BCM_LINTC_PRESCALER_REG 0x08
#define BCM_LINTC_GPU_ROUTING_REG 0x0c
#define BCM_LINTC_PMU_ROUTING_SET_REG 0x10
#define BCM_LINTC_PMU_ROUTING_CLR_REG 0x14
#define BCM_LINTC_TIMER_CFG_REG(n) (0x40 + (n) * 4)
#define BCM_LINTC_MBOX_CFG_REG(n) (0x50 + (n) * 4)
#define BCM_LINTC_PENDING_REG(n) (0x60 + (n) * 4)
#define BCM_LINTC_MBOX0_SET_REG(n) (0x80 + (n) * 16)
#define BCM_LINTC_MBOX1_SET_REG(n) (0x84 + (n) * 16)
#define BCM_LINTC_MBOX2_SET_REG(n) (0x88 + (n) * 16)
#define BCM_LINTC_MBOX3_SET_REG(n) (0x8C + (n) * 16)
#define BCM_LINTC_MBOX0_CLR_REG(n) (0xC0 + (n) * 16)
#define BCM_LINTC_MBOX1_CLR_REG(n) (0xC4 + (n) * 16)
#define BCM_LINTC_MBOX2_CLR_REG(n) (0xC8 + (n) * 16)
#define BCM_LINTC_MBOX3_CLR_REG(n) (0xCC + (n) * 16)
/* Prescaler Register */
#define BCM_LINTC_PSR_19_2 0x80000000 /* 19.2 MHz */
/* GPU Interrupt Routing Register */
#define BCM_LINTC_GIRR_IRQ_CORE(n) (n)
#define BCM_LINTC_GIRR_FIQ_CORE(n) ((n) << 2)
/* PMU Interrupt Routing Register */
#define BCM_LINTC_PIRR_IRQ_EN_CORE(n) (1 << (n))
#define BCM_LINTC_PIRR_FIQ_EN_CORE(n) (1 << ((n) + 4))
/* Timer Config Register */
#define BCM_LINTC_TCR_IRQ_EN_TIMER(n) (1 << (n))
#define BCM_LINTC_TCR_FIQ_EN_TIMER(n) (1 << ((n) + 4))
/* MBOX Config Register */
#define BCM_LINTC_MCR_IRQ_EN_MBOX(n) (1 << (n))
#define BCM_LINTC_MCR_FIQ_EN_MBOX(n) (1 << ((n) + 4))
#define BCM_LINTC_CNTPSIRQ_IRQ 0
#define BCM_LINTC_CNTPNSIRQ_IRQ 1
#define BCM_LINTC_CNTHPIRQ_IRQ 2
#define BCM_LINTC_CNTVIRQ_IRQ 3
#define BCM_LINTC_MBOX0_IRQ 4
#define BCM_LINTC_MBOX1_IRQ 5
#define BCM_LINTC_MBOX2_IRQ 6
#define BCM_LINTC_MBOX3_IRQ 7
#define BCM_LINTC_GPU_IRQ 8
#define BCM_LINTC_PMU_IRQ 9
#define BCM_LINTC_AXI_IRQ 10
#define BCM_LINTC_LTIMER_IRQ 11
#define BCM_LINTC_NIRQS 12
#define BCM_LINTC_TIMER0_IRQ BCM_LINTC_CNTPSIRQ_IRQ
#define BCM_LINTC_TIMER1_IRQ BCM_LINTC_CNTPNSIRQ_IRQ
#define BCM_LINTC_TIMER2_IRQ BCM_LINTC_CNTHPIRQ_IRQ
#define BCM_LINTC_TIMER3_IRQ BCM_LINTC_CNTVIRQ_IRQ
#define BCM_LINTC_TIMER0_IRQ_MASK (1 << BCM_LINTC_TIMER0_IRQ)
#define BCM_LINTC_TIMER1_IRQ_MASK (1 << BCM_LINTC_TIMER1_IRQ)
#define BCM_LINTC_TIMER2_IRQ_MASK (1 << BCM_LINTC_TIMER2_IRQ)
#define BCM_LINTC_TIMER3_IRQ_MASK (1 << BCM_LINTC_TIMER3_IRQ)
#define BCM_LINTC_MBOX0_IRQ_MASK (1 << BCM_LINTC_MBOX0_IRQ)
#define BCM_LINTC_GPU_IRQ_MASK (1 << BCM_LINTC_GPU_IRQ)
#define BCM_LINTC_PMU_IRQ_MASK (1 << BCM_LINTC_PMU_IRQ)
#define BCM_LINTC_UP_PENDING_MASK \
(BCM_LINTC_TIMER0_IRQ_MASK | \
BCM_LINTC_TIMER1_IRQ_MASK | \
BCM_LINTC_TIMER2_IRQ_MASK | \
BCM_LINTC_TIMER3_IRQ_MASK | \
BCM_LINTC_GPU_IRQ_MASK | \
BCM_LINTC_PMU_IRQ_MASK)
#define BCM_LINTC_SMP_PENDING_MASK \
(BCM_LINTC_UP_PENDING_MASK | \
BCM_LINTC_MBOX0_IRQ_MASK)
#ifdef SMP
#define BCM_LINTC_PENDING_MASK BCM_LINTC_SMP_PENDING_MASK
#else
#define BCM_LINTC_PENDING_MASK BCM_LINTC_UP_PENDING_MASK
#endif
struct bcm_lintc_irqsrc {
struct intr_irqsrc bli_isrc;
u_int bli_irq;
union {
u_int bli_mask; /* for timers */
u_int bli_value; /* for GPU */
};
};
struct bcm_lintc_softc {
device_t bls_dev;
struct mtx bls_mtx;
struct resource * bls_mem;
bus_space_tag_t bls_bst;
bus_space_handle_t bls_bsh;
struct bcm_lintc_irqsrc bls_isrcs[BCM_LINTC_NIRQS];
};
static struct bcm_lintc_softc *bcm_lintc_sc;
#ifdef SMP
#define BCM_LINTC_NIPIS 32 /* only mailbox 0 is used for IPI */
CTASSERT(INTR_IPI_COUNT <= BCM_LINTC_NIPIS);
#endif
#define BCM_LINTC_LOCK(sc) mtx_lock_spin(&(sc)->bls_mtx)
#define BCM_LINTC_UNLOCK(sc) mtx_unlock_spin(&(sc)->bls_mtx)
#define BCM_LINTC_LOCK_INIT(sc) mtx_init(&(sc)->bls_mtx, \
device_get_nameunit((sc)->bls_dev), "bmc_local_intc", MTX_SPIN)
#define BCM_LINTC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->bls_mtx)
#define bcm_lintc_read_4(sc, reg) \
bus_space_read_4((sc)->bls_bst, (sc)->bls_bsh, (reg))
#define bcm_lintc_write_4(sc, reg, val) \
bus_space_write_4((sc)->bls_bst, (sc)->bls_bsh, (reg), (val))
/* Read-modify-write helper: clear the 'mask' bits in register 'reg'. */
static inline void
bcm_lintc_rwreg_clr(struct bcm_lintc_softc *sc, uint32_t reg,
    uint32_t mask)
{

	bcm_lintc_write_4(sc, reg, bcm_lintc_read_4(sc, reg) & ~mask);
}

/* Read-modify-write helper: set the 'mask' bits in register 'reg'. */
static inline void
bcm_lintc_rwreg_set(struct bcm_lintc_softc *sc, uint32_t reg,
    uint32_t mask)
{

	bcm_lintc_write_4(sc, reg, bcm_lintc_read_4(sc, reg) | mask);
}
/*
 * Mask a timer source on every CPU it is bound to; the per-core timer
 * config registers are read-modify-write, hence the lock.  The loop
 * bound of 4 matches the number of cores on this SoC.
 */
static void
bcm_lintc_timer_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{
	cpuset_t *cpus;
	uint32_t cpu;

	cpus = &bli->bli_isrc.isrc_cpu;

	BCM_LINTC_LOCK(sc);
	for (cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, cpus))
			bcm_lintc_rwreg_clr(sc, BCM_LINTC_TIMER_CFG_REG(cpu),
			    bli->bli_mask);
	BCM_LINTC_UNLOCK(sc);
}

/* Unmask a timer source on every CPU it is bound to. */
static void
bcm_lintc_timer_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{
	cpuset_t *cpus;
	uint32_t cpu;

	cpus = &bli->bli_isrc.isrc_cpu;

	BCM_LINTC_LOCK(sc);
	for (cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, cpus))
			bcm_lintc_rwreg_set(sc, BCM_LINTC_TIMER_CFG_REG(cpu),
			    bli->bli_mask);
	BCM_LINTC_UNLOCK(sc);
}
/*
 * Mask the GPU interrupt by routing it to no core (writing zero to the
 * routing register).  The register is only ever accessed by one core,
 * so no locking is needed.
 */
static inline void
bcm_lintc_gpu_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{

	bcm_lintc_write_4(sc, BCM_LINTC_GPU_ROUTING_REG, 0);
}

/*
 * Unmask the GPU interrupt by restoring its routing value (bli_value).
 * Again single-core access, so no locking is needed.
 */
static inline void
bcm_lintc_gpu_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{

	bcm_lintc_write_4(sc, BCM_LINTC_GPU_ROUTING_REG, bli->bli_value);
}
/*
 * Mask the PMU source on every CPU it is bound to via the write-clear
 * half of the PMU routing register pair.
 */
static inline void
bcm_lintc_pmu_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{
	cpuset_t *cpus;
	uint32_t cpu, mask;

	mask = 0;
	cpus = &bli->bli_isrc.isrc_cpu;

	BCM_LINTC_LOCK(sc);
	for (cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, cpus))
			mask |= BCM_LINTC_PIRR_IRQ_EN_CORE(cpu);
	/* Write-clear register. */
	bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_CLR_REG, mask);
	BCM_LINTC_UNLOCK(sc);
}

/*
 * Unmask the PMU source on every CPU it is bound to via the write-set
 * half of the PMU routing register pair.
 */
static inline void
bcm_lintc_pmu_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{
	cpuset_t *cpus;
	uint32_t cpu, mask;

	mask = 0;
	cpus = &bli->bli_isrc.isrc_cpu;

	BCM_LINTC_LOCK(sc);
	for (cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, cpus))
			mask |= BCM_LINTC_PIRR_IRQ_EN_CORE(cpu);
	/* Write-set register. */
	bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_SET_REG, mask);
	BCM_LINTC_UNLOCK(sc);
}
/*
 * Dispatch masking to the per-source-type helper.  Mailbox sources are
 * a deliberate no-op here; AXI and local-timer sources are not
 * implemented and panic if ever reached.
 */
static void
bcm_lintc_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{

	switch (bli->bli_irq) {
	case BCM_LINTC_TIMER0_IRQ:
	case BCM_LINTC_TIMER1_IRQ:
	case BCM_LINTC_TIMER2_IRQ:
	case BCM_LINTC_TIMER3_IRQ:
		bcm_lintc_timer_mask(sc, bli);
		return;
	case BCM_LINTC_MBOX0_IRQ:
	case BCM_LINTC_MBOX1_IRQ:
	case BCM_LINTC_MBOX2_IRQ:
	case BCM_LINTC_MBOX3_IRQ:
		return;
	case BCM_LINTC_GPU_IRQ:
		bcm_lintc_gpu_mask(sc, bli);
		return;
	case BCM_LINTC_PMU_IRQ:
		bcm_lintc_pmu_mask(sc, bli);
		return;
	default:
		panic("%s: not implemented for irq %u", __func__, bli->bli_irq);
	}
}

/* Dispatch unmasking to the per-source-type helper; see bcm_lintc_mask(). */
static void
bcm_lintc_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{

	switch (bli->bli_irq) {
	case BCM_LINTC_TIMER0_IRQ:
	case BCM_LINTC_TIMER1_IRQ:
	case BCM_LINTC_TIMER2_IRQ:
	case BCM_LINTC_TIMER3_IRQ:
		bcm_lintc_timer_unmask(sc, bli);
		return;
	case BCM_LINTC_MBOX0_IRQ:
	case BCM_LINTC_MBOX1_IRQ:
	case BCM_LINTC_MBOX2_IRQ:
	case BCM_LINTC_MBOX3_IRQ:
		return;
	case BCM_LINTC_GPU_IRQ:
		bcm_lintc_gpu_unmask(sc, bli);
		return;
	case BCM_LINTC_PMU_IRQ:
		bcm_lintc_pmu_unmask(sc, bli);
		return;
	default:
		panic("%s: not implemented for irq %u", __func__, bli->bli_irq);
	}
}
#ifdef SMP
static inline void
bcm_lintc_ipi_write(struct bcm_lintc_softc *sc, cpuset_t cpus, u_int ipi)
{
u_int cpu;
uint32_t mask;
mask = 1 << ipi;
for (cpu = 0; cpu < mp_ncpus; cpu++)
if (CPU_ISSET(cpu, &cpus))
bcm_lintc_write_4(sc, BCM_LINTC_MBOX0_SET_REG(cpu),
mask);
}
/*
 * Read this CPU's mailbox 0 and dispatch every IPI bit found in it,
 * clearing each bit before its dispatch so no new request is lost.
 */
static inline void
bcm_lintc_ipi_dispatch(struct bcm_lintc_softc *sc, u_int cpu,
    struct trapframe *tf)
{
	uint32_t pending;
	u_int ipi;

	pending = bcm_lintc_read_4(sc, BCM_LINTC_MBOX0_CLR_REG(cpu));
	if (pending == 0) {
		device_printf(sc->bls_dev, "Spurious ipi detected\n");
		return;
	}
	ipi = 0;
	while (pending != 0) {
		if ((pending & 0x01) != 0) {
			/*
			 * Clear an IPI before dispatching to not miss anyone
			 * and make sure that it's observed by everybody.
			 */
			bcm_lintc_write_4(sc, BCM_LINTC_MBOX0_CLR_REG(cpu),
			    1 << ipi);
			dsb();
			intr_ipi_dispatch(ipi, tf);
		}
		pending >>= 1;
		ipi++;
	}
}
#endif
/*
 * Hand one hardware interrupt to INTRNG; complain if nobody owns it.
 */
static inline void
bcm_lintc_irq_dispatch(struct bcm_lintc_softc *sc, u_int irq,
    struct trapframe *tf)
{
	struct bcm_lintc_irqsrc *src;

	src = &sc->bls_isrcs[irq];
	if (intr_isrc_dispatch(&src->bli_isrc, tf) != 0)
		device_printf(sc->bls_dev, "Stray irq %u detected\n", irq);
}
/*
 * Root interrupt filter.  Repeatedly reads this CPU's pending register
 * and dispatches every recognized source until no known-pending bit
 * remains; unknown leftover bits and zero-iteration entries are logged.
 */
static int
bcm_lintc_intr(void *arg)
{
	struct bcm_lintc_softc *sc;
	u_int cpu;
	uint32_t num, reg;
	struct trapframe *tf;

	sc = arg;
	cpu = PCPU_GET(cpuid);
	tf = curthread->td_intr_frame;

	/* num counts loop passes so a spurious entry can be detected. */
	for (num = 0; ; num++) {
		reg = bcm_lintc_read_4(sc, BCM_LINTC_PENDING_REG(cpu));
		if ((reg & BCM_LINTC_PENDING_MASK) == 0)
			break;
#ifdef SMP
		if (reg & BCM_LINTC_MBOX0_IRQ_MASK)
			bcm_lintc_ipi_dispatch(sc, cpu, tf);
#endif
		if (reg & BCM_LINTC_TIMER0_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER0_IRQ, tf);
		if (reg & BCM_LINTC_TIMER1_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER1_IRQ, tf);
		if (reg & BCM_LINTC_TIMER2_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER2_IRQ, tf);
		if (reg & BCM_LINTC_TIMER3_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER3_IRQ, tf);
		if (reg & BCM_LINTC_GPU_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_GPU_IRQ, tf);
		if (reg & BCM_LINTC_PMU_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_PMU_IRQ, tf);

		arm_irq_memory_barrier(0); /* XXX */
	}
	/* Bits outside the recognized set indicate an unknown source. */
	reg &= ~BCM_LINTC_PENDING_MASK;
	if (reg != 0)
		device_printf(sc->bls_dev, "Unknown interrupt(s) %x\n", reg);
	else if (num == 0)
		device_printf(sc->bls_dev, "Spurious interrupt detected\n");
	return (FILTER_HANDLED);
}
/*
 * PIC method: disable an interrupt source by masking it.
 */
static void
bcm_lintc_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_lintc_softc *sc;

	sc = device_get_softc(dev);
	bcm_lintc_mask(sc, (struct bcm_lintc_irqsrc *)isrc);
}
/*
 * PIC method: enable an interrupt source.  A memory barrier is issued
 * first so prior device writes are visible before the unmask.
 */
static void
bcm_lintc_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_lintc_irqsrc *bli;

	bli = (struct bcm_lintc_irqsrc *)isrc;
	arm_irq_memory_barrier(bli->bli_irq);
	bcm_lintc_unmask(device_get_softc(dev), bli);
}
/*
 * PIC method: translate FDT interrupt-map data (one cell holding the
 * IRQ number) into our interrupt source.  Returns ENOTSUP for non-FDT
 * data and EINVAL for a malformed or out-of-range specifier.
 */
static int
bcm_lintc_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	struct bcm_lintc_softc *sc;
	uint32_t irq;

	if (data->type != INTR_MAP_DATA_FDT)
		return (ENOTSUP);
	if (data->fdt.ncells != 1)
		return (EINVAL);
	irq = data->fdt.cells[0];
	if (irq >= BCM_LINTC_NIRQS)
		return (EINVAL);

	sc = device_get_softc(dev);
	*isrcp = &sc->bls_isrcs[irq].bli_isrc;
	return (0);
}
/*
 * PIC method: mask the source before its ithread runs.  Only the GPU
 * interrupt supports ithread handlers here.
 */
static void
bcm_lintc_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_lintc_irqsrc *bli = (struct bcm_lintc_irqsrc *)isrc;

	if (bli->bli_irq != BCM_LINTC_GPU_IRQ) {
		/*
		 * Handler for PPI interrupt does not make sense much unless
		 * there is one bound ithread for each core for it. Thus the
		 * interrupt can be masked on current core only while ithread
		 * bounded to this core ensures unmasking on the same core.
		 */
		panic("%s: handlers are not supported", __func__);
	}
	bcm_lintc_gpu_mask(device_get_softc(dev), bli);
}
/*
 * PIC method: unmask the source after its ithread has run.  Only the
 * GPU interrupt supports ithread handlers here.
 */
static void
bcm_lintc_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_lintc_irqsrc *bli = (struct bcm_lintc_irqsrc *)isrc;

	if (bli->bli_irq != BCM_LINTC_GPU_IRQ) {
		/* See comment in bcm_lintc_pre_ithread(). */
		panic("%s: handlers are not supported", __func__);
	}
	bcm_lintc_gpu_unmask(device_get_softc(dev), bli);
}
/*
 * PIC method: called after a filter handler completes.  Intentionally
 * empty - no extra acknowledge step is performed for this controller.
 */
static void
bcm_lintc_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
}
/*
 * PIC method: called when a handler is installed.  The first setup of
 * a per-CPU (PPI) source binds it to the CPU performing the setup.
 */
static int
bcm_lintc_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct bcm_lintc_softc *sc;

	if (isrc->isrc_handlers == 0 &&
	    (isrc->isrc_flags & INTR_ISRCF_PPI) != 0) {
		sc = device_get_softc(dev);
		BCM_LINTC_LOCK(sc);
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
		BCM_LINTC_UNLOCK(sc);
	}
	return (0);
}
#ifdef SMP
/*
 * Decide whether a PPI source must be (re)enabled on an AP coming up.
 * Returns false when no handler is installed; honours an existing CPU
 * binding, otherwise adds the AP to the source's CPU set.
 */
static bool
bcm_lint_init_on_ap(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli,
    u_int cpu)
{
	struct intr_irqsrc *isrc = &bli->bli_isrc;

	KASSERT(isrc->isrc_flags & INTR_ISRCF_PPI,
	    ("%s: irq %d is not PPI", __func__, bli->bli_irq));

	if (isrc->isrc_handlers == 0)
		return (false);
	if ((isrc->isrc_flags & INTR_ISRCF_BOUND) != 0)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));

	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
/*
 * On an AP, set 'mask' in read/write register 'reg' for IRQ 'irq' if
 * the source needs to be active on this CPU (see bcm_lint_init_on_ap()).
 */
static void
bcm_lintc_init_rwreg_on_ap(struct bcm_lintc_softc *sc, u_int cpu, u_int irq,
    uint32_t reg, uint32_t mask)
{

	if (bcm_lint_init_on_ap(sc, &sc->bls_isrcs[irq], cpu))
		bcm_lintc_rwreg_set(sc, reg, mask);
}
/*
 * On an AP, route the PMU interrupt to this core if the source needs
 * to be active here (see bcm_lint_init_on_ap()).
 */
static void
bcm_lintc_init_pmu_on_ap(struct bcm_lintc_softc *sc, u_int cpu)
{

	if (bcm_lint_init_on_ap(sc, &sc->bls_isrcs[BCM_LINTC_PMU_IRQ], cpu)) {
		/* Write-set register. */
		bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_SET_REG,
		    BCM_LINTC_PIRR_IRQ_EN_CORE(cpu));
	}
}
/*
 * PIC method run on each AP during SMP startup: re-enable on this core
 * the per-core PPI sources (the four timers and the PMU) that already
 * have handlers installed.
 */
static void
bcm_lintc_init_secondary(device_t dev)
{
	u_int cpu;
	struct bcm_lintc_softc *sc;

	cpu = PCPU_GET(cpuid);
	sc = device_get_softc(dev);

	BCM_LINTC_LOCK(sc);
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER0_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(0));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER1_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(1));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER2_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(2));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER3_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(3));
	bcm_lintc_init_pmu_on_ap(sc, cpu);
	BCM_LINTC_UNLOCK(sc);
}
/*
 * PIC method: deliver an IPI to the given CPU set.  Only the mailbox 0
 * interrupt source is valid here.
 */
static void
bcm_lintc_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct bcm_lintc_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(isrc == &sc->bls_isrcs[BCM_LINTC_MBOX0_IRQ].bli_isrc,
	    ("%s: bad ISRC %p argument", __func__, isrc));
	bcm_lintc_ipi_write(sc, cpus, ipi);
}
/*
 * PIC method: all IPIs share the mailbox 0 interrupt source.
 */
static int
bcm_lintc_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct bcm_lintc_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(ipi < BCM_LINTC_NIPIS, ("%s: too high ipi %u", __func__, ipi));
	*isrcp = &sc->bls_isrcs[BCM_LINTC_MBOX0_IRQ].bli_isrc;
	return (0);
}
#endif
/*
 * Register every interrupt source with INTRNG and claim this device as
 * the root interrupt controller.  Per-core timers and the PMU are
 * per-processor (PPI) sources, mailboxes are IPIs, and the GPU
 * interrupt is an ordinary shared one.
 *
 * Returns 0 on success or the error from source/PIC registration.
 */
static int
bcm_lintc_pic_attach(struct bcm_lintc_softc *sc)
{
	struct bcm_lintc_irqsrc *bisrcs;
	int error;
	u_int flags;
	uint32_t irq;
	const char *name;
	intptr_t xref;

	bisrcs = sc->bls_isrcs;
	name = device_get_nameunit(sc->bls_dev);
	for (irq = 0; irq < BCM_LINTC_NIRQS; irq++) {
		bisrcs[irq].bli_irq = irq;
		/* Per-source enable mask/value and INTRNG flags. */
		switch (irq) {
		case BCM_LINTC_TIMER0_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(0);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER1_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(1);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER2_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(2);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER3_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(3);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_MBOX0_IRQ:
		case BCM_LINTC_MBOX1_IRQ:
		case BCM_LINTC_MBOX2_IRQ:
		case BCM_LINTC_MBOX3_IRQ:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = INTR_ISRCF_IPI;
			break;
		case BCM_LINTC_GPU_IRQ:
			bisrcs[irq].bli_value = BCM_LINTC_GIRR_IRQ_CORE(0);
			flags = 0;
			break;
		case BCM_LINTC_PMU_IRQ:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = INTR_ISRCF_PPI;
			break;
		default:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = 0;
			break;
		}

		error = intr_isrc_register(&bisrcs[irq].bli_isrc, sc->bls_dev,
		    flags, "%s,%u", name, irq);
		if (error != 0)
			return (error);
	}

	/* Register the PIC and make it the root of the interrupt tree. */
	xref = OF_xref_from_node(ofw_bus_get_node(sc->bls_dev));
	error = intr_pic_register(sc->bls_dev, xref);
	if (error != 0)
		return (error);

	return (intr_pic_claim_root(sc->bls_dev, xref, bcm_lintc_intr, sc, 0));
}
/*
 * Probe: match an enabled FDT node compatible with the BCM2836 local
 * interrupt controller.
 */
static int
bcm_lintc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) ||
	    !ofw_bus_is_compatible(dev, "brcm,bcm2836-l1-intc"))
		return (ENXIO);

	device_set_desc(dev, "BCM2836 Interrupt Controller");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Attach the BCM2836 local interrupt controller: map its registers,
 * reset control/prescaler state, disable all per-core timers, enable
 * mailbox 0 on every core for IPIs (SMP), and register the device as
 * the root PIC with INTRNG.
 *
 * Returns 0 on success, ENXIO on any failure.
 *
 * Fix: the memory resource was leaked when bcm_lintc_pic_attach()
 * failed; it is now released on that error path.
 */
static int
bcm_lintc_attach(device_t dev)
{
	struct bcm_lintc_softc *sc;
	int cpu, rid;

	sc = device_get_softc(dev);
	sc->bls_dev = dev;
	/* Only a single instance of this controller is supported. */
	if (bcm_lintc_sc != NULL)
		return (ENXIO);

	rid = 0;
	sc->bls_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bls_mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}
	sc->bls_bst = rman_get_bustag(sc->bls_mem);
	sc->bls_bsh = rman_get_bushandle(sc->bls_mem);

	bcm_lintc_write_4(sc, BCM_LINTC_CONTROL_REG, 0);
	bcm_lintc_write_4(sc, BCM_LINTC_PRESCALER_REG, BCM_LINTC_PSR_19_2);

	/* Disable all timers on all cores. */
	for (cpu = 0; cpu < 4; cpu++)
		bcm_lintc_write_4(sc, BCM_LINTC_TIMER_CFG_REG(cpu), 0);

#ifdef SMP
	/* Enable mailbox 0 on all cores used for IPI. */
	for (cpu = 0; cpu < 4; cpu++)
		bcm_lintc_write_4(sc, BCM_LINTC_MBOX_CFG_REG(cpu),
		    BCM_LINTC_MCR_IRQ_EN_MBOX(0));
#endif
	if (bcm_lintc_pic_attach(sc) != 0) {
		device_printf(dev, "could not attach PIC\n");
		/* Do not leak the register window on failure. */
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->bls_mem);
		sc->bls_mem = NULL;
		return (ENXIO);
	}

	BCM_LINTC_LOCK_INIT(sc);
	bcm_lintc_sc = sc;
	return (0);
}
/*
 * Device and INTRNG PIC interface methods for the local interrupt
 * controller.  The SMP-only methods cover AP bring-up and IPIs.
 */
static device_method_t bcm_lintc_methods[] = {
	DEVMETHOD(device_probe,		bcm_lintc_probe),
	DEVMETHOD(device_attach,	bcm_lintc_attach),

	DEVMETHOD(pic_disable_intr,	bcm_lintc_disable_intr),
	DEVMETHOD(pic_enable_intr,	bcm_lintc_enable_intr),
	DEVMETHOD(pic_map_intr,		bcm_lintc_map_intr),
	DEVMETHOD(pic_post_filter,	bcm_lintc_post_filter),
	DEVMETHOD(pic_post_ithread,	bcm_lintc_post_ithread),
	DEVMETHOD(pic_pre_ithread,	bcm_lintc_pre_ithread),
	DEVMETHOD(pic_setup_intr,	bcm_lintc_setup_intr),
#ifdef SMP
	DEVMETHOD(pic_init_secondary,	bcm_lintc_init_secondary),
	DEVMETHOD(pic_ipi_send,		bcm_lintc_ipi_send),
	DEVMETHOD(pic_ipi_setup,	bcm_lintc_ipi_setup),
#endif

	DEVMETHOD_END
};
/*
 * Driver glue: attach early (interrupt pass) under simplebus so the
 * controller is available before devices that need interrupts.
 */
static driver_t bcm_lintc_driver = {
	"local_intc",
	bcm_lintc_methods,
	sizeof(struct bcm_lintc_softc),
};
static devclass_t bcm_lintc_devclass;
EARLY_DRIVER_MODULE(local_intc, simplebus, bcm_lintc_driver, bcm_lintc_devclass,
    0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
#else
/*
* A driver for features of the bcm2836.
*/
@ -214,3 +925,4 @@ static driver_t bcm2836_driver = {
EARLY_DRIVER_MODULE(bcm2836, nexus, bcm2836_driver, bcm2836_devclass, 0, 0,
BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
#endif

View File

@ -30,10 +30,11 @@
#ifndef _BCM2815_BCM2836_H
#define _BCM2815_BCM2836_H
#ifndef ARM_INTRNG
#define BCM2836_GPU_IRQ 8
int bcm2836_get_next_irq(int);
void bcm2836_mask_irq(uintptr_t);
void bcm2836_unmask_irq(uintptr_t);
#endif
#endif

View File

@ -139,6 +139,7 @@ platform_mp_start_ap(void)
}
}
#ifndef ARM_INTRNG
void
pic_ipi_send(cpuset_t cpus, u_int ipi)
{
@ -176,3 +177,4 @@ void
pic_ipi_clear(int ipi)
{
}
#endif

View File

@ -24,6 +24,8 @@ include "std.armv6"
include "../broadcom/bcm2835/std.rpi"
include "../broadcom/bcm2835/std.bcm2835"
options ARM_INTRNG
options HZ=100
options SCHED_4BSD # 4BSD scheduler
options PLATFORM

View File

@ -24,6 +24,8 @@ include "std.armv6"
include "../broadcom/bcm2835/std.rpi"
include "../broadcom/bcm2835/std.bcm2836"
options ARM_INTRNG
options HZ=100
options SCHED_ULE # ULE scheduler
options SMP # Enable multiple cores

View File

@ -823,12 +823,10 @@ pllx_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags)
return (0);
}
/* Set bypass. */
/* PLLX doesn't have bypass, disable it first. */
RD4(sc, sc->base_reg, &reg);
reg |= PLL_BASE_BYPASS;
reg &= ~PLL_BASE_ENABLE;
WR4(sc, sc->base_reg, reg);
RD4(sc, sc->base_reg, &reg);
DELAY(100);
/* Set PLL. */
RD4(sc, sc->base_reg, &reg);
@ -840,16 +838,16 @@ pllx_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags)
RD4(sc, sc->base_reg, &reg);
DELAY(100);
/* Enable lock detection. */
RD4(sc, sc->misc_reg, &reg);
reg |= sc->lock_enable;
WR4(sc, sc->misc_reg, reg);
/* Enable PLL. */
RD4(sc, sc->base_reg, &reg);
reg |= PLL_BASE_ENABLE;
WR4(sc, sc->base_reg, reg);
/* Enable lock detection */
RD4(sc, sc->misc_reg, &reg);
reg |= sc->lock_enable;
WR4(sc, sc->misc_reg, reg);
rv = wait_for_lock(sc);
if (rv != 0) {
/* Disable PLL */
@ -860,10 +858,6 @@ pllx_set_freq(struct pll_sc *sc, uint64_t fin, uint64_t *fout, int flags)
}
RD4(sc, sc->misc_reg, &reg);
/* Clear bypass. */
RD4(sc, sc->base_reg, &reg);
reg &= ~PLL_BASE_BYPASS;
WR4(sc, sc->base_reg, reg);
*fout = ((fin / m) * n) / p;
return (0);
}

View File

@ -205,8 +205,7 @@ super_mux_set_mux(struct clknode *clk, int idx)
(state != SUPER_MUX_STATE_IDLE)) {
panic("Unexpected super mux state: %u", state);
}
shift = state * SUPER_MUX_MUX_WIDTH;
shift = (state - 1) * SUPER_MUX_MUX_WIDTH;
sc->mux = idx;
if (sc->flags & SMF_HAVE_DIVIDER_2) {
if (idx == sc->src_div2) {
@ -222,6 +221,7 @@ super_mux_set_mux(struct clknode *clk, int idx)
}
reg &= ~(((1 << SUPER_MUX_MUX_WIDTH) - 1) << shift);
reg |= idx << shift;
WR4(sc, sc->base_reg, reg);
RD4(sc, sc->base_reg, &dummy);
DEVICE_UNLOCK(sc);

View File

@ -335,12 +335,27 @@ set_cpu_freq(struct tegra124_cpufreq_softc *sc, uint64_t freq)
if (rv != 0)
return (rv);
}
rv = clk_set_freq(sc->clk_cpu_g, point->freq, CLK_SET_ROUND_DOWN);
/* Switch supermux to PLLP first */
rv = clk_set_parent_by_clk(sc->clk_cpu_g, sc->clk_pll_p);
if (rv != 0) {
device_printf(sc->dev, "Can't set parent to PLLP\n");
return (rv);
}
/* Set PLLX frequency */
rv = clk_set_freq(sc->clk_pll_x, point->freq, CLK_SET_ROUND_DOWN);
if (rv != 0) {
device_printf(sc->dev, "Can't set CPU clock frequency\n");
return (rv);
}
rv = clk_set_parent_by_clk(sc->clk_cpu_g, sc->clk_pll_x);
if (rv != 0) {
device_printf(sc->dev, "Can't set parent to PLLX\n");
return (rv);
}
if (sc->act_speed_point->uvolt > point->uvolt) {
/* set cpu voltage */
rv = regulator_set_voltage(sc->supply_vdd_cpu,

View File

@ -253,8 +253,8 @@ tegra_ehci_attach(device_t dev)
}
/* Setup interrupt handler. */
rv = bus_setup_intr(dev, sc->ehci_irq_res, INTR_TYPE_BIO, NULL,
(driver_intr_t *)ehci_interrupt, esc, &esc->sc_intr_hdl);
rv = bus_setup_intr(dev, sc->ehci_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
NULL, (driver_intr_t *)ehci_interrupt, esc, &esc->sc_intr_hdl);
if (rv != 0) {
device_printf(dev, "Could not setup IRQ\n");
goto out;

View File

@ -64,7 +64,7 @@ __FBSDID("$FreeBSD$");
#define INTC_ISR_CLEAR(x) (0x94 + ((x) * 0x20))
#define INTC_SIR_SPURIOUS_MASK 0xffffff80
#define INTS_SIR_ACTIVE_MASK 0x7f
#define INTC_SIR_ACTIVE_MASK 0x7f
#define INTC_NIRQS 128
@ -143,7 +143,7 @@ ti_aintc_intr(void *arg)
}
/* Only level-sensitive interrupts detection is supported. */
irq &= INTS_SIR_ACTIVE_MASK;
irq &= INTC_SIR_ACTIVE_MASK;
if (intr_isrc_dispatch(&sc->aintc_isrcs[irq].tai_isrc,
curthread->td_intr_frame) != 0) {
ti_aintc_irq_mask(sc, irq);

View File

@ -144,7 +144,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <1>;
#interrupt-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pins_reserved>;

View File

@ -137,7 +137,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <1>;
#interrupt-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pins_reserved>;

View File

@ -968,7 +968,7 @@ fdc_worker(struct fdc_data *fdc)
fdc->bp = bioq_takefirst(&fdc->head);
if (fdc->bp == NULL)
msleep(&fdc->head, &fdc->fdc_mtx,
PRIBIO, "-", hz);
PRIBIO, "-", 0);
} while (fdc->bp == NULL &&
(fdc->flags & FDC_KTHREAD_EXIT) == 0);
mtx_unlock(&fdc->fdc_mtx);

View File

@ -60,10 +60,20 @@ struct ds1307_softc {
struct intr_config_hook enum_hook;
uint16_t sc_addr; /* DS1307 slave address. */
uint8_t sc_ctrl;
int sc_mcp7941x;
};
static void ds1307_start(void *);
#ifdef FDT
static const struct ofw_compat_data ds1307_compat_data[] = {
{"dallas,ds1307", (uintptr_t)"Maxim DS1307 RTC"},
{"maxim,ds1307", (uintptr_t)"Maxim DS1307 RTC"},
{"microchip,mcp7941x", (uintptr_t)"Microchip MCP7941x RTC"},
{ NULL, 0 }
};
#endif
static int
ds1307_read(device_t dev, uint16_t addr, uint8_t reg, uint8_t *data, size_t len)
{
@ -167,21 +177,25 @@ ds1307_set_24hrs_mode(struct ds1307_softc *sc)
static int
ds1307_sqwe_sysctl(SYSCTL_HANDLER_ARGS)
{
int sqwe, error, newv;
int sqwe, error, newv, sqwe_bit;
struct ds1307_softc *sc;
sc = (struct ds1307_softc *)arg1;
error = ds1307_ctrl_read(sc);
if (error != 0)
return (error);
sqwe = newv = (sc->sc_ctrl & DS1307_CTRL_SQWE) ? 1 : 0;
if (sc->sc_mcp7941x)
sqwe_bit = MCP7941X_CTRL_SQWE;
else
sqwe_bit = DS1307_CTRL_SQWE;
sqwe = newv = (sc->sc_ctrl & sqwe_bit) ? 1 : 0;
error = sysctl_handle_int(oidp, &newv, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
if (sqwe != newv) {
sc->sc_ctrl &= ~DS1307_CTRL_SQWE;
sc->sc_ctrl &= ~sqwe_bit;
if (newv)
sc->sc_ctrl |= DS1307_CTRL_SQWE;
sc->sc_ctrl |= sqwe_bit;
error = ds1307_ctrl_write(sc);
if (error != 0)
return (error);
@ -252,17 +266,25 @@ ds1307_sqw_out_sysctl(SYSCTL_HANDLER_ARGS)
static int
ds1307_probe(device_t dev)
{
#ifdef FDT
const struct ofw_compat_data *compat;
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "dallas,ds1307") &&
!ofw_bus_is_compatible(dev, "maxim,ds1307"))
compat = ofw_bus_search_compatible(dev, ds1307_compat_data);
if (compat == NULL)
return (ENXIO);
#endif
device_set_desc(dev, (const char *)compat->ocd_data);
return (BUS_PROBE_DEFAULT);
#else
device_set_desc(dev, "Maxim DS1307 RTC");
return (BUS_PROBE_DEFAULT);
#endif
}
static int
@ -277,6 +299,9 @@ ds1307_attach(device_t dev)
sc->enum_hook.ich_func = ds1307_start;
sc->enum_hook.ich_arg = dev;
if (ofw_bus_is_compatible(dev, "microchip,mcp7941x"))
sc->sc_mcp7941x = 1;
/*
* We have to wait until interrupts are enabled. Usually I2C read
* and write only works when the interrupts are available.

View File

@ -50,6 +50,7 @@
#define DS1307_YEAR_MASK 0xff
#define DS1307_CONTROL 0x07
#define DS1307_CTRL_OUT (1 << 7)
#define MCP7941X_CTRL_SQWE (1 << 6)
#define DS1307_CTRL_SQWE (1 << 4)
#define DS1307_CTRL_RS1 (1 << 1)
#define DS1307_CTRL_RS0 (1 << 0)

View File

@ -70,6 +70,9 @@ __FBSDID("$FreeBSD$");
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
@ -577,6 +580,8 @@ urtwn_attach(device_t self)
#endif
| IEEE80211_C_WPA /* 802.11i */
| IEEE80211_C_WME /* 802.11e */
| IEEE80211_C_SWAMSDUTX /* Do software A-MSDU TX */
| IEEE80211_C_FF /* Atheros fast-frames */
;
ic->ic_cryptocaps =
@ -588,7 +593,9 @@ urtwn_attach(device_t self)
if (urtwn_enable_11n) {
device_printf(self, "enabling 11n\n");
ic->ic_htcaps = IEEE80211_HTC_HT |
#if 0
IEEE80211_HTC_AMPDU |
#endif
IEEE80211_HTC_AMSDU |
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;
@ -894,6 +901,15 @@ urtwn_report_intr(struct usb_xfer *xfer, struct urtwn_data *data)
buf = data->buf;
stat = (struct r92c_rx_stat *)buf;
/*
* For 88E chips we can tie the FF flushing here;
* this is where we do know exactly how deep the
* transmit queue is.
*
* But it won't work for R92 chips, so we can't
* take the easy way out.
*/
if (sc->chip & URTWN_CHIP_88E) {
int report_sel = MS(le32toh(stat->rxdw3), R88E_RXDW3_RPT);
@ -1101,7 +1117,7 @@ urtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
data = STAILQ_FIRST(&sc->sc_rx_inactive);
if (data == NULL) {
KASSERT(m == NULL, ("mbuf isn't NULL"));
return;
goto finish;
}
STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next);
@ -1131,7 +1147,6 @@ urtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
(void)ieee80211_input_all(ic, m, rssi - nf,
nf);
}
URTWN_LOCK(sc);
m = next;
}
@ -1150,6 +1165,20 @@ urtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
}
break;
}
finish:
/* Finished receive; age anything left on the FF queue by a little bump */
/*
* XXX TODO: just make this a callout timer schedule so we can
* flush the FF staging queue if we're approaching idle.
*/
#ifdef IEEE80211_SUPPORT_SUPERG
URTWN_UNLOCK(sc);
ieee80211_ff_age_all(ic, 1);
URTWN_LOCK(sc);
#endif
/* Kick-start more transmit in case we stalled */
urtwn_start(sc);
}
static void
@ -1161,6 +1190,9 @@ urtwn_txeof(struct urtwn_softc *sc, struct urtwn_data *data, int status)
if (data->ni != NULL) /* not a beacon frame */
ieee80211_tx_complete(data->ni, data->m, status);
if (sc->sc_tx_n_active > 0)
sc->sc_tx_n_active--;
data->ni = NULL;
data->m = NULL;
@ -1269,6 +1301,9 @@ static void
urtwn_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct urtwn_softc *sc = usbd_xfer_softc(xfer);
#ifdef IEEE80211_SUPPORT_SUPERG
struct ieee80211com *ic = &sc->sc_ic;
#endif
struct urtwn_data *data;
URTWN_ASSERT_LOCKED(sc);
@ -1287,12 +1322,14 @@ urtwn_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error)
if (data == NULL) {
URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT,
"%s: empty pending queue\n", __func__);
sc->sc_tx_n_active = 0;
goto finish;
}
STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next);
STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next);
usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen);
usbd_transfer_submit(xfer);
sc->sc_tx_n_active++;
break;
default:
data = STAILQ_FIRST(&sc->sc_tx_active);
@ -1307,6 +1344,35 @@ urtwn_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error)
break;
}
finish:
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* If the TX active queue drops below a certain
* threshold, ensure we age fast-frames out so they're
* transmitted.
*/
if (sc->sc_tx_n_active <= 1) {
/* XXX ew - net80211 should defer this for us! */
/*
* Note: this sc_tx_n_active currently tracks
* the number of pending transmit submissions
* and not the actual depth of the TX frames
* pending to the hardware. That means that
* we're going to end up with some sub-optimal
* aggregation behaviour.
*/
/*
* XXX TODO: just make this a callout timer schedule so we can
* flush the FF staging queue if we're approaching idle.
*/
URTWN_UNLOCK(sc);
ieee80211_ff_flush(ic, WME_AC_VO);
ieee80211_ff_flush(ic, WME_AC_VI);
ieee80211_ff_flush(ic, WME_AC_BE);
ieee80211_ff_flush(ic, WME_AC_BK);
URTWN_LOCK(sc);
}
#endif
/* Kick-start more transmit */
urtwn_start(sc);
}
@ -3153,6 +3219,11 @@ urtwn_start(struct urtwn_softc *sc)
}
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
m->m_pkthdr.rcvif = NULL;
URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: called; m=%p\n",
__func__,
m);
if (urtwn_tx_data(sc, ni, m, bf) != 0) {
if_inc_counter(ni->ni_vap->iv_ifp,
IFCOUNTER_OERRORS, 1);
@ -5326,6 +5397,10 @@ urtwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
struct urtwn_data *bf;
int error;
URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: called; m=%p\n",
__func__,
m);
/* prevent management frames from being sent if we're not ready */
URTWN_LOCK(sc);
if (!(sc->sc_flags & URTWN_RUNNING)) {

View File

@ -17,12 +17,14 @@
* $FreeBSD$
*/
#define URTWN_RX_LIST_COUNT 1
#define URTWN_RX_LIST_COUNT 64
#define URTWN_TX_LIST_COUNT 8
#define URTWN_HOST_CMD_RING_COUNT 32
#define URTWN_RXBUFSZ (16 * 1024)
#define URTWN_TXBUFSZ (sizeof(struct r92c_tx_desc) + IEEE80211_MAX_LEN)
#define URTWN_RXBUFSZ (8 * 1024)
//#define URTWN_TXBUFSZ (sizeof(struct r92c_tx_desc) + IEEE80211_MAX_LEN)
/* Leave enough space for an A-MSDU frame */
#define URTWN_TXBUFSZ (16 * 1024)
#define URTWN_RX_DESC_SIZE (sizeof(struct r92c_rx_stat))
#define URTWN_TX_DESC_SIZE (sizeof(struct r92c_tx_desc))
@ -195,6 +197,7 @@ struct urtwn_softc {
urtwn_datahead sc_rx_inactive;
struct urtwn_data sc_tx[URTWN_TX_LIST_COUNT];
urtwn_datahead sc_tx_active;
int sc_tx_n_active;
urtwn_datahead sc_tx_inactive;
urtwn_datahead sc_tx_pending;

View File

@ -294,7 +294,7 @@ fsl_ehci_attach(device_t self)
}
/* Setup interrupt handler */
err = bus_setup_intr(self, sc->sc_irq_res, INTR_TYPE_BIO,
err = bus_setup_intr(self, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
NULL, (driver_intr_t *)ehci_interrupt, sc, &sc->sc_intr_hdl);
if (err) {
device_printf(self, "Could not setup irq, %d\n", err);

View File

@ -261,8 +261,8 @@ imx_ehci_attach(device_t dev)
}
/* Setup interrupt handler. */
err = bus_setup_intr(dev, sc->ehci_irq_res, INTR_TYPE_BIO, NULL,
(driver_intr_t *)ehci_interrupt, esc, &esc->sc_intr_hdl);
err = bus_setup_intr(dev, sc->ehci_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
NULL, (driver_intr_t *)ehci_interrupt, esc, &esc->sc_intr_hdl);
if (err != 0) {
device_printf(dev, "Could not setup IRQ\n");
goto out;

View File

@ -33,7 +33,6 @@
* This file contains the driver for the Mentor Graphics Inventra USB
* 2.0 High Speed Dual-Role controller.
*
* NOTE: The current implementation only supports Device Side Mode!
*/
#ifdef USB_GLOBAL_INCLUDE_FILE

View File

@ -1178,7 +1178,7 @@ uftdi_cfg_open(struct ucom_softc *ucom)
* DPRINTF() so that you can see the point at which open gets called
* when debugging is enabled.
*/
DPRINTF("");
DPRINTF("\n");
}
static void
@ -1190,7 +1190,7 @@ uftdi_cfg_close(struct ucom_softc *ucom)
* DPRINTF() so that you can see the point at which close gets called
* when debugging is enabled.
*/
DPRINTF("");
DPRINTF("\n");
}
static void
@ -1202,6 +1202,8 @@ uftdi_write_callback(struct usb_xfer *xfer, usb_error_t error)
uint32_t buflen;
uint8_t buf[1];
DPRINTFN(3, "\n");
switch (USB_GET_STATE(xfer)) {
default: /* Error */
if (error != USB_ERR_CANCELLED) {
@ -1262,6 +1264,8 @@ uftdi_read_callback(struct usb_xfer *xfer, usb_error_t error)
int pktmax;
int offset;
DPRINTFN(3, "\n");
usbd_xfer_status(xfer, &buflen, NULL, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
@ -1343,6 +1347,8 @@ uftdi_cfg_set_dtr(struct ucom_softc *ucom, uint8_t onoff)
uint16_t wValue;
struct usb_device_request req;
DPRINTFN(2, "DTR=%u\n", onoff);
wValue = onoff ? FTDI_SIO_SET_DTR_HIGH : FTDI_SIO_SET_DTR_LOW;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
@ -1362,6 +1368,8 @@ uftdi_cfg_set_rts(struct ucom_softc *ucom, uint8_t onoff)
uint16_t wValue;
struct usb_device_request req;
DPRINTFN(2, "RTS=%u\n", onoff);
wValue = onoff ? FTDI_SIO_SET_RTS_HIGH : FTDI_SIO_SET_RTS_LOW;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
@ -1381,6 +1389,8 @@ uftdi_cfg_set_break(struct ucom_softc *ucom, uint8_t onoff)
uint16_t wValue;
struct usb_device_request req;
DPRINTFN(2, "BREAK=%u\n", onoff);
if (onoff) {
sc->sc_last_lcr |= FTDI_SIO_SET_BREAK;
} else {
@ -1618,14 +1628,14 @@ uftdi_cfg_param(struct ucom_softc *ucom, struct termios *t)
struct uftdi_param_config cfg;
struct usb_device_request req;
DPRINTF("\n");
if (uftdi_set_parm_soft(ucom, t, &cfg)) {
/* should not happen */
return;
}
sc->sc_last_lcr = cfg.lcr;
DPRINTF("\n");
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = FTDI_SIO_SET_BAUD_RATE;
USETW(req.wValue, cfg.baud_lobits);
@ -1656,8 +1666,7 @@ uftdi_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
{
struct uftdi_softc *sc = ucom->sc_parent;
DPRINTF("msr=0x%02x lsr=0x%02x\n",
sc->sc_msr, sc->sc_lsr);
DPRINTFN(3, "msr=0x%02x lsr=0x%02x\n", sc->sc_msr, sc->sc_lsr);
*msr = sc->sc_msr;
*lsr = sc->sc_lsr;
@ -1669,6 +1678,8 @@ uftdi_reset(struct ucom_softc *ucom, int reset_type)
struct uftdi_softc *sc = ucom->sc_parent;
usb_device_request_t req;
DPRINTFN(2, "\n");
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = FTDI_SIO_RESET;
@ -1686,6 +1697,8 @@ uftdi_set_bitmode(struct ucom_softc *ucom, uint8_t bitmode, uint8_t iomask)
usb_device_request_t req;
int rv;
DPRINTFN(2, "\n");
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = FTDI_SIO_SET_BITMODE;
@ -1710,6 +1723,8 @@ uftdi_get_bitmode(struct ucom_softc *ucom, uint8_t *bitmode, uint8_t *iomask)
struct uftdi_softc *sc = ucom->sc_parent;
usb_device_request_t req;
DPRINTFN(2, "\n");
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = FTDI_SIO_GET_BITMODE;
@ -1727,6 +1742,8 @@ uftdi_set_latency(struct ucom_softc *ucom, int latency)
struct uftdi_softc *sc = ucom->sc_parent;
usb_device_request_t req;
DPRINTFN(2, "\n");
if (latency < 0 || latency > 255)
return (USB_ERR_INVAL);
@ -1748,6 +1765,8 @@ uftdi_get_latency(struct ucom_softc *ucom, int *latency)
usb_error_t err;
uint8_t buf;
DPRINTFN(2, "\n");
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = FTDI_SIO_GET_LATENCY;
@ -1768,6 +1787,8 @@ uftdi_set_event_char(struct ucom_softc *ucom, int echar)
usb_device_request_t req;
uint8_t enable;
DPRINTFN(2, "\n");
enable = (echar == -1) ? 0 : 1;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
@ -1787,6 +1808,8 @@ uftdi_set_error_char(struct ucom_softc *ucom, int echar)
usb_device_request_t req;
uint8_t enable;
DPRINTFN(2, "\n");
enable = (echar == -1) ? 0 : 1;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
@ -1807,6 +1830,8 @@ uftdi_read_eeprom(struct ucom_softc *ucom, struct uftdi_eeio *eeio)
usb_error_t err;
uint16_t widx, wlength, woffset;
DPRINTFN(3, "\n");
/* Offset and length must both be evenly divisible by two. */
if ((eeio->offset | eeio->length) & 0x01)
return (EINVAL);
@ -1835,6 +1860,8 @@ uftdi_write_eeprom(struct ucom_softc *ucom, struct uftdi_eeio *eeio)
usb_error_t err;
uint16_t widx, wlength, woffset;
DPRINTFN(3, "\n");
/* Offset and length must both be evenly divisible by two. */
if ((eeio->offset | eeio->length) & 0x01)
return (EINVAL);
@ -1861,6 +1888,8 @@ uftdi_erase_eeprom(struct ucom_softc *ucom, int confirmation)
usb_device_request_t req;
usb_error_t err;
DPRINTFN(2, "\n");
/* Small effort to prevent accidental erasure. */
if (confirmation != UFTDI_CONFIRM_ERASE)
return (EINVAL);
@ -1883,8 +1912,6 @@ uftdi_ioctl(struct ucom_softc *ucom, uint32_t cmd, caddr_t data,
int err;
struct uftdi_bitmode * mode;
DPRINTF("portno: %d cmd: %#x\n", ucom->sc_portno, cmd);
switch (cmd) {
case UFTDIIOC_RESET_IO:
case UFTDIIOC_RESET_RX:

View File

@ -521,7 +521,9 @@ rsu_attach(device_t self)
/* Enable basic HT */
ic->ic_htcaps = IEEE80211_HTC_HT |
#if 0
IEEE80211_HTC_AMPDU |
#endif
IEEE80211_HTC_AMSDU |
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;

View File

@ -260,7 +260,7 @@ xctrl_suspend()
#ifdef SMP
/* Send an IPI_BITMAP in case there are pending bitmap IPIs. */
lapic_ipi_vectored(IPI_BITMAP_VECTOR, APIC_IPI_DEST_ALL);
if (smp_started && !CPU_EMPTY(&cpu_suspend_map)) {
if (!CPU_EMPTY(&cpu_suspend_map)) {
/*
* Now that event channels have been initialized,
* resume CPUs.

View File

@ -1197,3 +1197,4 @@ static moduledata_t linux_elf_mod = {
};
DECLARE_MODULE_TIED(linuxelf, linux_elf_mod, SI_SUB_EXEC, SI_ORDER_ANY);
FEATURE(linux, "Linux 32bit support");

View File

@ -93,6 +93,10 @@ SYSCTL_UINT(_kern_racct, OID_AUTO, pcpu_threshold, CTLFLAG_RW, &pcpu_threshold,
static struct mtx racct_lock;
MTX_SYSINIT(racct_lock, &racct_lock, "racct lock", MTX_DEF);
#define RACCT_LOCK() mtx_lock(&racct_lock)
#define RACCT_UNLOCK() mtx_unlock(&racct_lock)
#define RACCT_LOCK_ASSERT() mtx_assert(&racct_lock, MA_OWNED)
static uma_zone_t racct_zone;
static void racct_sub_racct(struct racct *dest, const struct racct *src);
@ -391,7 +395,7 @@ racct_add_racct(struct racct *dest, const struct racct *src)
int i;
ASSERT_RACCT_ENABLED();
mtx_assert(&racct_lock, MA_OWNED);
RACCT_LOCK_ASSERT();
/*
* Update resource usage in dest.
@ -413,7 +417,7 @@ racct_sub_racct(struct racct *dest, const struct racct *src)
int i;
ASSERT_RACCT_ENABLED();
mtx_assert(&racct_lock, MA_OWNED);
RACCT_LOCK_ASSERT();
/*
* Update resource usage in dest.
@ -466,7 +470,7 @@ racct_destroy_locked(struct racct **racctp)
SDT_PROBE1(racct, , racct, destroy, racctp);
mtx_assert(&racct_lock, MA_OWNED);
RACCT_LOCK_ASSERT();
KASSERT(racctp != NULL, ("NULL racctp"));
KASSERT(*racctp != NULL, ("NULL racct"));
@ -493,9 +497,9 @@ racct_destroy(struct racct **racct)
if (!racct_enable)
return;
mtx_lock(&racct_lock);
RACCT_LOCK();
racct_destroy_locked(racct);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
}
/*
@ -509,7 +513,7 @@ racct_adjust_resource(struct racct *racct, int resource,
{
ASSERT_RACCT_ENABLED();
mtx_assert(&racct_lock, MA_OWNED);
RACCT_LOCK_ASSERT();
KASSERT(racct != NULL, ("NULL racct"));
racct->r_resources[resource] += amount;
@ -574,9 +578,9 @@ racct_add(struct proc *p, int resource, uint64_t amount)
SDT_PROBE3(racct, , rusage, add, p, resource, amount);
mtx_lock(&racct_lock);
RACCT_LOCK();
error = racct_add_locked(p, resource, amount, 0);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
return (error);
}
@ -593,9 +597,9 @@ racct_add_force(struct proc *p, int resource, uint64_t amount)
SDT_PROBE3(racct, , rusage, add__force, p, resource, amount);
mtx_lock(&racct_lock);
RACCT_LOCK();
racct_add_locked(p, resource, amount, 1);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
}
static void
@ -625,9 +629,9 @@ racct_add_cred(struct ucred *cred, int resource, uint64_t amount)
if (!racct_enable)
return;
mtx_lock(&racct_lock);
RACCT_LOCK();
racct_add_cred_locked(cred, resource, amount);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
}
static int
@ -703,9 +707,9 @@ racct_set(struct proc *p, int resource, uint64_t amount)
SDT_PROBE3(racct, , rusage, set__force, p, resource, amount);
mtx_lock(&racct_lock);
RACCT_LOCK();
error = racct_set_locked(p, resource, amount, 0);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
return (error);
}
@ -718,9 +722,9 @@ racct_set_force(struct proc *p, int resource, uint64_t amount)
SDT_PROBE3(racct, , rusage, set, p, resource, amount);
mtx_lock(&racct_lock);
RACCT_LOCK();
racct_set_locked(p, resource, amount, 1);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
}
/*
@ -800,7 +804,7 @@ racct_sub(struct proc *p, int resource, uint64_t amount)
KASSERT(RACCT_CAN_DROP(resource),
("%s: called for non-droppable resource %d", __func__, resource));
mtx_lock(&racct_lock);
RACCT_LOCK();
KASSERT(amount <= p->p_racct->r_resources[resource],
("%s: freeing %ju of resource %d, which is more "
"than allocated %jd for %s (pid %d)", __func__, amount, resource,
@ -808,7 +812,7 @@ racct_sub(struct proc *p, int resource, uint64_t amount)
racct_adjust_resource(p->p_racct, resource, -amount);
racct_sub_cred_locked(p->p_ucred, resource, amount);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
}
static void
@ -843,9 +847,9 @@ racct_sub_cred(struct ucred *cred, int resource, uint64_t amount)
if (!racct_enable)
return;
mtx_lock(&racct_lock);
RACCT_LOCK();
racct_sub_cred_locked(cred, resource, amount);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
}
/*
@ -866,7 +870,7 @@ racct_proc_fork(struct proc *parent, struct proc *child)
PROC_LOCK(parent);
PROC_LOCK(child);
mtx_lock(&racct_lock);
RACCT_LOCK();
#ifdef RCTL
error = rctl_proc_fork(parent, child);
@ -896,7 +900,7 @@ racct_proc_fork(struct proc *parent, struct proc *child)
error += racct_add_locked(child, RACCT_NTHR, 1, 0);
out:
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
PROC_UNLOCK(child);
PROC_UNLOCK(parent);
@ -919,10 +923,10 @@ racct_proc_fork_done(struct proc *child)
if (!racct_enable)
return;
mtx_lock(&racct_lock);
RACCT_LOCK();
rctl_enforce(child, RACCT_NPROC, 0);
rctl_enforce(child, RACCT_NTHR, 0);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
#endif
}
@ -958,7 +962,7 @@ racct_proc_exit(struct proc *p)
pct_estimate = 0;
pct = racct_getpcpu(p, pct_estimate);
mtx_lock(&racct_lock);
RACCT_LOCK();
racct_set_locked(p, RACCT_CPU, runtime, 0);
racct_add_cred_locked(p->p_ucred, RACCT_PCTCPU, pct);
@ -970,7 +974,7 @@ racct_proc_exit(struct proc *p)
racct_set_locked(p, i, 0, 0);
}
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
PROC_UNLOCK(p);
#ifdef RCTL
@ -1003,7 +1007,7 @@ racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
newpr = newcred->cr_prison;
oldpr = oldcred->cr_prison;
mtx_lock(&racct_lock);
RACCT_LOCK();
if (newuip != olduip) {
racct_sub_racct(olduip->ui_racct, p->p_racct);
racct_add_racct(newuip->ui_racct, p->p_racct);
@ -1020,7 +1024,7 @@ racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
racct_add_racct(pr->pr_prison_racct->prr_racct,
p->p_racct);
}
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
#ifdef RCTL
rctl_proc_ucred_changed(p, newcred);
@ -1033,12 +1037,10 @@ racct_move(struct racct *dest, struct racct *src)
ASSERT_RACCT_ENABLED();
mtx_lock(&racct_lock);
RACCT_LOCK();
racct_add_racct(dest, src);
racct_sub_racct(src, src);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
}
static void
@ -1112,7 +1114,7 @@ racct_decay_callback(struct racct *racct, void *dummy1, void *dummy2)
int64_t r_old, r_new;
ASSERT_RACCT_ENABLED();
mtx_assert(&racct_lock, MA_OWNED);
RACCT_LOCK_ASSERT();
r_old = racct->r_resources[RACCT_PCTCPU];
@ -1128,14 +1130,14 @@ static void
racct_decay_pre(void)
{
mtx_lock(&racct_lock);
RACCT_LOCK();
}
static void
racct_decay_post(void)
{
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
}
static void
@ -1203,13 +1205,13 @@ racctd(void)
} else
pct_estimate = 0;
pct = racct_getpcpu(p, pct_estimate);
mtx_lock(&racct_lock);
RACCT_LOCK();
racct_set_locked(p, RACCT_PCTCPU, pct, 1);
racct_set_locked(p, RACCT_CPU, runtime, 0);
racct_set_locked(p, RACCT_WALLCLOCK,
(uint64_t)wallclock.tv_sec * 1000000 +
wallclock.tv_usec, 0);
mtx_unlock(&racct_lock);
RACCT_UNLOCK();
PROC_UNLOCK(p);
}

View File

@ -181,6 +181,13 @@ static uma_zone_t rctl_rule_zone;
static struct rwlock rctl_lock;
RW_SYSINIT(rctl_lock, &rctl_lock, "RCTL lock");
#define RCTL_RLOCK() rw_rlock(&rctl_lock)
#define RCTL_RUNLOCK() rw_runlock(&rctl_lock)
#define RCTL_WLOCK() rw_wlock(&rctl_lock)
#define RCTL_WUNLOCK() rw_wunlock(&rctl_lock)
#define RCTL_LOCK_ASSERT() rw_assert(&rctl_lock, RA_LOCKED)
#define RCTL_WLOCK_ASSERT() rw_assert(&rctl_lock, RA_WLOCKED)
static int rctl_rule_fully_specified(const struct rctl_rule *rule);
static void rctl_rule_to_sbuf(struct sbuf *sb, const struct rctl_rule *rule);
@ -231,7 +238,7 @@ rctl_proc_rule_to_racct(const struct proc *p, const struct rctl_rule *rule)
struct ucred *cred = p->p_ucred;
ASSERT_RACCT_ENABLED();
rw_assert(&rctl_lock, RA_LOCKED);
RCTL_LOCK_ASSERT();
switch (rule->rr_per) {
case RCTL_SUBJECT_TYPE_PROCESS:
@ -258,7 +265,7 @@ rctl_available_resource(const struct proc *p, const struct rctl_rule *rule)
const struct racct *racct;
ASSERT_RACCT_ENABLED();
rw_assert(&rctl_lock, RA_LOCKED);
RCTL_LOCK_ASSERT();
racct = rctl_proc_rule_to_racct(p, rule);
available = rule->rr_amount - racct->r_resources[rule->rr_resource];
@ -277,8 +284,7 @@ rctl_would_exceed(const struct proc *p, const struct rctl_rule *rule,
int64_t available;
ASSERT_RACCT_ENABLED();
rw_assert(&rctl_lock, RA_LOCKED);
RCTL_LOCK_ASSERT();
available = rctl_available_resource(p, rule);
if (available >= amount)
@ -302,7 +308,7 @@ rctl_pcpu_available(const struct proc *p) {
minavailable = INT64_MAX;
limit = 0;
rw_rlock(&rctl_lock);
RCTL_RLOCK();
LIST_FOREACH(link, &p->p_racct->r_rule_links, rrl_next) {
rule = link->rrl_rule;
@ -317,7 +323,7 @@ rctl_pcpu_available(const struct proc *p) {
}
}
rw_runlock(&rctl_lock);
RCTL_RUNLOCK();
/*
* Return slightly less than actual value of the available
@ -352,7 +358,7 @@ rctl_enforce(struct proc *p, int resource, uint64_t amount)
ASSERT_RACCT_ENABLED();
rw_rlock(&rctl_lock);
RCTL_RLOCK();
/*
* There may be more than one matching rule; go through all of them.
@ -460,7 +466,7 @@ rctl_enforce(struct proc *p, int resource, uint64_t amount)
}
}
rw_runlock(&rctl_lock);
RCTL_RUNLOCK();
if (should_deny) {
/*
@ -482,7 +488,7 @@ rctl_get_limit(struct proc *p, int resource)
ASSERT_RACCT_ENABLED();
rw_rlock(&rctl_lock);
RCTL_RLOCK();
/*
* There may be more than one matching rule; go through all of them.
@ -498,7 +504,7 @@ rctl_get_limit(struct proc *p, int resource)
amount = rule->rr_amount;
}
rw_runlock(&rctl_lock);
RCTL_RUNLOCK();
return (amount);
}
@ -514,7 +520,7 @@ rctl_get_available(struct proc *p, int resource)
ASSERT_RACCT_ENABLED();
rw_rlock(&rctl_lock);
RCTL_RLOCK();
/*
* There may be more than one matching rule; go through all of them.
@ -531,7 +537,7 @@ rctl_get_available(struct proc *p, int resource)
minavailable = available;
}
rw_runlock(&rctl_lock);
RCTL_RUNLOCK();
/*
* XXX: Think about this _hard_.
@ -675,9 +681,9 @@ rctl_racct_add_rule(struct racct *racct, struct rctl_rule *rule)
link->rrl_rule = rule;
link->rrl_exceeded = 0;
rw_wlock(&rctl_lock);
RCTL_WLOCK();
LIST_INSERT_HEAD(&racct->r_rule_links, link, rrl_next);
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
}
static int
@ -687,7 +693,7 @@ rctl_racct_add_rule_locked(struct racct *racct, struct rctl_rule *rule)
ASSERT_RACCT_ENABLED();
KASSERT(rctl_rule_fully_specified(rule), ("rule not fully specified"));
rw_assert(&rctl_lock, RA_WLOCKED);
RCTL_WLOCK_ASSERT();
link = uma_zalloc(rctl_rule_link_zone, M_NOWAIT);
if (link == NULL)
@ -713,7 +719,7 @@ rctl_racct_remove_rules(struct racct *racct,
struct rctl_rule_link *link, *linktmp;
ASSERT_RACCT_ENABLED();
rw_assert(&rctl_lock, RA_WLOCKED);
RCTL_WLOCK_ASSERT();
LIST_FOREACH_SAFE(link, &racct->r_rule_links, rrl_next, linktmp) {
if (!rctl_rule_matches(link->rrl_rule, filter))
@ -1172,14 +1178,14 @@ static void
rctl_rule_pre_callback(void)
{
rw_wlock(&rctl_lock);
RCTL_WLOCK();
}
static void
rctl_rule_post_callback(void)
{
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
}
static void
@ -1189,7 +1195,7 @@ rctl_rule_remove_callback(struct racct *racct, void *arg2, void *arg3)
int found = 0;
ASSERT_RACCT_ENABLED();
rw_assert(&rctl_lock, RA_WLOCKED);
RCTL_WLOCK_ASSERT();
found += rctl_racct_remove_rules(racct, filter);
@ -1210,9 +1216,9 @@ rctl_rule_remove(struct rctl_rule *filter)
if (filter->rr_subject_type == RCTL_SUBJECT_TYPE_PROCESS &&
filter->rr_subject.rs_proc != NULL) {
p = filter->rr_subject.rs_proc;
rw_wlock(&rctl_lock);
RCTL_WLOCK();
found = rctl_racct_remove_rules(p->p_racct, filter);
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
if (found)
return (0);
return (ESRCH);
@ -1229,11 +1235,11 @@ rctl_rule_remove(struct rctl_rule *filter)
filter, (void *)&found);
sx_assert(&allproc_lock, SA_LOCKED);
rw_wlock(&rctl_lock);
RCTL_WLOCK();
FOREACH_PROC_IN_SYSTEM(p) {
found += rctl_racct_remove_rules(p->p_racct, filter);
}
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
if (found)
return (0);
@ -1460,7 +1466,7 @@ rctl_get_rules_callback(struct racct *racct, void *arg2, void *arg3)
struct sbuf *sb = (struct sbuf *)arg3;
ASSERT_RACCT_ENABLED();
rw_assert(&rctl_lock, RA_LOCKED);
RCTL_LOCK_ASSERT();
LIST_FOREACH(link, &racct->r_rule_links, rrl_next) {
if (!rctl_rule_matches(link->rrl_rule, filter))
@ -1511,7 +1517,7 @@ sys_rctl_get_rules(struct thread *td, struct rctl_get_rules_args *uap)
KASSERT(sb != NULL, ("sbuf_new failed"));
FOREACH_PROC_IN_SYSTEM(p) {
rw_rlock(&rctl_lock);
RCTL_RLOCK();
LIST_FOREACH(link, &p->p_racct->r_rule_links, rrl_next) {
/*
* Non-process rules will be added to the buffer later.
@ -1525,7 +1531,7 @@ sys_rctl_get_rules(struct thread *td, struct rctl_get_rules_args *uap)
rctl_rule_to_sbuf(sb, link->rrl_rule);
sbuf_printf(sb, ",");
}
rw_runlock(&rctl_lock);
RCTL_RUNLOCK();
}
loginclass_racct_foreach(rctl_get_rules_callback,
@ -1612,13 +1618,13 @@ sys_rctl_get_limits(struct thread *td, struct rctl_get_limits_args *uap)
sb = sbuf_new(NULL, buf, bufsize, SBUF_FIXEDLEN);
KASSERT(sb != NULL, ("sbuf_new failed"));
rw_rlock(&rctl_lock);
RCTL_RLOCK();
LIST_FOREACH(link, &filter->rr_subject.rs_proc->p_racct->r_rule_links,
rrl_next) {
rctl_rule_to_sbuf(sb, link->rrl_rule);
sbuf_printf(sb, ",");
}
rw_runlock(&rctl_lock);
RCTL_RUNLOCK();
if (sbuf_error(sb) == ENOMEM) {
error = ERANGE;
goto out;
@ -1743,7 +1749,7 @@ rctl_proc_ucred_changed(struct proc *p, struct ucred *newcred)
* credentials.
*/
rulecnt = 0;
rw_rlock(&rctl_lock);
RCTL_RLOCK();
LIST_FOREACH(link, &p->p_racct->r_rule_links, rrl_next) {
if (link->rrl_rule->rr_subject_type ==
RCTL_SUBJECT_TYPE_PROCESS)
@ -1755,7 +1761,7 @@ rctl_proc_ucred_changed(struct proc *p, struct ucred *newcred)
rulecnt++;
LIST_FOREACH(link, &newprr->prr_racct->r_rule_links, rrl_next)
rulecnt++;
rw_runlock(&rctl_lock);
RCTL_RUNLOCK();
/*
* Create temporary list. We've dropped the rctl_lock in order
@ -1773,7 +1779,7 @@ rctl_proc_ucred_changed(struct proc *p, struct ucred *newcred)
/*
* Assign rules to the newly allocated list entries.
*/
rw_wlock(&rctl_lock);
RCTL_WLOCK();
LIST_FOREACH(link, &p->p_racct->r_rule_links, rrl_next) {
if (link->rrl_rule->rr_subject_type ==
RCTL_SUBJECT_TYPE_PROCESS) {
@ -1841,13 +1847,13 @@ rctl_proc_ucred_changed(struct proc *p, struct ucred *newcred)
newlink, rrl_next);
}
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
return;
}
goaround:
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
/*
* Rule list changed while we were not holding the rctl_lock.
@ -1879,7 +1885,7 @@ rctl_proc_fork(struct proc *parent, struct proc *child)
ASSERT_RACCT_ENABLED();
KASSERT(parent->p_racct != NULL, ("process without racct; p = %p", parent));
rw_wlock(&rctl_lock);
RCTL_WLOCK();
/*
* Go through limits applicable to the parent and assign them
@ -1908,7 +1914,7 @@ rctl_proc_fork(struct proc *parent, struct proc *child)
}
}
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
return (0);
fail:
@ -1918,7 +1924,7 @@ rctl_proc_fork(struct proc *parent, struct proc *child)
rctl_rule_release(link->rrl_rule);
uma_zfree(rctl_rule_link_zone, link);
}
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
return (EAGAIN);
}
@ -1932,14 +1938,14 @@ rctl_racct_release(struct racct *racct)
ASSERT_RACCT_ENABLED();
rw_wlock(&rctl_lock);
RCTL_WLOCK();
while (!LIST_EMPTY(&racct->r_rule_links)) {
link = LIST_FIRST(&racct->r_rule_links);
LIST_REMOVE(link, rrl_next);
rctl_rule_release(link->rrl_rule);
uma_zfree(rctl_rule_link_zone, link);
}
rw_wunlock(&rctl_lock);
RCTL_WUNLOCK();
}
static void

View File

@ -53,9 +53,7 @@ __FBSDID("$FreeBSD$");
#ifdef SMP
MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");
#endif
#ifdef SMP
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
@ -895,6 +893,10 @@ topo_init_root(struct topo_node *root)
root->type = TOPO_TYPE_SYSTEM;
}
/*
* Add a child node with the given ID under the given parent.
* Do nothing if there is already a child with that ID.
*/
struct topo_node *
topo_add_node_by_hwid(struct topo_node *parent, int hwid,
topo_node_type type, uintptr_t subtype)
@ -921,6 +923,9 @@ topo_add_node_by_hwid(struct topo_node *parent, int hwid,
return (node);
}
/*
* Find a child node with the given ID under the given parent.
*/
struct topo_node *
topo_find_node_by_hwid(struct topo_node *parent, int hwid,
topo_node_type type, uintptr_t subtype)
@ -938,6 +943,12 @@ topo_find_node_by_hwid(struct topo_node *parent, int hwid,
return (NULL);
}
/*
* Given a node change the order of its parent's child nodes such
* that the node becomes the firt child while preserving the cyclic
* order of the children. In other words, the given node is promoted
* by rotation.
*/
void
topo_promote_child(struct topo_node *child)
{
@ -959,6 +970,10 @@ topo_promote_child(struct topo_node *child)
}
}
/*
* Iterate to the next node in the depth-first search (traversal) of
* the topology tree.
*/
struct topo_node *
topo_next_node(struct topo_node *top, struct topo_node *node)
{
@ -977,6 +992,10 @@ topo_next_node(struct topo_node *top, struct topo_node *node)
return (NULL);
}
/*
* Iterate to the next node in the depth-first search of the topology tree,
* but without descending below the current node.
*/
struct topo_node *
topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
{
@ -992,6 +1011,10 @@ topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
return (NULL);
}
/*
* Assign the given ID to the given topology node that represents a logical
* processor.
*/
void
topo_set_pu_id(struct topo_node *node, cpuid_t id)
{
@ -1013,6 +1036,14 @@ topo_set_pu_id(struct topo_node *node, cpuid_t id)
}
}
/*
* Check if the topology is uniform, that is, each package has the same number
* of cores in it and each core has the same number of threads (logical
* processors) in it. If so, calculate the number of package, the number of
* cores per package and the number of logical processors per core.
* 'all' parameter tells whether to include administratively disabled logical
* processors into the analysis.
*/
int
topo_analyze(struct topo_node *topo_root, int all,
int *pkg_count, int *cores_per_pkg, int *thrs_per_core)

View File

@ -1167,9 +1167,6 @@ netisr_start(void *arg)
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (nws_count >= netisr_maxthreads)
break;
/* XXXRW: Is skipping absent CPUs still required here? */
if (CPU_ABSENT(pc->pc_cpuid))
continue;
/* Worker will already be present for boot CPU. */
if (pc->pc_netisr != NULL)
continue;

View File

@ -483,7 +483,7 @@ ieee80211_vap_setup(struct ieee80211com *ic, struct ieee80211vap *vap,
vap->iv_htextcaps = ic->ic_htextcaps;
vap->iv_opmode = opmode;
vap->iv_caps |= ieee80211_opcap[opmode];
vap->iv_myaddr = ic->ic_macaddr;
IEEE80211_ADDR_COPY(vap->iv_myaddr, ic->ic_macaddr);
switch (opmode) {
case IEEE80211_M_WDS:
/*
@ -603,7 +603,7 @@ ieee80211_vap_attach(struct ieee80211vap *vap, ifm_change_cb_t media_change,
ifp->if_baudrate = IF_Mbps(maxrate);
ether_ifattach(ifp, macaddr);
vap->iv_myaddr = IF_LLADDR(ifp);
IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
/* hook output method setup by ether_ifattach */
vap->iv_output = ifp->if_output;
ifp->if_output = ieee80211_output;

View File

@ -263,7 +263,7 @@ struct mbuf *ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen);
#define M_EAPOL M_PROTO3 /* PAE/EAPOL frame */
#define M_PWR_SAV M_PROTO4 /* bypass PS handling */
#define M_MORE_DATA M_PROTO5 /* more data frames to follow */
#define M_FF M_PROTO6 /* fast frame */
#define M_FF M_PROTO6 /* fast frame / A-MSDU */
#define M_TXCB M_PROTO7 /* do tx complete callback */
#define M_AMPDU_MPDU M_PROTO8 /* ok for A-MPDU aggregation */
#define M_FRAG M_PROTO9 /* frame fragmentation */

View File

@ -2078,8 +2078,8 @@ hostap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
} else if (ni->ni_flags & IEEE80211_NODE_HT)
ieee80211_ht_node_cleanup(ni);
#ifdef IEEE80211_SUPPORT_SUPERG
else if (ni->ni_ath_flags & IEEE80211_NODE_ATH)
ieee80211_ff_node_cleanup(ni);
/* Always do ff node cleanup; for A-MSDU */
ieee80211_ff_node_cleanup(ni);
#endif
/*
* Allow AMPDU operation only with unencrypted traffic
@ -2097,6 +2097,10 @@ hostap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
"capinfo 0x%x ucastcipher %d", capinfo,
rsnparms.rsn_ucastcipher);
ieee80211_ht_node_cleanup(ni);
#ifdef IEEE80211_SUPPORT_SUPERG
/* Always do ff node cleanup; for A-MSDU */
ieee80211_ff_node_cleanup(ni);
#endif
vap->iv_stats.is_ht_assoc_downgrade++;
}
/*

View File

@ -65,6 +65,10 @@ struct ieee80211_tx_ampdu {
#define IEEE80211_AMPDU_RUNNING(tap) \
(((tap)->txa_flags & IEEE80211_AGGR_RUNNING) != 0)
/* return non-zero if AMPDU tx for the TID was NACKed */
#define IEEE80211_AMPDU_NACKED(tap)\
(!! ((tap)->txa_flags & IEEE80211_AGGR_NAK))
/* return non-zero if AMPDU tx for the TID is running or started */
#define IEEE80211_AMPDU_REQUESTED(tap) \
(((tap)->txa_flags & \

View File

@ -1014,8 +1014,8 @@ node_cleanup(struct ieee80211_node *ni)
if (ni->ni_flags & IEEE80211_NODE_HT)
ieee80211_ht_node_cleanup(ni);
#ifdef IEEE80211_SUPPORT_SUPERG
else if (ni->ni_ath_flags & IEEE80211_NODE_ATH)
ieee80211_ff_node_cleanup(ni);
/* Always do FF node cleanup; for A-MSDU */
ieee80211_ff_node_cleanup(ni);
#endif
#ifdef IEEE80211_SUPPORT_MESH
/*

View File

@ -191,51 +191,68 @@ ieee80211_vap_pkt_send_dest(struct ieee80211vap *vap, struct mbuf *m,
* otherwise unable to establish a BA stream.
*/
if ((ni->ni_flags & IEEE80211_NODE_AMPDU_TX) &&
(vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX) &&
(m->m_flags & M_EAPOL) == 0) {
int tid = WME_AC_TO_TID(M_WME_GETAC(m));
struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
(vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX)) {
if ((m->m_flags & M_EAPOL) == 0) {
int tid = WME_AC_TO_TID(M_WME_GETAC(m));
struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
ieee80211_txampdu_count_packet(tap);
if (IEEE80211_AMPDU_RUNNING(tap)) {
/*
* Operational, mark frame for aggregation.
*
* XXX do tx aggregation here
*/
m->m_flags |= M_AMPDU_MPDU;
} else if (!IEEE80211_AMPDU_REQUESTED(tap) &&
ic->ic_ampdu_enable(ni, tap)) {
/*
* Not negotiated yet, request service.
*/
ieee80211_ampdu_request(ni, tap);
/* XXX hold frame for reply? */
ieee80211_txampdu_count_packet(tap);
if (IEEE80211_AMPDU_RUNNING(tap)) {
/*
* Operational, mark frame for aggregation.
*
* XXX do tx aggregation here
*/
m->m_flags |= M_AMPDU_MPDU;
} else if (!IEEE80211_AMPDU_REQUESTED(tap) &&
ic->ic_ampdu_enable(ni, tap)) {
/*
* Not negotiated yet, request service.
*/
ieee80211_ampdu_request(ni, tap);
/* XXX hold frame for reply? */
}
}
}
/*
* XXX If we aren't doing AMPDU TX then we /could/ do
* fast-frames encapsulation, however right now this
* output logic doesn't handle that case.
*
* So we'll be limited to "fast-frames" xmit for non-11n STA
* and "no fast frames" xmit for 11n STAs.
* It'd be nice to eventually test fast-frames out by
* gracefully falling from failing A-MPDU transmission
* (driver says no, fail to negotiate it with peer) to
* using fast-frames.
*
* Note: we can actually put A-MSDU's inside an A-MPDU,
* so hopefully we can figure out how to make that particular
* combination work right.
*/
#ifdef IEEE80211_SUPPORT_SUPERG
else if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) {
m = ieee80211_ff_check(ni, m);
if (m == NULL) {
/* NB: any ni ref held on stageq */
return (0);
/*
* Check for AMSDU/FF; queue for aggregation
*
* Note: we don't bother trying to do fast frames or
* A-MSDU encapsulation for 802.3 drivers. Now, we
* likely could do it for FF (because it's a magic
* atheros tunnel LLC type) but I don't think we're going
* to really need to. For A-MSDU we'd have to set the
* A-MSDU QoS bit in the wifi header, so we just plain
* can't do it.
*
* Strictly speaking, we could actually /do/ A-MSDU / FF
* with A-MPDU together which for certain circumstances
* is beneficial (eg A-MSDU of TCK ACKs.) However,
* I'll ignore that for now so existing behaviour is maintained.
* Later on it would be good to make "amsdu + ampdu" configurable.
*/
else if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) {
if ((! mcast) && ieee80211_amsdu_tx_ok(ni)) {
m = ieee80211_amsdu_check(ni, m);
if (m == NULL) {
/* NB: any ni ref held on stageq */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
"%s: amsdu_check queued frame\n",
__func__);
return (0);
}
} else if ((! mcast) && IEEE80211_ATH_CAP(vap, ni,
IEEE80211_NODE_FF)) {
m = ieee80211_ff_check(ni, m);
if (m == NULL) {
/* NB: any ni ref held on stageq */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
"%s: ff_check queued frame\n",
__func__);
return (0);
}
}
}
#endif /* IEEE80211_SUPPORT_SUPERG */
@ -1229,6 +1246,7 @@ ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
ieee80211_seq seqno;
int meshhdrsize, meshae;
uint8_t *qos;
int is_amsdu = 0;
IEEE80211_TX_LOCK_ASSERT(ic);
@ -1383,9 +1401,19 @@ ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
} else {
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* Aggregated frame.
* Aggregated frame. Check if it's for AMSDU or FF.
*
* XXX TODO: IEEE80211_NODE_AMSDU* isn't implemented
* anywhere for some reason. But, since 11n requires
* AMSDU RX, we can just assume "11n" == "AMSDU".
*/
m = ieee80211_ff_encap(vap, m, hdrspace + meshhdrsize, key);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG, "%s: called; M_FF\n", __func__);
if (ieee80211_amsdu_tx_ok(ni)) {
m = ieee80211_amsdu_encap(vap, m, hdrspace + meshhdrsize, key);
is_amsdu = 1;
} else {
m = ieee80211_ff_encap(vap, m, hdrspace + meshhdrsize, key);
}
if (m == NULL)
#endif
goto bad;
@ -1521,6 +1549,13 @@ ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
qos[1] = 0;
wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;
/*
* If this is an A-MSDU then ensure we set the
* relevant field.
*/
if (is_amsdu)
qos[0] |= IEEE80211_QOS_AMSDU;
if ((m->m_flags & M_AMPDU_MPDU) == 0) {
/*
* NB: don't assign a sequence # to potential
@ -1544,6 +1579,14 @@ ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
*(uint16_t *)wh->i_seq =
htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
M_SEQNO_SET(m, seqno);
/*
* XXX TODO: we shouldn't allow EAPOL, etc that would
* be forced to be non-QoS traffic to be A-MSDU encapsulated.
*/
if (is_amsdu)
printf("%s: XXX ERROR: is_amsdu set; not QoS!\n",
__func__);
}

View File

@ -590,7 +590,7 @@ static const uint16_t ht40_bps[32] = {
#define HT_STF 4
#define HT_LTF(n) ((n) * 4)
#define HT_RC_2_MCS(_rc) ((_rc) & 0xf)
#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define IS_HT_RATE(_rc) ( (_rc) & IEEE80211_RATE_MCS)

View File

@ -194,6 +194,14 @@ uint8_t ieee80211_plcp2rate(uint8_t, enum ieee80211_phytype);
*/
uint8_t ieee80211_rate2plcp(int, enum ieee80211_phytype);
/*
* 802.11n rate manipulation.
*/
#define IEEE80211_HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define IEEE80211_HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define IEEE80211_IS_HT_RATE(_rc) ( (_rc) & IEEE80211_RATE_MCS)
uint32_t ieee80211_compute_duration_ht(uint32_t frameLen,
uint16_t rate, int streams, int isht40,
int isShortGI);

View File

@ -1703,12 +1703,8 @@ sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype,
ieee80211_setup_basic_htrates(ni, htinfo);
ieee80211_node_setuptxparms(ni);
ieee80211_ratectl_node_init(ni);
} else {
#ifdef IEEE80211_SUPPORT_SUPERG
if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_ATH))
ieee80211_ff_node_init(ni);
#endif
}
/*
* Configure state now that we are associated.
*

View File

@ -99,18 +99,22 @@ ieee80211_superg_attach(struct ieee80211com *ic)
{
struct ieee80211_superg *sg;
if (ic->ic_caps & IEEE80211_C_FF) {
sg = (struct ieee80211_superg *) IEEE80211_MALLOC(
sizeof(struct ieee80211_superg), M_80211_VAP,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (sg == NULL) {
printf("%s: cannot allocate SuperG state block\n",
__func__);
return;
}
ic->ic_superg = sg;
sg = (struct ieee80211_superg *) IEEE80211_MALLOC(
sizeof(struct ieee80211_superg), M_80211_VAP,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (sg == NULL) {
printf("%s: cannot allocate SuperG state block\n",
__func__);
return;
}
ieee80211_ffagemax = msecs_to_ticks(150);
ic->ic_superg = sg;
/*
* Default to not being so aggressive for FF/AMSDU
* aging, otherwise we may hold a frame around
* for way too long before we expire it out.
*/
ieee80211_ffagemax = msecs_to_ticks(2);
}
void
@ -353,13 +357,145 @@ ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
goto bad;
}
m1->m_nextpkt = NULL;
/*
* Include fast frame headers in adjusting header layout.
* Adjust to include 802.11 header requirement.
*/
KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
m1 = ieee80211_mbuf_adjust(vap, hdrspace, key, m1);
if (m1 == NULL) {
printf("%s: failed initial mbuf_adjust\n", __func__);
/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
m_freem(m2);
goto bad;
}
/*
* Copy second frame's Ethernet header out of line
* and adjust for possible padding in case there isn't room
* at the end of first frame.
*/
KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
if (m2 == NULL) {
/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
printf("%s: failed second \n", __func__);
goto bad;
}
/*
* Now do tunnel encapsulation. First, each
* frame gets a standard encapsulation.
*/
m1 = ieee80211_ff_encap1(vap, m1, &eh1);
if (m1 == NULL)
goto bad;
m2 = ieee80211_ff_encap1(vap, m2, &eh2);
if (m2 == NULL)
goto bad;
/*
* Pad leading frame to a 4-byte boundary. If there
* is space at the end of the first frame, put it
* there; otherwise prepend to the front of the second
* frame. We know doing the second will always work
* because we reserve space above. We prefer appending
* as this typically has better DMA alignment properties.
*/
for (m = m1; m->m_next != NULL; m = m->m_next)
;
pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
if (pad) {
if (M_TRAILINGSPACE(m) < pad) { /* prepend to second */
m2->m_data -= pad;
m2->m_len += pad;
m2->m_pkthdr.len += pad;
} else { /* append to first */
m->m_len += pad;
m1->m_pkthdr.len += pad;
}
}
/*
* A-MSDU's are just appended; the "I'm A-MSDU!" bit is in the
* QoS header.
*
* XXX optimize by prepending together
*/
m->m_next = m2; /* NB: last mbuf from above */
m1->m_pkthdr.len += m2->m_pkthdr.len;
M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
if (m1 == NULL) { /* XXX cannot happen */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
"%s: no space for tunnel header\n", __func__);
vap->iv_stats.is_tx_nobuf++;
return NULL;
}
memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);
M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
if (m1 == NULL) { /* XXX cannot happen */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
"%s: no space for llc header\n", __func__);
vap->iv_stats.is_tx_nobuf++;
return NULL;
}
llc = mtod(m1, struct llc *);
llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
llc->llc_control = LLC_UI;
llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);
vap->iv_stats.is_ff_encap++;
return m1;
bad:
vap->iv_stats.is_ff_encapfail++;
if (m1 != NULL)
m_freem(m1);
if (m2 != NULL)
m_freem(m2);
return NULL;
}
/*
* A-MSDU encapsulation.
*
* This assumes just two frames for now, since we're borrowing the
* same queuing code and infrastructure as fast-frames.
*
* There must be two packets chained with m_nextpkt.
* We do header adjustment for each, and then concatenate the mbuf chains
* to form a single frame for transmission.
*/
struct mbuf *
ieee80211_amsdu_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
struct ieee80211_key *key)
{
struct mbuf *m2;
struct ether_header eh1, eh2;
struct mbuf *m;
int pad;
m2 = m1->m_nextpkt;
if (m2 == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
"%s: only one frame\n", __func__);
goto bad;
}
m1->m_nextpkt = NULL;
/*
* Include A-MSDU header in adjusting header layout.
*/
KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
m1 = ieee80211_mbuf_adjust(vap,
hdrspace + sizeof(struct llc) + sizeof(uint32_t) + 2 +
hdrspace + sizeof(struct llc) + sizeof(uint32_t) +
sizeof(struct ether_header),
key, m1);
if (m1 == NULL) {
@ -376,9 +512,7 @@ ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
*/
KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
m2 = ieee80211_mbuf_adjust(vap,
ATH_FF_MAX_HDR_PAD + sizeof(struct ether_header),
NULL, m2);
m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
if (m2 == NULL) {
/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
goto bad;
@ -418,42 +552,16 @@ ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
}
/*
* Now, stick 'em together and prepend the tunnel headers;
* first the Atheros tunnel header (all zero for now) and
* then a special fast frame LLC.
*
* XXX optimize by prepending together
* Now, stick 'em together.
*/
m->m_next = m2; /* NB: last mbuf from above */
m1->m_pkthdr.len += m2->m_pkthdr.len;
M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
if (m1 == NULL) { /* XXX cannot happen */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
"%s: no space for tunnel header\n", __func__);
vap->iv_stats.is_tx_nobuf++;
return NULL;
}
memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);
M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
if (m1 == NULL) { /* XXX cannot happen */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
"%s: no space for llc header\n", __func__);
vap->iv_stats.is_tx_nobuf++;
return NULL;
}
llc = mtod(m1, struct llc *);
llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
llc->llc_control = LLC_UI;
llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);
vap->iv_stats.is_ff_encap++;
vap->iv_stats.is_amsdu_encap++;
return m1;
bad:
vap->iv_stats.is_amsdu_encapfail++;
if (m1 != NULL)
m_freem(m1);
if (m2 != NULL)
@ -461,6 +569,7 @@ ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
return NULL;
}
static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
@ -605,6 +714,7 @@ ff_approx_txtime(struct ieee80211_node *ni,
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211vap *vap = ni->ni_vap;
uint32_t framelen;
uint32_t frame_time;
/*
* Approximate the frame length to be transmitted. A swag to add
@ -621,7 +731,21 @@ ff_approx_txtime(struct ieee80211_node *ni,
framelen += 24;
if (m2 != NULL)
framelen += m2->m_pkthdr.len;
return ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0);
/*
* For now, we assume non-shortgi, 20MHz, just because I want to
* at least test 802.11n.
*/
if (ni->ni_txrate & IEEE80211_RATE_MCS)
frame_time = ieee80211_compute_duration_ht(framelen,
ni->ni_txrate,
IEEE80211_HT_RC_2_STREAMS(ni->ni_txrate),
0, /* isht40 */
0); /* isshortgi */
else
frame_time = ieee80211_compute_duration(ic->ic_rt, framelen,
ni->ni_txrate, 0);
return (frame_time);
}
/*
@ -753,6 +877,30 @@ ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
return mstaged;
}
/*
 * Decide whether the frame 'm', queued for transmission to node 'ni',
 * should be considered for software A-MSDU aggregation.
 *
 * Frames that are ineligible are returned unchanged so the caller can
 * transmit them normally.  Eligible frames are handed to
 * ieee80211_ff_check(), which presumably stages/aggregates them and
 * returns whatever should be transmitted now (NOTE(review): confirm
 * ff_check's staging contract against its definition).
 */
struct mbuf *
ieee80211_amsdu_check(struct ieee80211_node *ni, struct mbuf *m)
{
	/*
	 * XXX TODO: actually enforce the node support
	 * and HTCAP requirements for the maximum A-MSDU
	 * size.
	 */

	/* First: software A-MSDU transmit supported and enabled for ni? */
	if (! ieee80211_amsdu_tx_ok(ni))
		return (m);

	/* Next - EAPOL? Nope, don't aggregate; we don't QoS encap them */
	if (m->m_flags & (M_EAPOL | M_MCAST | M_BCAST))
		return (m);

	/*
	 * Next - needs to be a unicast data frame; the L2 destination is
	 * checked directly since multicast/broadcast frames are not
	 * aggregated.
	 */
	if (ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
		return (m);

	return (ieee80211_ff_check(ni, m));
}
void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{

View File

@ -82,6 +82,27 @@ int ieee80211_parse_athparams(struct ieee80211_node *, uint8_t *,
void ieee80211_ff_node_init(struct ieee80211_node *);
void ieee80211_ff_node_cleanup(struct ieee80211_node *);
/*
 * Return non-zero if software A-MSDU transmission is permitted to the
 * given node: the driver must advertise the capability, the VAP must
 * have A-MSDU TX enabled, and the node must be an 11n (HT) node.
 */
static inline int
ieee80211_amsdu_tx_ok(struct ieee80211_node *ni)
{

	/* First: software A-MSDU transmit? */
	if ((ni->ni_ic->ic_caps & IEEE80211_C_SWAMSDUTX) == 0)
		return (0);

	/* Next: does the VAP have AMSDU TX enabled? */
	if ((ni->ni_vap->iv_flags_ht & IEEE80211_FHT_AMSDU_TX) == 0)
		return (0);

	/* Next: 11n node? (assumed that A-MSDU TX to HT nodes is ok) */
	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0)
		return (0);

	/* ok, we can at least /do/ AMSDU to this node */
	return (1);
}
struct mbuf * ieee80211_amsdu_check(struct ieee80211_node *ni, struct mbuf *m);
struct mbuf *ieee80211_ff_check(struct ieee80211_node *, struct mbuf *);
void ieee80211_ff_age(struct ieee80211com *, struct ieee80211_stageq *,
int quanta);
@ -122,6 +143,8 @@ ieee80211_ff_age_all(struct ieee80211com *ic, int quanta)
struct mbuf *ieee80211_ff_encap(struct ieee80211vap *, struct mbuf *,
int, struct ieee80211_key *);
struct mbuf * ieee80211_amsdu_encap(struct ieee80211vap *vap, struct mbuf *m1,
int hdrspace, struct ieee80211_key *key);
struct mbuf *ieee80211_ff_decap(struct ieee80211_node *, struct mbuf *);

View File

@ -362,7 +362,8 @@ struct ieee80211vap {
TAILQ_ENTRY(ieee80211vap) iv_next; /* list of vap instances */
struct ieee80211com *iv_ic; /* back ptr to common state */
const uint8_t *iv_myaddr; /* MAC address: ifp or ic */
/* MAC address: ifp or ic */
uint8_t iv_myaddr[IEEE80211_ADDR_LEN];
uint32_t iv_debug; /* debug msg flags */
struct ieee80211_stats iv_stats; /* statistics */
@ -647,6 +648,7 @@ MALLOC_DECLARE(M_80211_VAP);
#define IEEE80211_C_DFS 0x00020000 /* CAPABILITY: DFS/radar avail*/
#define IEEE80211_C_MBSS 0x00040000 /* CAPABILITY: MBSS available */
#define IEEE80211_C_SWSLEEP 0x00080000 /* CAPABILITY: do sleep here */
#define IEEE80211_C_SWAMSDUTX 0x00100000 /* CAPABILITY: software A-MSDU TX */
/* 0x7c0000 available */
#define IEEE80211_C_WPA1 0x00800000 /* CAPABILITY: WPA1 avail */
#define IEEE80211_C_WPA2 0x01000000 /* CAPABILITY: WPA2 avail */

View File

@ -97,6 +97,7 @@ static int i2c_stop(device_t dev);
static int i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr);
static int i2c_read(device_t dev, char *buf, int len, int *read, int last, int delay);
static int i2c_write(device_t dev, const char *buf, int len, int *sent, int timeout);
static phandle_t i2c_get_node(device_t bus, device_t dev);
static device_method_t i2c_methods[] = {
DEVMETHOD(device_probe, i2c_probe),
@ -110,12 +111,13 @@ static device_method_t i2c_methods[] = {
DEVMETHOD(iicbus_read, i2c_read),
DEVMETHOD(iicbus_write, i2c_write),
DEVMETHOD(iicbus_transfer, iicbus_transfer_gen),
DEVMETHOD(ofw_bus_get_node, i2c_get_node),
{ 0, 0 }
};
static driver_t i2c_driver = {
"i2c",
"iichb",
i2c_methods,
sizeof(struct i2c_softc),
};
@ -425,3 +427,11 @@ i2c_write(device_t dev, const char *buf, int len, int *sent, int timeout)
return (IIC_NOERR);
}
/*
 * ofw_bus_get_node method: report the controller's own OFW node for
 * children, since the iicbus child has no device tree node of its own.
 */
static phandle_t
i2c_get_node(device_t bus, device_t dev)
{
	/* Share controller node with iicbus device. */
	return (ofw_bus_get_node(bus));
}

View File

@ -58,7 +58,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
#define __FreeBSD_version 1100104 /* Master, propagated to newvers */
#define __FreeBSD_version 1100105 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,

View File

@ -176,6 +176,9 @@ mask_width(u_int x)
return (fls(x << (1 - powerof2(x))) - 1);
}
/*
* Add a cache level to the cache topology description.
*/
static int
add_deterministic_cache(int type, int level, int share_count)
{
@ -217,6 +220,16 @@ add_deterministic_cache(int type, int level, int share_count)
return (1);
}
/*
* Determine topology of processing units and caches for AMD CPUs.
* See:
* - AMD CPUID Specification (Publication # 25481)
* - BKDG For AMD Family 10h Processors (Publication # 31116), section 2.15
* - BKDG for AMD NPT Family 0Fh Processors (Publication # 32559)
* XXX At the moment the code does not recognize grouping of AMD CMT threads,
* if supported, into cores, so each thread is treated as being in its own
* core. In other words, each logical CPU is considered to be a core.
*/
static void
topo_probe_amd(void)
{
@ -277,6 +290,15 @@ topo_probe_amd(void)
}
}
/*
* Determine topology of processing units for Intel CPUs
* using CPUID Leaf 1 and Leaf 4, if supported.
* See:
* - Intel 64 Architecture Processor Topology Enumeration
 * - Intel 64 and IA-32 Architectures Software Developer's Manual,
* Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
* FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
*/
static void
topo_probe_intel_0x4(void)
{
@ -302,6 +324,15 @@ topo_probe_intel_0x4(void)
pkg_id_shift = core_id_shift + mask_width(max_cores);
}
/*
* Determine topology of processing units for Intel CPUs
* using CPUID Leaf 11, if supported.
* See:
* - Intel 64 Architecture Processor Topology Enumeration
 * - Intel 64 and IA-32 Architectures Software Developer's Manual,
* Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
* FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
*/
static void
topo_probe_intel_0xb(void)
{
@ -342,6 +373,14 @@ topo_probe_intel_0xb(void)
}
}
/*
* Determine topology of caches for Intel CPUs.
* See:
* - Intel 64 Architecture Processor Topology Enumeration
* - Intel 64 and IA-32 Architectures Software Developers Manual
* Volume 2A: Instruction Set Reference, A-M,
* CPUID instruction
*/
static void
topo_probe_intel_caches(void)
{
@ -376,14 +415,16 @@ topo_probe_intel_caches(void)
}
}
/*
* Determine topology of processing units and caches for Intel CPUs.
* See:
* - Intel 64 Architecture Processor Topology Enumeration
*/
static void
topo_probe_intel(void)
{
/*
* See Intel(R) 64 Architecture Processor
* Topology Enumeration article for details.
*
* Note that 0x1 <= cpu_high < 4 case should be
* compatible with topo_probe_intel_0x4() logic when
* CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1)
@ -640,6 +681,12 @@ cpu_mp_announce(void)
}
}
/*
* Add a scheduling group, a group of logical processors sharing
* a particular cache (and, thus having an affinity), to the scheduling
* topology.
* This function recursively works on lower level caches.
*/
static void
x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
{
@ -657,6 +704,11 @@ x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
else
cg_root->cg_level = root->subtype;
/*
* Check how many core nodes we have under the given root node.
* If we have multiple logical processors, but not multiple
* cores, then those processors must be hardware threads.
*/
ncores = 0;
node = root;
while (node != NULL) {
@ -673,6 +725,13 @@ x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
root->cpu_count > 1 && ncores < 2)
cg_root->cg_flags = CG_FLAG_SMT;
/*
* Find out how many cache nodes we have under the given root node.
* We ignore cache nodes that cover all the same processors as the
* root node. Also, we do not descend below found cache nodes.
* That is, we count top-level "non-redundant" caches under the root
* node.
*/
nchildren = 0;
node = root;
while (node != NULL) {
@ -689,6 +748,10 @@ x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
cg_root->cg_child = smp_topo_alloc(nchildren);
cg_root->cg_children = nchildren;
/*
* Now find again the same cache nodes as above and recursively
* build scheduling topologies for them.
*/
node = root;
i = 0;
while (node != NULL) {
@ -705,6 +768,9 @@ x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
}
}
/*
* Build the MI scheduling topology from the discovered hardware topology.
*/
struct cpu_group *
cpu_topo(void)
{
@ -719,6 +785,9 @@ cpu_topo(void)
}
/*
* Add a logical CPU to the topology.
*/
void
cpu_add(u_int apic_id, char boot_cpu)
{

View File

@ -730,7 +730,7 @@ compile_tr(char *p, struct s_tr **py)
}
/*
* Compile the text following an a or i command.
* Compile the text following an a, c, or i command.
*/
static char *
compile_text(void)
@ -746,7 +746,6 @@ compile_text(void)
while (cu_fgets(lbuf, sizeof(lbuf), NULL)) {
op = s = text + size;
p = lbuf;
EATSPACE();
for (esc_nl = 0; *p != '\0'; p++) {
if (*p == '\\' && p[1] != '\0' && *++p == '\n')
esc_nl = 1;

View File

@ -427,7 +427,6 @@ ahci_port_stop(struct ahci_port *p)
struct ahci_ioreq *aior;
uint8_t *cfis;
int slot;
int ncq;
int error;
assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
@ -445,10 +444,7 @@ ahci_port_stop(struct ahci_port *p)
if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
cfis[2] == ATA_READ_FPDMA_QUEUED ||
cfis[2] == ATA_SEND_FPDMA_QUEUED)
ncq = 1;
if (ncq)
p->sact &= ~(1 << slot);
p->sact &= ~(1 << slot); /* NCQ */
else
p->ci &= ~(1 << slot);

View File

@ -152,7 +152,6 @@ struct cb_file {
static int
cb_open(void *arg, const char *filename, void **hp)
{
struct stat st;
struct cb_file *cf;
char path[PATH_MAX];
@ -169,7 +168,7 @@ cb_open(void *arg, const char *filename, void **hp)
return (errno);
}
cf->cf_size = st.st_size;
cf->cf_size = cf->cf_stat.st_size;
if (S_ISDIR(cf->cf_stat.st_mode)) {
cf->cf_isdir = 1;
cf->cf_u.dir = opendir(path);