Sponsored by:	The FreeBSD Foundation
Glen Barber 2016-04-13 16:19:50 +00:00
commit 010855174a
22 changed files with 293 additions and 346 deletions

View File

@ -33,7 +33,7 @@ __FBSDID("$FreeBSD$");
#include <sys/fbio.h>
#include "vgl.h"
static VGLText *VGLTextFont = 0;
static VGLText *VGLTextFont;
extern byte VGLFont[];

View File

@ -46,6 +46,7 @@ SND_DECLARE_FILE("$FreeBSD$");
#define VCHIQ_AUDIO_PACKET_SIZE 4000
#define VCHIQ_AUDIO_BUFFER_SIZE 128000
#define VCHIQ_AUDIO_PREBUFFER 10 /* Number of pre-buffered audio messages */
#define VCHIQ_AUDIO_MAX_VOLUME
/* volume in terms of 0.01dB */
@ -91,6 +92,7 @@ struct bcm2835_audio_chinfo {
uint32_t free_buffer;
uint32_t buffered_ptr;
int playback_state;
int prebuffered;
};
struct bcm2835_audio_info {
@ -170,11 +172,10 @@ bcm2835_audio_callback(void *param, const VCHI_CALLBACK_REASON_T reason, void *m
ch->complete_pos = (ch->complete_pos + count) % sndbuf_getsize(ch->buffer);
ch->free_buffer += count;
chn_intr(sc->pch.channel);
if (perr || ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE) {
chn_intr(ch->channel);
if (perr || ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE)
cv_signal(&sc->data_cv);
}
} else
printf("%s: unknown m.type: %d\n", __func__, m.type);
}
@ -244,6 +245,7 @@ bcm2835_audio_reset_channel(struct bcm2835_audio_chinfo *ch)
ch->playback_state = 0;
ch->buffered_ptr = 0;
ch->complete_pos = 0;
ch->prebuffered = 0;
sndbuf_reset(ch->buffer);
}
@ -478,21 +480,29 @@ bcm2835_audio_worker(void *data)
if (sc->unloading)
break;
if ((ch->playback_state == PLAYBACK_PLAYING) &&
(vchiq_unbuffered_bytes(ch) >= VCHIQ_AUDIO_PACKET_SIZE)
&& (ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE)) {
bcm2835_audio_write_samples(ch);
} else {
if (ch->playback_state == PLAYBACK_STOPPING) {
bcm2835_audio_reset_channel(&sc->pch);
ch->playback_state = PLAYBACK_IDLE;
}
if (ch->playback_state == PLAYBACK_IDLE) {
cv_wait_sig(&sc->data_cv, &sc->data_lock);
continue;
}
if (ch->playback_state == PLAYBACK_STARTING) {
/* Give it initial kick */
chn_intr(sc->pch.channel);
if (ch->playback_state == PLAYBACK_STOPPING) {
bcm2835_audio_reset_channel(&sc->pch);
ch->playback_state = PLAYBACK_IDLE;
continue;
}
if (ch->free_buffer < vchiq_unbuffered_bytes(ch)) {
cv_timedwait_sig(&sc->data_cv, &sc->data_lock, 10);
continue;
}
bcm2835_audio_write_samples(ch);
if (ch->playback_state == PLAYBACK_STARTING) {
ch->prebuffered++;
if (ch->prebuffered == VCHIQ_AUDIO_PREBUFFER) {
bcm2835_audio_start(ch);
ch->playback_state = PLAYBACK_PLAYING;
}
}
@ -514,7 +524,7 @@ bcm2835_audio_create_worker(struct bcm2835_audio_info *sc)
}
/* -------------------------------------------------------------------- */
/* channel interface for ESS18xx */
/* channel interface for VCHI audio */
static void *
bcmchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir)
{
@ -612,7 +622,6 @@ bcmchan_trigger(kobj_t obj, void *data, int go)
switch (go) {
case PCMTRIG_START:
bcm2835_audio_start(ch);
ch->playback_state = PLAYBACK_STARTING;
/* wakeup worker thread */
cv_signal(&sc->data_cv);
@ -620,7 +629,7 @@ bcmchan_trigger(kobj_t obj, void *data, int go)
case PCMTRIG_STOP:
case PCMTRIG_ABORT:
ch->playback_state = 1;
ch->playback_state = PLAYBACK_STOPPING;
bcm2835_audio_stop(ch);
break;
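
The worker-loop and trigger changes above replace the old "initial kick" with an explicit pre-buffer: while the channel sits in PLAYBACK_STARTING the worker keeps queuing packets, and only after VCHIQ_AUDIO_PREBUFFER of them have been handed to VCHI does it call bcm2835_audio_start() and move to PLAYBACK_PLAYING. A minimal stand-alone sketch of that state machine (illustrative names only, not driver code):

/* Pre-buffer state machine sketch; counts packets before "starting playback". */
#include <stdio.h>

#define PREBUFFER 10	/* mirrors VCHIQ_AUDIO_PREBUFFER */

enum state { IDLE, STARTING, PLAYING, STOPPING };

struct chan {
	enum state st;
	int prebuffered;
};

static void
write_packet(struct chan *ch, int i)
{
	/* stands in for bcm2835_audio_write_samples() */
	printf("queued packet %d\n", i);
	if (ch->st == STARTING && ++ch->prebuffered == PREBUFFER) {
		ch->st = PLAYING;	/* bcm2835_audio_start() would go here */
		printf("prebuffer full, playback started\n");
	}
}

int
main(void)
{
	struct chan ch = { STARTING, 0 };
	int i;

	for (i = 0; i < 12; i++)
		write_packet(&ch, i);
	return (0);
}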

View File

@ -37,6 +37,7 @@
#include <machine/vmparam.h>
#define VIRT_BITS 48
#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
.globl kernbase
.set kernbase, KERNBASE
@ -322,10 +323,12 @@ virt_map:
* TODO: This is out of date.
* There are at least 5 pages before that address for the page tables
* The pages used are:
* - The identity (PA = VA) table (TTBR0)
* - The Kernel L1 table (TTBR1)(not yet)
* - The PA != VA L2 table to jump into (not yet)
* - The FDT L2 table (not yet)
* - The Kernel L2 table
* - The Kernel L1 table
* - The Kernel L0 table (TTBR1)
* - The identity (PA = VA) L1 table
* - The identity (PA = VA) L0 table (TTBR0)
* - The DMAP L1 tables
*/
create_pagetables:
/* Save the Link register */
@ -381,6 +384,12 @@ create_pagetables:
mov x10, #1
bl link_l0_pagetable
/* Link the DMAP tables */
ldr x8, =DMAP_MIN_ADDRESS
adr x9, pagetable_dmap;
mov x10, #DMAP_TABLES
bl link_l0_pagetable
/*
* Build the TTBR0 maps.
*/
@ -644,6 +653,10 @@ pagetable_l1_ttbr0:
.space PAGE_SIZE
pagetable_l0_ttbr0:
.space PAGE_SIZE
.globl pagetable_dmap
pagetable_dmap:
.space PAGE_SIZE * DMAP_TABLES
pagetable_end:
el2_pagetable:

View File

@ -222,6 +222,13 @@ static struct rwlock_padalign pvh_global_lock;
vm_paddr_t dmap_phys_base; /* The start of the dmap region */
/* This code assumes all L1 DMAP entries will be used */
CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
extern pt_entry_t pagetable_dmap[];
/*
* Data for the pv entry allocation mechanism
*/
@ -543,28 +550,25 @@ pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
}
static void
pmap_bootstrap_dmap(vm_offset_t l1pt, vm_paddr_t kernstart)
pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t kernstart)
{
vm_offset_t va;
vm_paddr_t pa;
pd_entry_t *l1;
u_int l1_slot;
pa = dmap_phys_base = kernstart & ~L1_OFFSET;
va = DMAP_MIN_ADDRESS;
l1 = (pd_entry_t *)l1pt;
l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS);
for (; va < DMAP_MAX_ADDRESS;
pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
pmap_load_store(&l1[l1_slot],
pmap_load_store(&pagetable_dmap[l1_slot],
(pa & ~L1_OFFSET) | ATTR_DEFAULT |
ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
}
cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE);
cpu_dcache_wb_range((vm_offset_t)pagetable_dmap,
PAGE_SIZE * DMAP_TABLES);
cpu_tlb_flushID();
}
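
For scale, the new direct map spans DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS = 2 TiB, which (assuming the conventional arm64 4 KiB-granule shifts, L0_SHIFT = 39 and L1_SHIFT = 30) works out to DMAP_TABLES = 4 L1 pages and 2048 one-gigabyte block entries, matching the flat pagetable_dmap[] indexing above. A quick stand-alone check:

/* Sanity check of the DMAP sizing used above; the shift values are the
 * usual arm64 4 KiB-granule ones and are assumptions here. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define DMAP_MIN_ADDRESS 0xfffffd0000000000UL
#define DMAP_MAX_ADDRESS 0xffffff0000000000UL
#define L0_SHIFT 39	/* each L0 entry maps 512 GiB */
#define L1_SHIFT 30	/* each L1 block maps 1 GiB */

int
main(void)
{
	uint64_t span = DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS;
	uint64_t va = DMAP_MIN_ADDRESS + (5UL << L1_SHIFT);

	printf("span      = %" PRIu64 " GiB\n", span >> 30);	/* 2048 */
	printf("L1 pages  = %" PRIu64 "\n", span >> L0_SHIFT);	/* 4 (DMAP_TABLES) */
	printf("L1 blocks = %" PRIu64 "\n", span >> L1_SHIFT);	/* 2048 */
	printf("l1_slot   = %" PRIu64 "\n",
	    (va - DMAP_MIN_ADDRESS) >> L1_SHIFT);		/* 5 */
	return (0);
}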

View File

@ -125,16 +125,22 @@
* split into 2 regions at each end of the 64 bit address space, with an
* out of range "hole" in the middle.
*
* We limit the size of the two spaces to 39 bits each.
* We use the full 48 bits for each region, however the kernel may only use
* a limited range within this space.
*
* Upper region: 0xffffffffffffffff
* 0xffffff8000000000
* Upper region: 0xffffffffffffffff Top of virtual memory
*
* Hole: 0xffffff7fffffffff
* 0x0000008000000000
* 0xfffffeffffffffff End of DMAP
* 0xfffffd0000000000 Start of DMAP
*
* Lower region: 0x0000007fffffffff
* 0x0000000000000000
* 0xffff007fffffffff End of KVA
* 0xffff000000000000 Kernel base address & start of KVA
*
* Hole: 0xfffeffffffffffff
* 0x0001000000000000
*
* Lower region: 0x0000ffffffffffff End of user address space
* 0x0000000000000000 Start of user address space
*
* We use the upper region for the kernel, and the lower region for userland.
*
@ -152,23 +158,23 @@
#define VM_MIN_ADDRESS (0x0000000000000000UL)
#define VM_MAX_ADDRESS (0xffffffffffffffffUL)
/* 32 GiB of kernel addresses */
#define VM_MIN_KERNEL_ADDRESS (0xffffff8000000000UL)
#define VM_MAX_KERNEL_ADDRESS (0xffffff8800000000UL)
/* 512 GiB of kernel addresses */
#define VM_MIN_KERNEL_ADDRESS (0xffff000000000000UL)
#define VM_MAX_KERNEL_ADDRESS (0xffff008000000000UL)
/* Direct Map for 128 GiB of PA: 0x0 - 0x1fffffffff */
#define DMAP_MIN_ADDRESS (0xffffffc000000000UL)
#define DMAP_MAX_ADDRESS (0xffffffdfffffffffUL)
/* 2TiB for the direct map region */
#define DMAP_MIN_ADDRESS (0xfffffd0000000000UL)
#define DMAP_MAX_ADDRESS (0xffffff0000000000UL)
#define DMAP_MIN_PHYSADDR (dmap_phys_base)
#define DMAP_MAX_PHYSADDR (dmap_phys_base + (DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS))
/* True if pa is in the dmap range */
#define PHYS_IN_DMAP(pa) ((pa) >= DMAP_MIN_PHYSADDR && \
(pa) <= DMAP_MAX_PHYSADDR)
(pa) < DMAP_MAX_PHYSADDR)
/* True if va is in the dmap range */
#define VIRT_IN_DMAP(va) ((va) >= DMAP_MIN_ADDRESS && \
(va) <= DMAP_MAX_ADDRESS)
(va) < DMAP_MAX_ADDRESS)
#define PHYS_TO_DMAP(pa) \
({ \

View File

@ -3463,15 +3463,33 @@ scsi_error_action(struct ccb_scsiio *csio, struct scsi_inquiry_data *inq_data,
char *
scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len)
{
struct sbuf sb;
int error;
if (len == 0)
return ("");
sbuf_new(&sb, cdb_string, len, SBUF_FIXEDLEN);
scsi_cdb_sbuf(cdb_ptr, &sb);
/* ENOMEM just means that the fixed buffer is full, OK to ignore */
error = sbuf_finish(&sb);
if (error != 0 && error != ENOMEM)
return ("");
return(sbuf_data(&sb));
}
void
scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb)
{
u_int8_t cdb_len;
int i;
if (cdb_ptr == NULL)
return("");
/* Silence warnings */
cdb_len = 0;
return;
/*
* This is taken from the SCSI-3 draft spec.
@ -3508,12 +3526,11 @@ scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len)
cdb_len = 12;
break;
}
*cdb_string = '\0';
for (i = 0; i < cdb_len; i++)
snprintf(cdb_string + strlen(cdb_string),
len - strlen(cdb_string), "%02hhx ", cdb_ptr[i]);
return(cdb_string);
for (i = 0; i < cdb_len; i++)
sbuf_printf(sb, "%02hhx ", cdb_ptr[i]);
return;
}
const char *
@ -3562,7 +3579,6 @@ scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio,
#endif /* _KERNEL/!_KERNEL */
{
struct scsi_inquiry_data *inq_data;
char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
#ifdef _KERNEL
struct ccb_getdev *cgd;
#endif /* _KERNEL */
@ -3595,15 +3611,13 @@ scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio,
#endif /* _KERNEL/!_KERNEL */
if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) {
sbuf_printf(sb, "%s. CDB: %s",
scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data),
scsi_cdb_string(csio->cdb_io.cdb_ptr, cdb_str,
sizeof(cdb_str)));
sbuf_printf(sb, "%s. CDB: ",
scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data));
scsi_cdb_sbuf(csio->cdb_io.cdb_ptr, sb);
} else {
sbuf_printf(sb, "%s. CDB: %s",
scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data),
scsi_cdb_string(csio->cdb_io.cdb_bytes, cdb_str,
sizeof(cdb_str)));
sbuf_printf(sb, "%s. CDB: ",
scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data));
scsi_cdb_sbuf(csio->cdb_io.cdb_bytes, sb);
}
#ifdef _KERNEL
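
The CDB formatting in this file now targets an sbuf, with scsi_cdb_string() kept as a thin wrapper that wraps the caller's buffer in a fixed-length sbuf and treats ENOMEM from sbuf_finish() as harmless truncation. A userland sketch of that SBUF_FIXEDLEN pattern (assuming FreeBSD's libsbuf, link with -lsbuf; names are illustrative):

/* Hex-dump bytes into a caller-supplied buffer via a fixed-length sbuf,
 * mirroring the scsi_cdb_string() wrapper above (illustrative code). */
#include <sys/types.h>
#include <sys/sbuf.h>
#include <stdint.h>
#include <stdio.h>

static const char *
hex_string(const uint8_t *p, size_t n, char *buf, size_t len)
{
	struct sbuf sb;
	size_t i;

	sbuf_new(&sb, buf, len, SBUF_FIXEDLEN);
	for (i = 0; i < n; i++)
		sbuf_printf(&sb, "%02x ", p[i]);
	sbuf_finish(&sb);	/* a full fixed buffer only truncates the output */
	return (sbuf_data(&sb));
}

int
main(void)
{
	uint8_t cdb[6] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };	/* INQUIRY */
	char buf[32];

	printf("CDB: %s\n", hex_string(cdb, sizeof(cdb), buf, sizeof(buf)));
	return (0);
}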

View File

@ -3646,6 +3646,7 @@ const char * scsi_op_desc(u_int16_t opcode,
struct scsi_inquiry_data *inq_data);
char * scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string,
size_t len);
void scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb);
void scsi_print_inquiry(struct scsi_inquiry_data *inq_data);
void scsi_print_inquiry_short(struct scsi_inquiry_data *inq_data);

View File

@ -423,6 +423,8 @@ ahci_pci_attach(device_t dev)
pci_get_subvendor(dev) == 0x1043 &&
pci_get_subdevice(dev) == 0x81e4)
ctlr->quirks |= AHCI_Q_SATA1_UNIT0;
resource_int_value(device_get_name(dev), device_get_unit(dev),
"quirks", &ctlr->quirks);
ctlr->vendorid = pci_get_vendor(dev);
ctlr->deviceid = pci_get_device(dev);
ctlr->subvendorid = pci_get_subvendor(dev);
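
The resource_int_value() call added here lets the detected quirk mask be overridden through the hints mechanism. Assuming the standard hint.<name>.<unit>.<resource> naming that resource_int_value() looks up, a line like the following in /boot/device.hints (the value is purely illustrative) would replace the quirks for the first controller:

hint.ahci.0.quirks="0x1"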

View File

@ -672,6 +672,7 @@ static void bxe_handle_fp_tq(void *context, int pending);
static int bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
static int bxe_grc_dump(struct bxe_softc *sc);
static int bxe_alloc_buf_rings(struct bxe_softc *sc);
static void bxe_free_buf_rings(struct bxe_softc *sc);
@ -3448,10 +3449,6 @@ bxe_watchdog(struct bxe_softc *sc,
}
BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
if(sc->trigger_grcdump) {
/* taking grcdump */
bxe_grc_dump(sc);
}
BXE_FP_TX_UNLOCK(fp);
@ -15639,6 +15636,30 @@ bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
return (error);
}
static int
bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS)
{
struct bxe_softc *sc;
int error, result;
result = 0;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr) {
return (error);
}
if (result == 1) {
sc = (struct bxe_softc *)arg1;
BLOGI(sc, "... grcdump start ...\n");
bxe_grc_dump(sc);
BLOGI(sc, "... grcdump done ...\n");
}
return (error);
}
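
With this handler, the dump is collected synchronously when the sysctl is written rather than on a flag polled elsewhere. Assuming the usual dev.<driver>.<unit> sysctl tree, something like

sysctl dev.bxe.0.trigger_grcdump=1

takes the dump, and dev.bxe.0.grcdump_done then reads 1 until the buffer is fetched (and the flag cleared) through the ioctl path below.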
static int
bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
{
@ -15790,16 +15811,14 @@ bxe_add_sysctls(struct bxe_softc *sc)
"debug logging mode");
#endif /* #if __FreeBSD_version >= 900000 */
sc->trigger_grcdump = 0;
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
CTLFLAG_RW, &sc->trigger_grcdump, 0,
"trigger grcdump should be invoked"
" before collecting grcdump");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump",
CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
bxe_sysctl_trigger_grcdump, "IU",
"set by driver when a grcdump is needed");
sc->grcdump_started = 0;
sc->grcdump_done = 0;
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
CTLFLAG_RD, &sc->grcdump_done, 0,
CTLFLAG_RW, &sc->grcdump_done, 0,
"set by driver when grcdump is done");
sc->rx_budget = bxe_rx_budget;
@ -18631,7 +18650,7 @@ bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
return 0;
}
int
static int
bxe_grc_dump(struct bxe_softc *sc)
{
int rval = 0;
@ -18639,53 +18658,12 @@ bxe_grc_dump(struct bxe_softc *sc)
uint8_t *buf;
uint32_t size;
struct dump_header *d_hdr;
uint32_t i;
uint32_t reg_val;
uint32_t reg_addr;
uint32_t cmd_offset;
int context_size;
int allocated;
struct ecore_ilt *ilt = SC_ILT(sc);
struct bxe_fastpath *fp;
struct ilt_client_info *ilt_cli;
int grc_dump_size;
if (sc->grcdump_done || sc->grcdump_started)
if (sc->grcdump_done)
return (rval);
sc->grcdump_started = 1;
BLOGI(sc, "Started collecting grcdump\n");
grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
sizeof(struct dump_header);
sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
if (sc->grc_dump == NULL) {
BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
return(ENOMEM);
}
/* Disable parity attentions as long as following dump may
* cause false alarms by reading never written registers. We
* will re-enable parity attentions right after the dump.
*/
/* Disable parity on path 0 */
bxe_pretend_func(sc, 0);
ecore_disable_blocks_parity(sc);
/* Disable parity on path 1 */
bxe_pretend_func(sc, 1);
ecore_disable_blocks_parity(sc);
/* Return to current function */
bxe_pretend_func(sc, SC_ABS_FUNC(sc));
buf = sc->grc_dump;
d_hdr = sc->grc_dump;
@ -18717,7 +18695,7 @@ bxe_grc_dump(struct bxe_softc *sc)
(preset_idx == 11))
continue;
rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
rval = bxe_get_preset_regs(sc, sc->grc_dump, preset_idx);
if (rval)
break;
@ -18727,78 +18705,9 @@ bxe_grc_dump(struct bxe_softc *sc)
buf += size;
}
bxe_pretend_func(sc, 0);
ecore_clear_blocks_parity(sc);
ecore_enable_blocks_parity(sc);
bxe_pretend_func(sc, 1);
ecore_clear_blocks_parity(sc);
ecore_enable_blocks_parity(sc);
/* Return to current function */
bxe_pretend_func(sc, SC_ABS_FUNC(sc));
context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
for (i = 0, allocated = 0; allocated < context_size; i++) {
BLOGI(sc, "cdu_context i %d paddr %#jx vaddr %p size 0x%zx\n", i,
sc->context[i].vcxt_dma.paddr, sc->context[i].vcxt_dma.vaddr,
sc->context[i].size);
allocated += sc->context[i].size;
}
BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
(uintmax_t)sc->fw_stats_req_mapping,
(uintmax_t)sc->fw_stats_data_mapping,
sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%lx\n",
(void *)sc->def_sb_dma.paddr, sc->def_sb,
sizeof(struct host_sp_status_block));
BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%lx\n",
sc->sp_dma.paddr, sc->sp_dma.vaddr, sizeof(struct bxe_slowpath));
BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, FW_BUF_SIZE);
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%lx\n", i,
fp->sb_dma.paddr, fp->sb_dma.vaddr,
sizeof(union bxe_host_hc_status_block));
BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
fp->tx_dma.paddr, fp->tx_dma.vaddr,
(BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
fp->rx_dma.paddr, fp->rx_dma.vaddr,
(BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%lx\n", i,
fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
(BCM_PAGE_SIZE * RCQ_NUM_PAGES));
BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
(BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
}
ilt_cli = &ilt->clients[1];
for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
((struct bxe_dma *)((&ilt->lines[i])->page))->paddr,
((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
}
cmd_offset = DMAE_REG_CMD_MEM;
for (i = 0; i < 224; i++) {
reg_addr = (cmd_offset +(i * 4));
reg_val = REG_RD(sc, reg_addr);
BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
reg_addr, reg_val);
}
BLOGI(sc, "Collection of grcdump done\n");
sc->grcdump_done = 1;
return(rval);
}
@ -18806,10 +18715,21 @@ bxe_grc_dump(struct bxe_softc *sc)
static int
bxe_add_cdev(struct bxe_softc *sc)
{
int grc_dump_size;
grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
sizeof(struct dump_header);
sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
if (sc->grc_dump == NULL)
return (-1);
sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
if (sc->eeprom == NULL) {
BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
free(sc->grc_dump, M_DEVBUF); sc->grc_dump = NULL;
return (-1);
}
@ -18822,8 +18742,11 @@ bxe_add_cdev(struct bxe_softc *sc)
if_name(sc->ifp));
if (sc->ioctl_dev == NULL) {
free(sc->grc_dump, M_DEVBUF);
free(sc->eeprom, M_DEVBUF);
sc->eeprom = NULL;
return (-1);
}
@ -18838,11 +18761,13 @@ bxe_del_cdev(struct bxe_softc *sc)
if (sc->ioctl_dev != NULL)
destroy_dev(sc->ioctl_dev);
if (sc->grc_dump != NULL)
free(sc->grc_dump, M_DEVBUF);
if (sc->eeprom != NULL) {
free(sc->eeprom, M_DEVBUF);
sc->eeprom = NULL;
}
sc->ioctl_dev = NULL;
return;
}
@ -19020,26 +18945,15 @@ bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
sizeof(struct dump_header);
if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
(dump->grcdump_size < grc_dump_size)) {
if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) ||
(dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) {
rval = EINVAL;
break;
}
if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
(!sc->grcdump_started)) {
rval = bxe_grc_dump(sc);
}
if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
(sc->grc_dump != NULL)) {
dump->grcdump_dwords = grc_dump_size >> 2;
rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
free(sc->grc_dump, M_DEVBUF);
sc->grc_dump = NULL;
sc->grcdump_started = 0;
sc->grcdump_done = 0;
}
dump->grcdump_dwords = grc_dump_size >> 2;
rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
sc->grcdump_done = 0;
break;
@ -19059,7 +18973,6 @@ bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
sc->pcie_bus, sc->pcie_device, sc->pcie_func);
break;
case BXE_DEV_SETTING:
dev_p = (bxe_dev_setting_t *)data;
bxe_get_settings(sc, &dev_set);
@ -19078,20 +18991,20 @@ bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
reg_p = (bxe_get_regs_t *)data;
grc_dump_size = reg_p->reg_buf_len;
if((!sc->grcdump_done) && (!sc->grcdump_started)) {
if (sc->grc_dump == NULL) {
rval = EINVAL;
break;
}
if(!sc->grcdump_done) {
bxe_grc_dump(sc);
}
if((sc->grcdump_done) && (sc->grcdump_started) &&
(sc->grc_dump != NULL)) {
if(sc->grcdump_done) {
rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
free(sc->grc_dump, M_DEVBUF);
sc->grc_dump = NULL;
sc->grcdump_started = 0;
sc->grcdump_done = 0;
}
break;
case BXE_RDW_REG:
reg_rdw_p = (bxe_reg_rdw_t *)data;
if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&

View File

@ -1786,12 +1786,8 @@ struct bxe_softc {
int panic;
struct cdev *ioctl_dev;
void *grc_dump;
unsigned int trigger_grcdump;
unsigned int grcdump_done;
unsigned int grcdump_started;
int grcdump_done;
void *eeprom;
}; /* struct bxe_softc */
@ -2297,7 +2293,7 @@ void bxe_dump_mem(struct bxe_softc *sc, char *tag,
uint8_t *mem, uint32_t len);
void bxe_dump_mbuf_data(struct bxe_softc *sc, char *pTag,
struct mbuf *m, uint8_t contents);
extern int bxe_grc_dump(struct bxe_softc *sc);
#if __FreeBSD_version >= 800000
#if __FreeBSD_version >= 1000000

View File

@ -234,10 +234,6 @@ bxe_stats_comp(struct bxe_softc *sc)
while (*stats_comp != DMAE_COMP_VAL) {
if (!cnt) {
BLOGE(sc, "Timeout waiting for stats finished\n");
if(sc->trigger_grcdump) {
/* taking grcdump */
bxe_grc_dump(sc);
}
break;
}
@ -1314,12 +1310,8 @@ bxe_stats_update(struct bxe_softc *sc)
if (bxe_storm_stats_update(sc)) {
if (sc->stats_pending++ == 3) {
if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
if(sc->trigger_grcdump) {
/* taking grcdump */
bxe_grc_dump(sc);
}
atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
}
}
return;

View File

@ -418,9 +418,6 @@ netvsc_attach(device_t dev)
#endif
sc = device_get_softc(dev);
if (sc == NULL) {
return (ENOMEM);
}
bzero(sc, sizeof(hn_softc_t));
sc->hn_unit = unit;
@ -1169,10 +1166,6 @@ netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status)
{
hn_softc_t *sc = device_get_softc(device_obj->device);
if (sc == NULL) {
return;
}
if (status == 1) {
sc->hn_carrier = 1;
} else {

View File

@ -293,9 +293,6 @@ get_stor_device(struct hv_device *device,
struct storvsc_softc *sc;
sc = device_get_softc(device->device);
if (sc == NULL) {
return NULL;
}
if (outbound) {
/*
@ -976,10 +973,6 @@ storvsc_attach(device_t dev)
root_mount_token = root_mount_hold("storvsc");
sc = device_get_softc(dev);
if (sc == NULL) {
ret = ENOMEM;
goto cleanup;
}
stor_type = storvsc_get_storage_type(dev);

View File

@ -305,14 +305,18 @@ hv_vmbus_on_events(int cpu)
KASSERT(cpu <= mp_maxid, ("VMBUS: hv_vmbus_on_events: "
"cpu out of range!"));
page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu];
event = (hv_vmbus_synic_event_flags *)
page_addr + HV_VMBUS_MESSAGE_SINT;
if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) ||
(hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)) {
maxdword = HV_MAX_NUM_CHANNELS_SUPPORTED >> 5;
/*
* receive size is 1/2 page and divide that by 4 bytes
*/
recv_interrupt_page =
hv_vmbus_g_connection.recv_interrupt_page;
if (synch_test_and_clear_bit(0, &event->flags32[0]))
recv_interrupt_page =
hv_vmbus_g_connection.recv_interrupt_page;
} else {
/*
* On Host with Win8 or above, the event page can be
@ -320,9 +324,6 @@ hv_vmbus_on_events(int cpu)
* that has the pending interrupt.
*/
maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu];
event = (hv_vmbus_synic_event_flags *)
page_addr + HV_VMBUS_MESSAGE_SINT;
recv_interrupt_page = event->flags32;
}

View File

@ -145,7 +145,6 @@ hv_vmbus_isr(struct trapframe *frame)
{
int cpu;
hv_vmbus_message* msg;
hv_vmbus_synic_event_flags* event;
void* page_addr;
cpu = PCPU_GET(cpuid);
@ -156,26 +155,7 @@ hv_vmbus_isr(struct trapframe *frame)
* in Windows when running as a guest in Hyper-V
*/
page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu];
event = (hv_vmbus_synic_event_flags*)
page_addr + HV_VMBUS_MESSAGE_SINT;
if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) ||
(hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)) {
/* Since we are a child, we only need to check bit 0 */
if (synch_test_and_clear_bit(0, &event->flags32[0])) {
hv_vmbus_on_events(cpu);
}
} else {
/*
* On host with Win8 or above, we can directly look at
* the event page. If bit n is set, we have an interrupt
* on the channel with id n.
* Directly schedule the event software interrupt on
* current cpu.
*/
hv_vmbus_on_events(cpu);
}
hv_vmbus_on_events(cpu);
/* Check if there are actual msgs to be process */
page_addr = hv_vmbus_g_context.syn_ic_msg_page[cpu];

View File

@ -1822,22 +1822,24 @@ isp_fibre_init(ispsoftc_t *isp)
* Prefer or force Point-To-Point instead Loop?
*/
switch (isp->isp_confopts & ISP_CFG_PORT_PREF) {
case ISP_CFG_NPORT:
case ISP_CFG_LPORT_ONLY:
icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
icbp->icb_xfwoptions |= ICBXOPT_PTP_2_LOOP;
icbp->icb_xfwoptions |= ICBXOPT_LOOP_ONLY;
break;
case ISP_CFG_NPORT_ONLY:
icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
icbp->icb_xfwoptions |= ICBXOPT_PTP_ONLY;
break;
case ISP_CFG_LPORT_ONLY:
case ISP_CFG_LPORT:
icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
icbp->icb_xfwoptions |= ICBXOPT_LOOP_ONLY;
icbp->icb_xfwoptions |= ICBXOPT_LOOP_2_PTP;
break;
case ISP_CFG_NPORT:
icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
icbp->icb_xfwoptions |= ICBXOPT_PTP_2_LOOP;
break;
default:
/*
* Let NVRAM settings define it if they are sane
*/
/* Let NVRAM settings define it if they are sane */
switch (icbp->icb_xfwoptions & ICBXOPT_TOPO_MASK) {
case ICBXOPT_PTP_2_LOOP:
case ICBXOPT_PTP_ONLY:
@ -2109,19 +2111,32 @@ isp_fibre_init_2400(ispsoftc_t *isp)
}
switch (isp->isp_confopts & ISP_CFG_PORT_PREF) {
case ISP_CFG_NPORT_ONLY:
icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK;
icbp->icb_fwoptions2 |= ICB2400_OPT2_PTP_ONLY;
break;
case ISP_CFG_LPORT_ONLY:
icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK;
icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_ONLY;
break;
default:
case ISP_CFG_NPORT_ONLY:
icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK;
icbp->icb_fwoptions2 |= ICB2400_OPT2_PTP_ONLY;
break;
case ISP_CFG_NPORT:
/* ISP_CFG_PTP_2_LOOP not available in 24XX/25XX */
case ISP_CFG_LPORT:
icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK;
icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_2_PTP;
break;
default:
/* Let NVRAM settings define it if they are sane */
switch (icbp->icb_fwoptions2 & ICB2400_OPT2_TOPO_MASK) {
case ICB2400_OPT2_LOOP_ONLY:
case ICB2400_OPT2_PTP_ONLY:
case ICB2400_OPT2_LOOP_2_PTP:
break;
default:
icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK;
icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_2_PTP;
}
break;
}
switch (icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK) {
@ -2615,6 +2630,7 @@ isp_plogx(ispsoftc_t *isp, int chan, uint16_t handle, uint32_t portid, int flags
isp_put_plogx(isp, &pl, (isp_plogx_t *)reqp);
if (isp->isp_dblev & ISP_LOGDEBUG1)
isp_print_bytes(isp, "IOCB LOGX", QENTRY_LEN, reqp);
FCPARAM(isp, chan)->isp_login_hdl = handle;
ISP_SYNC_REQUEST(isp);
if (msleep(resp, &isp->isp_lock, 0, "PLOGX", 3 * ICB_LOGIN_TOV * hz)
== EWOULDBLOCK) {
@ -2623,6 +2639,7 @@ isp_plogx(ispsoftc_t *isp, int chan, uint16_t handle, uint32_t portid, int flags
isp_destroy_handle(isp, pl.plogx_handle);
return (-1);
}
FCPARAM(isp, chan)->isp_login_hdl = NIL_HANDLE;
if (isp->isp_dblev & ISP_LOGDEBUG1)
isp_print_bytes(isp, "IOCB LOGX response", QENTRY_LEN, resp);
isp_get_plogx(isp, (isp_plogx_t *)resp, &pl);
@ -5985,9 +6002,13 @@ isp_parse_async_fc(ispsoftc_t *isp, uint16_t mbox)
fcp = FCPARAM(isp, chan);
if (fcp->role == ISP_ROLE_NONE)
continue;
if (fcp->isp_loopstate > LOOP_LTEST_DONE)
if (fcp->isp_loopstate > LOOP_LTEST_DONE) {
if (nphdl != NIL_HANDLE &&
nphdl == fcp->isp_login_hdl &&
reason == PDB24XX_AE_OPN_2)
continue;
fcp->isp_loopstate = LOOP_LTEST_DONE;
else if (fcp->isp_loopstate < LOOP_HAVE_LINK)
} else if (fcp->isp_loopstate < LOOP_HAVE_LINK)
fcp->isp_loopstate = LOOP_HAVE_LINK;
isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan,
ISPASYNC_CHANGE_PDB, nphdl, nlstate, reason);
@ -7803,27 +7824,28 @@ isp_setdfltfcparm(ispsoftc_t *isp, int chan)
fcp->isp_xfwoptions = 0;
fcp->isp_zfwoptions = 0;
fcp->isp_lasthdl = NIL_HANDLE;
fcp->isp_login_hdl = NIL_HANDLE;
if (IS_24XX(isp)) {
fcp->isp_fwoptions |= ICB2400_OPT1_FAIRNESS;
fcp->isp_fwoptions |= ICB2400_OPT1_HARD_ADDRESS;
if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) {
if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX)
fcp->isp_fwoptions |= ICB2400_OPT1_FULL_DUPLEX;
}
fcp->isp_fwoptions |= ICB2400_OPT1_BOTH_WWNS;
fcp->isp_xfwoptions |= ICB2400_OPT2_LOOP_2_PTP;
fcp->isp_zfwoptions |= ICB2400_OPT3_RATE_AUTO;
} else {
fcp->isp_fwoptions |= ICBOPT_FAIRNESS;
fcp->isp_fwoptions |= ICBOPT_PDBCHANGE_AE;
fcp->isp_fwoptions |= ICBOPT_HARD_ADDRESS;
if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) {
if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX)
fcp->isp_fwoptions |= ICBOPT_FULL_DUPLEX;
}
/*
* Make sure this is turned off now until we get
* extended options from NVRAM
*/
fcp->isp_fwoptions &= ~ICBOPT_EXTENDED;
fcp->isp_xfwoptions |= ICBXOPT_LOOP_2_PTP;
fcp->isp_zfwoptions |= ICBZOPT_RATE_AUTO;
}

View File

@ -449,6 +449,7 @@ typedef struct {
uint16_t isp_lasthdl; /* only valid for channel 0 */
uint16_t isp_maxalloc;
uint16_t isp_fabric_params;
uint16_t isp_login_hdl; /* Logging in handle */
uint8_t isp_retry_delay;
uint8_t isp_retry_count;
@ -645,11 +646,12 @@ struct ispsoftc {
* ISP Runtime Configuration Options
*/
#define ISP_CFG_FULL_DUPLEX 0x01 /* Full Duplex (Fibre Channel only) */
#define ISP_CFG_PORT_PREF 0x0c /* Mask for Port Prefs (all FC except 2100) */
#define ISP_CFG_LPORT 0x00 /* prefer {N/F}L-Port connection */
#define ISP_CFG_NPORT 0x04 /* prefer {N/F}-Port connection */
#define ISP_CFG_NPORT_ONLY 0x08 /* insist on {N/F}-Port connection */
#define ISP_CFG_LPORT_ONLY 0x0c /* insist on {N/F}L-Port connection */
#define ISP_CFG_PORT_PREF 0x0e /* Mask for Port Prefs (all FC except 2100) */
#define ISP_CFG_PORT_DEF 0x00 /* prefer connection type from NVRAM */
#define ISP_CFG_LPORT_ONLY 0x02 /* insist on {N/F}L-Port connection */
#define ISP_CFG_NPORT_ONLY 0x04 /* insist on {N/F}-Port connection */
#define ISP_CFG_LPORT 0x06 /* prefer {N/F}L-Port connection */
#define ISP_CFG_NPORT 0x08 /* prefer {N/F}-Port connection */
#define ISP_CFG_1GB 0x10 /* force 1GB connection (23XX only) */
#define ISP_CFG_2GB 0x20 /* force 2GB connection (23XX only) */
#define ISP_CFG_NORELOAD 0x80 /* don't download f/w */

View File

@ -360,10 +360,10 @@ static void urtwn_update_aifs(struct urtwn_softc *, uint8_t);
static void urtwn_set_promisc(struct urtwn_softc *);
static void urtwn_update_promisc(struct ieee80211com *);
static void urtwn_update_mcast(struct ieee80211com *);
static struct ieee80211_node *urtwn_r88e_node_alloc(struct ieee80211vap *,
static struct ieee80211_node *urtwn_node_alloc(struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
static void urtwn_r88e_newassoc(struct ieee80211_node *, int);
static void urtwn_r88e_node_free(struct ieee80211_node *);
static void urtwn_newassoc(struct ieee80211_node *, int);
static void urtwn_node_free(struct ieee80211_node *);
static void urtwn_set_chan(struct urtwn_softc *,
struct ieee80211_channel *,
struct ieee80211_channel *);
@ -628,10 +628,10 @@ urtwn_attach(device_t self)
ic->ic_update_promisc = urtwn_update_promisc;
ic->ic_update_mcast = urtwn_update_mcast;
if (sc->chip & URTWN_CHIP_88E) {
ic->ic_node_alloc = urtwn_r88e_node_alloc;
ic->ic_newassoc = urtwn_r88e_newassoc;
ic->ic_node_alloc = urtwn_node_alloc;
ic->ic_newassoc = urtwn_newassoc;
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = urtwn_r88e_node_free;
ic->ic_node_free = urtwn_node_free;
}
ic->ic_update_chw = urtwn_update_chw;
ic->ic_ampdu_enable = urtwn_ampdu_enable;
@ -1025,7 +1025,7 @@ urtwn_rx_frame(struct urtwn_softc *sc, struct mbuf *m, int8_t *rssi_p)
struct r92c_rx_stat *stat;
uint32_t rxdw0, rxdw3;
uint8_t rate, cipher;
int8_t rssi = URTWN_NOISE_FLOOR + 1;
int8_t rssi = -127;
int infosz;
stat = mtod(m, struct r92c_rx_stat *);
@ -1042,6 +1042,7 @@ urtwn_rx_frame(struct urtwn_softc *sc, struct mbuf *m, int8_t *rssi_p)
rssi = urtwn_r88e_get_rssi(sc, rate, &stat[1]);
else
rssi = urtwn_get_rssi(sc, rate, &stat[1]);
URTWN_DPRINTF(sc, URTWN_DEBUG_RSSI, "%s: rssi=%d\n", __func__, rssi);
/* Update our average RSSI. */
urtwn_update_avgrssi(sc, rate, rssi);
}
@ -1070,6 +1071,8 @@ urtwn_rx_frame(struct urtwn_softc *sc, struct mbuf *m, int8_t *rssi_p)
/* Bit 7 set means HT MCS instead of rate. */
tap->wr_rate = 0x80 | (rate - 12);
}
/* XXX TODO: this isn't right; should use the last good RSSI */
tap->wr_dbm_antsignal = rssi;
tap->wr_dbm_antnoise = URTWN_NOISE_FLOOR;
}
@ -1135,17 +1138,26 @@ urtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
m->m_next = NULL;
ni = urtwn_rx_frame(sc, m, &rssi);
/* Store a global last-good RSSI */
if (rssi != -127)
sc->last_rssi = rssi;
URTWN_UNLOCK(sc);
nf = URTWN_NOISE_FLOOR;
if (ni != NULL) {
if (rssi != -127)
URTWN_NODE(ni)->last_rssi = rssi;
if (ni->ni_flags & IEEE80211_NODE_HT)
m->m_flags |= M_AMPDU;
(void)ieee80211_input(ni, m, rssi - nf, nf);
(void)ieee80211_input(ni, m,
URTWN_NODE(ni)->last_rssi - nf, nf);
ieee80211_free_node(ni);
} else {
(void)ieee80211_input_all(ic, m, rssi - nf,
nf);
/* Use last good global RSSI */
(void)ieee80211_input_all(ic, m,
sc->last_rssi - nf, nf);
}
URTWN_LOCK(sc);
m = next;
@ -4868,7 +4880,7 @@ urtwn_update_mcast(struct ieee80211com *ic)
}
static struct ieee80211_node *
urtwn_r88e_node_alloc(struct ieee80211vap *vap,
urtwn_node_alloc(struct ieee80211vap *vap,
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct urtwn_node *un;
@ -4885,12 +4897,16 @@ urtwn_r88e_node_alloc(struct ieee80211vap *vap,
}
static void
urtwn_r88e_newassoc(struct ieee80211_node *ni, int isnew)
urtwn_newassoc(struct ieee80211_node *ni, int isnew)
{
struct urtwn_softc *sc = ni->ni_ic->ic_softc;
struct urtwn_node *un = URTWN_NODE(ni);
uint8_t id;
/* Only do this bit for R88E chips */
if (! (sc->chip & URTWN_CHIP_88E))
return;
if (!isnew)
return;
@ -4911,7 +4927,7 @@ urtwn_r88e_newassoc(struct ieee80211_node *ni, int isnew)
}
static void
urtwn_r88e_node_free(struct ieee80211_node *ni)
urtwn_node_free(struct ieee80211_node *ni)
{
struct urtwn_softc *sc = ni->ni_ic->ic_softc;
struct urtwn_node *un = URTWN_NODE(ni);

View File

@ -96,6 +96,7 @@ struct urtwn_fw_info {
struct urtwn_node {
struct ieee80211_node ni; /* must be the first */
uint8_t id;
int last_rssi;
};
#define URTWN_NODE(ni) ((struct urtwn_node *)(ni))
@ -191,6 +192,8 @@ struct urtwn_softc {
int ledlink;
int sc_txtimer;
int last_rssi;
int fwcur;
struct urtwn_data sc_rx[URTWN_RX_LIST_COUNT];
urtwn_datahead sc_rx_active;

View File

@ -57,18 +57,18 @@ int client_try_idm(struct env *, struct idm *);
int client_addr_init(struct idm *);
int client_addr_free(struct idm *);
struct aldap *client_aldap_open(struct ypldap_addr *);
struct aldap *client_aldap_open(struct ypldap_addr_list *);
/*
* dummy wrapper to provide aldap_init with its fd's.
*/
struct aldap *
client_aldap_open(struct ypldap_addr *addr)
client_aldap_open(struct ypldap_addr_list *addr)
{
int fd = -1;
struct ypldap_addr *p;
for (p = addr; p != NULL; p = p->next) {
TAILQ_FOREACH(p, addr, next) {
char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];
struct sockaddr *sa = (struct sockaddr *)&p->ss;
@ -99,7 +99,7 @@ client_addr_init(struct idm *idm)
struct sockaddr_in6 *sa_in6;
struct ypldap_addr *h;
for (h = idm->idm_addr; h != NULL; h = h->next) {
TAILQ_FOREACH(h, &idm->idm_addr, next) {
switch (h->ss.ss_family) {
case AF_INET:
sa_in = (struct sockaddr_in *)&h->ss;
@ -125,18 +125,14 @@ client_addr_init(struct idm *idm)
int
client_addr_free(struct idm *idm)
{
struct ypldap_addr *h, *p;
struct ypldap_addr *h;
if (idm->idm_addr == NULL)
return (-1);
for (h = idm->idm_addr; h != NULL; h = p) {
p = h->next;
while (!TAILQ_EMPTY(&idm->idm_addr)) {
h = TAILQ_FIRST(&idm->idm_addr);
TAILQ_REMOVE(&idm->idm_addr, h, next);
free(h);
}
idm->idm_addr = NULL;
return (0);
}
@ -200,8 +196,8 @@ client_dispatch_dns(int fd, short events, void *p)
log_warnx("IMSG_HOST_DNS with invalid peerID");
break;
}
if (idm->idm_addr != NULL) {
log_warnx("IMSG_HOST_DNS but addr != NULL!");
if (!TAILQ_EMPTY(&idm->idm_addr)) {
log_warnx("IMSG_HOST_DNS but addrs set!");
break;
}
@ -213,17 +209,10 @@ client_dispatch_dns(int fd, short events, void *p)
data = (u_char *)imsg.data;
while (dlen >= sizeof(struct sockaddr_storage)) {
if ((h = calloc(1, sizeof(struct ypldap_addr))) ==
NULL)
if ((h = calloc(1, sizeof(*h))) == NULL)
fatal(NULL);
memcpy(&h->ss, data, sizeof(h->ss));
if (idm->idm_addr == NULL)
h->next = NULL;
else
h->next = idm->idm_addr;
idm->idm_addr = h;
TAILQ_INSERT_HEAD(&idm->idm_addr, h, next);
data += sizeof(h->ss);
dlen -= sizeof(h->ss);
@ -588,7 +577,7 @@ client_try_idm(struct env *env, struct idm *idm)
struct aldap *al;
where = "connect";
if ((al = client_aldap_open(idm->idm_addr)) == NULL)
if ((al = client_aldap_open(&idm->idm_addr)) == NULL)
return (-1);
if (idm->idm_flags & F_NEEDAUTH) {

View File

@ -42,9 +42,10 @@ enum imsg_type {
};
struct ypldap_addr {
struct ypldap_addr *next;
struct sockaddr_storage ss;
TAILQ_ENTRY(ypldap_addr) next;
struct sockaddr_storage ss;
};
TAILQ_HEAD(ypldap_addr_list, ypldap_addr);
enum {
PROC_MAIN,
@ -91,7 +92,7 @@ struct idm {
enum client_state idm_state;
u_int32_t idm_flags; /* lower 20 reserved */
u_int32_t idm_list;
struct ypldap_addr *idm_addr;
struct ypldap_addr_list idm_addr;
in_port_t idm_port;
char idm_binddn[LINE_WIDTH];
char idm_bindcred[LINE_WIDTH];

View File

@ -48,7 +48,7 @@ struct imsgev *iev_dns;
void dns_dispatch_imsg(int, short, void *);
void dns_sig_handler(int, short, void *);
void dns_shutdown(void);
int host_dns(const char *s, struct ypldap_addr **hn);
int host_dns(const char *, struct ypldap_addr_list *);
void
dns_sig_handler(int sig, short event, void *p)
@ -129,7 +129,8 @@ dns_dispatch_imsg(int fd, short events, void *p)
struct imsg imsg;
int n, cnt;
char *name;
struct ypldap_addr *h, *hn;
struct ypldap_addr_list hn = TAILQ_HEAD_INITIALIZER(hn);
struct ypldap_addr *h;
struct ibuf *buf;
struct env *env = p;
struct imsgev *iev = env->sc_iev;
@ -176,12 +177,11 @@ dns_dispatch_imsg(int fd, short events, void *p)
if (buf == NULL)
break;
if (cnt > 0) {
h = hn;
while (h != NULL) {
while(!TAILQ_EMPTY(&hn)) {
h = TAILQ_FIRST(&hn);
TAILQ_REMOVE(&hn, h, next);
imsg_add(buf, &h->ss, sizeof(h->ss));
hn = h->next;
free(h);
h = hn;
}
}
@ -204,13 +204,13 @@ dns_dispatch_imsg(int fd, short events, void *p)
}
int
host_dns(const char *s, struct ypldap_addr **hn)
host_dns(const char *s, struct ypldap_addr_list *hn)
{
struct addrinfo hints, *res0, *res;
int error, cnt = 0;
struct sockaddr_in *sa_in;
struct sockaddr_in6 *sa_in6;
struct ypldap_addr *h, *hh = NULL;
struct ypldap_addr *h;
bzero(&hints, sizeof(hints));
hints.ai_family = PF_UNSPEC;
@ -243,12 +243,9 @@ host_dns(const char *s, struct ypldap_addr **hn)
res->ai_addr)->sin6_addr, sizeof(struct in6_addr));
}
h->next = hh;
hh = h;
TAILQ_INSERT_HEAD(hn, h, next);
cnt++;
}
freeaddrinfo(res0);
*hn = hh;
return (cnt);
}
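
The ypldap changes above convert struct ypldap_addr from a hand-rolled singly linked list to a sys/queue.h TAILQ: the header now declares TAILQ_ENTRY/TAILQ_HEAD, client_addr_init() walks the list with TAILQ_FOREACH, host_dns() builds it with TAILQ_INSERT_HEAD, and the free paths drain it with TAILQ_FIRST/TAILQ_REMOVE. A minimal stand-alone sketch of that pattern (illustrative types, not ypldap code):

/* TAILQ usage sketch mirroring the ypldap_addr conversion (illustrative). */
#include <sys/queue.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	TAILQ_ENTRY(node) next;	/* replaces the hand-rolled "next" pointer */
	int value;
};
TAILQ_HEAD(node_list, node);

int
main(void)
{
	struct node_list list = TAILQ_HEAD_INITIALIZER(list);
	struct node *n;
	int i;

	for (i = 0; i < 3; i++) {	/* build, like host_dns() */
		if ((n = calloc(1, sizeof(*n))) == NULL)
			err(1, NULL);
		n->value = i;
		TAILQ_INSERT_HEAD(&list, n, next);
	}

	TAILQ_FOREACH(n, &list, next)	/* walk, like client_addr_init() */
		printf("%d\n", n->value);

	while (!TAILQ_EMPTY(&list)) {	/* drain, like client_addr_free() */
		n = TAILQ_FIRST(&list);
		TAILQ_REMOVE(&list, n, next);
		free(n);
	}
	return (0);
}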