Merge ^/head r341764 through r341812.

This commit is contained in:
dim 2018-12-11 06:47:04 +00:00
commit 30c4d65dc9
46 changed files with 820 additions and 240 deletions

View File

@ -977,6 +977,14 @@ _cleanobj_fast_depend_hack: .PHONY
rm -f ${OBJTOP}/usr.sbin/ntp/libntpevent/.depend.*; \
fi
# 20181209 r341759 track migration across wpa update
@if [ -e "${OBJTOP}/usr.sbin/wpa/wpa_supplicant/.depend.rrm.o" ] && \
egrep -q 'src/ap/rrm.c' \
${OBJTOP}/usr.sbin/wpa/wpa_supplicant/.depend.rrm.o; then \
echo "Removing stale wpa dependencies and objects"; \
rm -f ${OBJTOP}/usr.sbin/wpa/*/.depend*; \
fi
_worldtmp: .PHONY
@echo
@echo "--------------------------------------------------------------"

View File

@ -29,7 +29,7 @@
.\" @(#)getfh.2 8.1 (Berkeley) 6/9/93
.\" $FreeBSD$
.\"
.Dd December 7, 2018
.Dd December 11, 2018
.Dt GETFH 2
.Os
.Sh NAME
@ -76,12 +76,12 @@ and
.Fn lgetfh
except when the
.Fa path
specifies a relative or NULL path, or the
specifies a relative path, or the
.Dv AT_BENEATH
flag is provided.
For
.Fn getfhat
and relative or NULL
and relative
.Fa path ,
the status is retrieved from a file relative to
the directory associated with the file descriptor

View File

@ -58,17 +58,29 @@ JID=0
# ---------
# list_vars pattern
# List vars matching pattern.
# List variables matching glob pattern.
#
list_vars()
{
set | { while read LINE; do
var="${LINE%%=*}"
case "$var" in
"$LINE"|*[!a-zA-Z0-9_]*) continue ;;
$1) echo $var
# Localize 'set' option below.
local -
local IFS=$'\n' line varname
# Disable path expansion in unquoted 'for' parameters below.
set -o noglob
for line in $(set); do
varname="${line%%=*}"
case "$varname" in
"$line"|*[!a-zA-Z0-9_]*)
continue
;;
$1)
echo $varname
;;
esac
done; }
done
}
# set_rcvar [var] [defval] [desc]

View File

@ -1256,7 +1256,8 @@ print_ip(struct buf_pr *bp, const struct format_opts *fo, ipfw_insn_ip *cmd)
(cmd->o.opcode == O_IP_SRC || cmd->o.opcode == O_IP_DST) ?
32 : contigmask((uint8_t *)&(a[1]), 32);
if (mb == 32 && co.do_resolv)
he = gethostbyaddr((char *)&(a[0]), sizeof(u_long), AF_INET);
he = gethostbyaddr((char *)&(a[0]), sizeof(in_addr_t),
AF_INET);
if (he != NULL) /* resolved to name */
bprintf(bp, "%s", he->h_name);
else if (mb == 0) /* any */
@ -1510,6 +1511,7 @@ print_instruction(struct buf_pr *bp, const struct format_opts *fo,
bprintf(bp, " %s", pe->p_name);
else
bprintf(bp, " %u", cmd->arg1);
state->proto = cmd->arg1;
break;
case O_MACADDR2:
print_mac(bp, insntod(cmd, mac));
@ -1991,10 +1993,10 @@ print_proto(struct buf_pr *bp, struct format_opts *fo,
struct show_state *state)
{
ipfw_insn *cmd;
int l, proto, ip4, ip6, tmp;
int l, proto, ip4, ip6;
/* Count all O_PROTO, O_IP4, O_IP6 instructions. */
proto = tmp = ip4 = ip6 = 0;
proto = ip4 = ip6 = 0;
for (l = state->rule->act_ofs, cmd = state->rule->cmd;
l > 0; l -= F_LEN(cmd), cmd += F_LEN(cmd)) {
switch (cmd->opcode) {
@ -2030,18 +2032,13 @@ print_proto(struct buf_pr *bp, struct format_opts *fo,
if (cmd == NULL || (cmd->len & F_OR))
for (l = proto; l > 0; l--) {
cmd = print_opcode(bp, fo, state, O_PROTO);
if (cmd != NULL && (cmd->len & F_OR) == 0)
if (cmd == NULL || (cmd->len & F_OR) == 0)
break;
tmp = cmd->arg1;
}
/* Initialize proto, it is used by print_newports() */
if (tmp != 0)
state->proto = tmp;
else if (ip6 != 0)
state->proto = IPPROTO_IPV6;
else
state->proto = IPPROTO_IP;
state->flags |= HAVE_PROTO;
if (state->proto == 0 && ip6 != 0)
state->proto = IPPROTO_IPV6;
}
static int

View File

@ -313,7 +313,7 @@ main(int argc, char *const *argv)
break;
case 'c':
ltmp = strtol(optarg, &ep, 0);
if (*ep || ep == optarg || ltmp > LONG_MAX || ltmp <=0)
if (*ep || ep == optarg || ltmp <= 0)
errx(EX_USAGE,
"invalid count of packets to transmit: `%s'",
optarg);

View File

@ -520,6 +520,12 @@ uk.dvorak.kbd:fr:Royaume Uni Dvorak
uk.dvorak.kbd:pt:Reino Unido Dvorak
uk.dvorak.kbd:es:Británico Dvorak
uk.macbook.kbd:en:United Kingdom Macbook
uk.macbook.kbd:de:Vereinigtes Königreich Macbook
uk.macbook.kbd:fr:Royaume Uni Macbook
uk.macbook.kbd:pt:Reino Unido Macbook
uk.macbook.kbd:es:Británico Macbook
us.kbd:en:United States of America
us.kbd:de:US-amerikanisch
us.kbd:fr:États Unis d'Amérique

View File

@ -74,6 +74,7 @@ FILES= INDEX.keymaps \
uk.capsctrl.kbd \
uk.dvorak.kbd \
uk.kbd \
uk.macbook.kbd \
us.acc.kbd \
us.ctrl.kbd \
us.dvorak.kbd \

View File

@ -0,0 +1,115 @@
# $FreeBSD$
# by James Wright <james.wright@jigsawdezign.com>
# alt
# scan cntrl alt alt cntrl lock
# code base shift cntrl shift alt shift cntrl shift state
# ------------------------------------------------------------------
000 nop nop nop nop nop nop nop nop O
001 esc esc esc esc esc esc debug esc O
002 '1' '!' nop nop '1' '!' nop nop O
003 '2' '@' nul nul 0x20ac '@' nul nul O
004 '3' 0xa3 nop nop '#' 0xa3 nop nop O
005 '4' '$' nop nop '4' '$' nop nop O
006 '5' '%' nop nop '5' '%' nop nop O
007 '6' '^' rs rs '6' '^' rs rs O
008 '7' '&' nop nop '7' '&' nop nop O
009 '8' '*' nop nop '8' '*' nop nop O
010 '9' '(' nop nop '9' '(' nop nop O
011 '0' ')' nop nop '0' ')' nop nop O
012 '-' '_' us us '-' '_' us us O
013 '=' '+' nop nop '=' '+' nop nop O
014 bs bs del del bs bs del del O
015 ht btab nop nop ht btab nop nop O
016 'q' 'Q' dc1 dc1 'q' 'Q' dc1 dc1 C
017 'w' 'W' etb etb 'w' 'W' etb etb C
018 'e' 'E' enq enq 'e' 'E' enq enq C
019 'r' 'R' dc2 dc2 'r' 'R' dc2 dc2 C
020 't' 'T' dc4 dc4 't' 'T' dc4 dc4 C
021 'y' 'Y' em em 'y' 'Y' em em C
022 'u' 'U' nak nak 'u' 'U' nak nak C
023 'i' 'I' ht ht 'i' 'I' ht ht C
024 'o' 'O' si si 'o' 'O' si si C
025 'p' 'P' dle dle 'p' 'P' dle dle C
026 '[' '{' esc esc '[' '{' esc esc O
027 ']' '}' gs gs ']' '}' gs gs O
028 cr cr nl nl cr cr nl nl O
029 lctrl lctrl lctrl lctrl lctrl lctrl lctrl lctrl O
030 'a' 'A' soh soh 'a' 'A' soh soh C
031 's' 'S' dc3 dc3 's' 'S' dc3 dc3 C
032 'd' 'D' eot eot 'd' 'D' eot eot C
033 'f' 'F' ack ack 'f' 'F' ack ack C
034 'g' 'G' bel bel 'g' 'G' bel bel C
035 'h' 'H' bs bs 'h' 'H' bs bs C
036 'j' 'J' nl nl 'j' 'J' nl nl C
037 'k' 'K' vt vt 'k' 'K' vt vt C
038 'l' 'L' ff ff 'l' 'L' ff ff C
039 ';' ':' nop nop ';' ':' nop nop O
040 ''' '"' nop nop ''' '"' nop nop O
041 0xa7 0xb1 nop nop 0xa7 0xb1 nop nop O
042 lshift lshift lshift lshift lshift lshift lshift lshift O
043 '\' '|' fs fs '\' '|' fs fs O
044 'z' 'Z' sub sub 'z' 'Z' sub sub C
045 'x' 'X' can can 'x' 'X' can can C
046 'c' 'C' etx etx 'c' 'C' etx etx C
047 'v' 'V' syn syn 'v' 'V' syn syn C
048 'b' 'B' stx stx 'b' 'B' stx stx C
049 'n' 'N' so so 'n' 'N' so so C
050 'm' 'M' cr cr 'm' 'M' cr cr C
051 ',' '<' nop nop ',' '<' nop nop O
052 '.' '>' nop nop '.' '>' nop nop O
053 '/' '?' nop nop '/' '?' nop nop O
054 rshift rshift rshift rshift rshift rshift rshift rshift O
055 '*' '*' '*' '*' '*' '*' '*' '*' O
056 lalt lalt lalt lalt lalt lalt lalt lalt O
057 ' ' ' ' nul ' ' ' ' ' ' susp ' ' O
058 clock clock clock clock clock clock clock clock O
059 fkey01 fkey13 fkey25 fkey37 scr01 scr11 scr01 scr11 O
060 fkey02 fkey14 fkey26 fkey38 scr02 scr12 scr02 scr12 O
061 fkey03 fkey15 fkey27 fkey39 scr03 scr13 scr03 scr13 O
062 fkey04 fkey16 fkey28 fkey40 scr04 scr14 scr04 scr14 O
063 fkey05 fkey17 fkey29 fkey41 scr05 scr15 scr05 scr15 O
064 fkey06 fkey18 fkey30 fkey42 scr06 scr16 scr06 scr16 O
065 fkey07 fkey19 fkey31 fkey43 scr07 scr07 scr07 scr07 O
066 fkey08 fkey20 fkey32 fkey44 scr08 scr08 scr08 scr08 O
067 fkey09 fkey21 fkey33 fkey45 scr09 scr09 scr09 scr09 O
068 fkey10 fkey22 fkey34 fkey46 scr10 scr10 scr10 scr10 O
069 nlock nlock nlock nlock nlock nlock nlock nlock O
070 slock slock slock slock slock slock slock slock O
071 fkey49 '7' '7' '7' '7' '7' '7' '7' N
072 fkey50 '8' '8' '8' '8' '8' '8' '8' N
073 fkey51 '9' '9' '9' '9' '9' '9' '9' N
074 fkey52 '-' '-' '-' '-' '-' '-' '-' N
075 fkey53 '4' '4' '4' '4' '4' '4' '4' N
076 fkey54 '5' '5' '5' '5' '5' '5' '5' N
077 fkey55 '6' '6' '6' '6' '6' '6' '6' N
078 fkey56 '+' '+' '+' '+' '+' '+' '+' N
079 fkey57 '1' '1' '1' '1' '1' '1' '1' N
080 fkey58 '2' '2' '2' '2' '2' '2' '2' N
081 fkey59 '3' '3' '3' '3' '3' '3' '3' N
082 fkey60 '0' '0' '0' '0' '0' '0' '0' N
083 del '.' '.' '.' '.' '.' boot boot N
084 nop nop nop nop nop nop nop nop O
085 nop nop nop nop nop nop nop nop O
086 '`' '~' nop nop '`' '~' nop nop O
087 fkey11 fkey23 fkey35 fkey47 scr11 scr11 scr11 scr11 O
088 fkey12 fkey24 fkey36 fkey48 scr12 scr12 scr12 scr12 O
089 cr cr nl nl cr cr nl nl O
090 rctrl rctrl rctrl rctrl rctrl rctrl rctrl rctrl O
091 '/' '/' '/' '/' '/' '/' '/' '/' N
092 nscr pscr debug debug nop nop nop nop O
093 ralt ralt ralt ralt ralt ralt ralt ralt O
094 fkey49 fkey49 fkey49 fkey49 fkey49 fkey49 fkey49 fkey49 O
095 fkey50 fkey50 fkey50 fkey50 fkey50 fkey50 fkey50 fkey50 O
096 fkey51 fkey51 fkey51 fkey51 fkey51 fkey51 fkey51 fkey51 O
097 fkey53 fkey53 fkey53 fkey53 fkey53 fkey53 fkey53 fkey53 O
098 fkey55 fkey55 fkey55 fkey55 fkey55 fkey55 fkey55 fkey55 O
099 fkey57 fkey57 fkey57 fkey57 fkey57 fkey57 fkey57 fkey57 O
100 fkey58 fkey58 fkey58 fkey58 fkey58 fkey58 fkey58 fkey58 O
101 fkey59 fkey59 fkey59 fkey59 fkey59 fkey59 fkey59 fkey59 O
102 fkey60 paste fkey60 fkey60 fkey60 fkey60 fkey60 fkey60 O
103 fkey61 fkey61 fkey61 fkey61 fkey61 fkey61 boot fkey61 O
104 slock saver slock saver susp nop susp nop O
105 fkey62 fkey62 fkey62 fkey62 fkey62 fkey62 fkey62 fkey62 O
106 fkey63 fkey63 fkey63 fkey63 fkey63 fkey63 fkey63 fkey63 O
107 fkey64 fkey64 fkey64 fkey64 fkey64 fkey64 fkey64 fkey64 O
108 nop nop nop nop nop nop nop nop O

View File

@ -16,7 +16,7 @@ NEWVERSWHAT= "U-Boot loader" ${MACHINE_ARCH}
INSTALLFLAGS= -b
# Architecture-specific loader code
SRCS= start.S conf.c vers.c
SRCS= start.S conf.c vers.c ppc64_elf_freebsd.c
SRCS+= ucmpdi2.c
# Always add MI sources

View File

@ -95,9 +95,11 @@ struct netif_driver *netif_drivers[] = {
* Sort formats so that those that can detect based on arguments
* rather than reading the file go first.
*/
/* 64-bit ELF loader descriptor; defined outside this file. */
extern struct file_format uboot_elf64;

/*
 * Table of supported kernel/module file formats, tried in order.
 * Formats that can detect a match from arguments alone come first
 * (see the comment above); the list is NULL-terminated.
 */
struct file_format *file_formats[] = {
	&uboot_elf,
	&uboot_elf64,
	NULL			/* terminator */
};

View File

@ -0,0 +1,101 @@
/*-
* Copyright (c) 2001 Benno Rice <benno@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define __ELF_WORD_SIZE 64
#include <sys/param.h>
#include <sys/linker.h>
#include <machine/metadata.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <stand.h>
#include "bootstrap.h"
#include "libuboot.h"
vm_offset_t md_load64(char *args, vm_offset_t *modulep, vm_offset_t *dtb);
extern char end[];
extern vm_offset_t reloc; /* From <arch>/conf.c */
/*
 * Load a 64-bit ELF image (kernel or module) from 'filename' at 'dest'.
 * On success *result describes the loaded file and 0 is returned;
 * otherwise the error from the common ELF loader is passed through.
 */
int
ppc64_uboot_elf_loadfile(char *filename, uint64_t dest,
    struct preloaded_file **result)
{
	int err;

	err = __elfN(loadfile)(filename, dest, result);
	if (err != 0)
		return (err);

	/*
	 * Only the kernel image needs its instruction cache synchronized
	 * here; modules are taken care of by the kernel itself after
	 * relocation.
	 */
	if (strcmp((*result)->f_type, "elf kernel") == 0)
		__syncicache((void *)(*result)->f_addr, (*result)->f_size);

	return (0);
}
/*
 * Transfer control to a previously loaded 64-bit ELF kernel.
 * Builds the boot metadata block, shuts down loader devices and jumps
 * to the kernel entry point; does not return on success.
 */
int
ppc64_uboot_elf_exec(struct preloaded_file *fp)
{
	struct file_metadata *fmp;
	vm_offset_t mdp, dtbp;
	Elf_Ehdr *e;
	int error;
	void (*entry)(void *);

	/* The ELF header was recorded as metadata at load time. */
	if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL) {
		return(EFTYPE);
	}
	e = (Elf_Ehdr *)&fmp->md_data;

	/*
	 * Handle function descriptor for ELFv1 kernels.  The low two bits
	 * of e_flags select the ABI; 2 selects ELFv2, where e_entry is the
	 * entry address itself.  Otherwise e_entry refers to a function
	 * descriptor whose first word holds the real entry address.
	 * NOTE(review): the ELFv1 branch dereferences through a function-
	 * pointer cast -- confirm it actually reads the descriptor's first
	 * word rather than decaying back to the same address.
	 */
	if ((e->e_flags & 3) == 2)
		entry = (void (*)(void*))(intptr_t)e->e_entry;
	else
		entry = *(void (*)(void*))(uint64_t *)(intptr_t)e->e_entry;

	/* Lay out the module/metadata block the kernel parses at boot. */
	if ((error = md_load64(fp->f_args, &mdp, &dtbp)) != 0)
		return (error);

	dev_cleanup();
	printf("Kernel args: %s\n", fp->f_args);

	(*entry)((void *)mdp);
	/* The kernel entry point must not return. */
	panic("exec returned");
}
/*
 * File-format descriptor exported to the loader framework (consumed via
 * the file_formats[] table): pairs the ppc64 ELF load and exec hooks.
 */
struct file_format uboot_elf64 =
{
	ppc64_uboot_elf_loadfile,
	ppc64_uboot_elf_exec
};

View File

@ -55,6 +55,13 @@
#include <machine/atomic-v4.h>
#endif /* Arch >= v6 */
/*
 * Atomically swap *p with v, returning the previous value.
 * Delegates to atomic_swap_32; assumes u_long is 32 bits wide here
 * (ILP32 arm) -- NOTE(review): confirm this header is arm32-only.
 */
static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{
	return (atomic_swap_32((volatile uint32_t *)p, v));
}
#define atomic_clear_ptr atomic_clear_32
#define atomic_clear_acq_ptr atomic_clear_acq_32
#define atomic_clear_rel_ptr atomic_clear_rel_32

View File

@ -1165,7 +1165,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x04, 0x1B, SS_RDEF, /* XXX TBD */
"Logical unit not ready, sanitize in progress") },
/* DT MAEB */
{ SST(0x04, 0x1C, SS_RDEF, /* XXX TBD */
{ SST(0x04, 0x1C, SS_START | SSQ_DECREMENT_COUNT | ENXIO,
"Logical unit not ready, additional power use not yet granted") },
/* D */
{ SST(0x04, 0x1D, SS_RDEF, /* XXX TBD */

View File

@ -78,15 +78,7 @@ atomic_long_dec(atomic_long_t *v)
static inline long
atomic_long_xchg(atomic_long_t *v, long val)
{
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
return atomic_swap_long(&v->counter, val);
#else
long ret = atomic_long_read(v);
while (!atomic_fcmpset_long(&v->counter, &ret, val))
;
return (ret);
#endif
}
static inline long

View File

@ -128,15 +128,7 @@ atomic_clear_mask(unsigned int mask, atomic_t *v)
static inline int
atomic_xchg(atomic_t *v, int i)
{
#if !defined(__mips__)
return (atomic_swap_int(&v->counter, i));
#else
int ret = atomic_read(v);
while (!atomic_fcmpset_int(&v->counter, &ret, i))
;
return (ret);
#endif
}
static inline int

View File

@ -358,10 +358,7 @@ static int
ahci_pci_ctlr_reset(device_t dev)
{
if (pci_read_config(dev, PCIR_DEVVENDOR, 4) == 0x28298086 &&
(pci_read_config(dev, 0x92, 1) & 0xfe) == 0x04)
pci_write_config(dev, 0x92, 0x01, 1);
return ahci_ctlr_reset(dev);
return(ahci_ctlr_reset(dev));
}
static int

View File

@ -1241,10 +1241,10 @@ md_kthread(void *arg)
if (error != -1) {
bp->bio_completed = bp->bio_length;
if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
devstat_end_transaction_bio(sc->devstat, bp);
g_io_deliver(bp, error);
}
if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)
devstat_end_transaction_bio(sc->devstat, bp);
}
}

View File

@ -118,6 +118,7 @@ static const STRUCT_USB_HOST_ID rtwn_devs[] = {
RTWN_RTL8188EU_DEV(DLINK, DWA123D1),
RTWN_RTL8188EU_DEV(DLINK, DWA125D1),
RTWN_RTL8188EU_DEV(ELECOM, WDC150SU2M),
RTWN_RTL8188EU_DEV(TPLINK, WN722N),
RTWN_RTL8188EU_DEV(REALTEK, RTL8188ETV),
RTWN_RTL8188EU_DEV(REALTEK, RTL8188EU),
#undef RTWN_RTL8188EU_DEV

View File

@ -1261,6 +1261,7 @@ efx_bist_stop(
#define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000
#define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000
#define EFX_FEATURE_PACKED_STREAM 0x00004000
#define EFX_FEATURE_TXQ_CKSUM_OP_DESC 0x00008000
typedef enum efx_tunnel_protocol_e {
EFX_TUNNEL_PROTOCOL_NONE = 0,

View File

@ -257,7 +257,8 @@ efx_nic_create(
EFX_FEATURE_PIO_BUFFERS |
EFX_FEATURE_FW_ASSISTED_TSO |
EFX_FEATURE_FW_ASSISTED_TSO_V2 |
EFX_FEATURE_PACKED_STREAM;
EFX_FEATURE_PACKED_STREAM |
EFX_FEATURE_TXQ_CKSUM_OP_DESC;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
@ -277,7 +278,8 @@ efx_nic_create(
EFX_FEATURE_MCDI_DMA |
EFX_FEATURE_PIO_BUFFERS |
EFX_FEATURE_FW_ASSISTED_TSO_V2 |
EFX_FEATURE_PACKED_STREAM;
EFX_FEATURE_PACKED_STREAM |
EFX_FEATURE_TXQ_CKSUM_OP_DESC;
break;
#endif /* EFSYS_OPT_MEDFORD */
@ -293,7 +295,8 @@ efx_nic_create(
EFX_FEATURE_MCDI_DMA |
EFX_FEATURE_PIO_BUFFERS |
EFX_FEATURE_FW_ASSISTED_TSO_V2 |
EFX_FEATURE_PACKED_STREAM;
EFX_FEATURE_PACKED_STREAM |
EFX_FEATURE_TXQ_CKSUM_OP_DESC;
break;
#endif /* EFSYS_OPT_MEDFORD2 */

View File

@ -151,8 +151,8 @@ sfxge_estimate_rsrc_limits(struct sfxge_softc *sc)
limits.edl_min_evq_count = 1;
limits.edl_max_evq_count = evq_max;
limits.edl_min_txq_count = SFXGE_TXQ_NTYPES;
limits.edl_max_txq_count = evq_max + SFXGE_TXQ_NTYPES - 1;
limits.edl_min_txq_count = SFXGE_EVQ0_N_TXQ(sc);
limits.edl_max_txq_count = evq_max + SFXGE_EVQ0_N_TXQ(sc) - 1;
limits.edl_min_rxq_count = 1;
limits.edl_max_rxq_count = evq_max;
@ -168,12 +168,12 @@ sfxge_estimate_rsrc_limits(struct sfxge_softc *sc)
return (rc);
}
KASSERT(txq_allocated >= SFXGE_TXQ_NTYPES,
("txq_allocated < SFXGE_TXQ_NTYPES"));
KASSERT(txq_allocated >= SFXGE_EVQ0_N_TXQ(sc),
("txq_allocated < %u", SFXGE_EVQ0_N_TXQ(sc)));
sc->evq_max = MIN(evq_allocated, evq_max);
sc->evq_max = MIN(rxq_allocated, sc->evq_max);
sc->evq_max = MIN(txq_allocated - (SFXGE_TXQ_NTYPES - 1),
sc->evq_max = MIN(txq_allocated - (SFXGE_EVQ0_N_TXQ(sc) - 1),
sc->evq_max);
KASSERT(sc->evq_max <= evq_max,
@ -205,7 +205,7 @@ sfxge_set_drv_limits(struct sfxge_softc *sc)
limits.edl_min_evq_count = limits.edl_max_evq_count =
sc->intr.n_alloc;
limits.edl_min_txq_count = limits.edl_max_txq_count =
sc->intr.n_alloc + SFXGE_TXQ_NTYPES - 1;
sc->intr.n_alloc + SFXGE_EVQ0_N_TXQ(sc) - 1;
limits.edl_min_rxq_count = limits.edl_max_rxq_count =
sc->intr.n_alloc;
@ -762,6 +762,11 @@ sfxge_create(struct sfxge_softc *sc)
}
sc->rxq_entries = sfxge_rx_ring_entries;
if (efx_nic_cfg_get(enp)->enc_features & EFX_FEATURE_TXQ_CKSUM_OP_DESC)
sc->txq_dynamic_cksum_toggle_supported = B_TRUE;
else
sc->txq_dynamic_cksum_toggle_supported = B_FALSE;
if (!ISP2(sfxge_tx_ring_entries) ||
(sfxge_tx_ring_entries < EFX_TXQ_MINNDESCS) ||
(sfxge_tx_ring_entries > efx_nic_cfg_get(enp)->enc_txq_max_ndescs)) {

View File

@ -184,6 +184,10 @@ struct sfxge_evq {
unsigned int buf_base_id;
unsigned int entries;
char lock_name[SFXGE_LOCK_NAME_MAX];
#if EFSYS_OPT_QSTATS
clock_t stats_update_time;
uint64_t stats[EV_NQSTATS];
#endif
} __aligned(CACHE_LINE_SIZE);
#define SFXGE_NDESCS 1024
@ -275,6 +279,9 @@ struct sfxge_softc {
struct ifnet *ifnet;
unsigned int if_flags;
struct sysctl_oid *stats_node;
#if EFSYS_OPT_QSTATS
struct sysctl_oid *evqs_stats_node;
#endif
struct sysctl_oid *txqs_node;
struct task task_reset;
@ -287,6 +294,8 @@ struct sfxge_softc {
efx_nic_t *enp;
efsys_lock_t enp_lock;
boolean_t txq_dynamic_cksum_toggle_supported;
unsigned int rxq_entries;
unsigned int txq_entries;

View File

@ -269,9 +269,13 @@ sfxge_get_txq_by_label(struct sfxge_evq *evq, enum sfxge_txq_type label)
{
unsigned int index;
KASSERT((evq->index == 0 && label < SFXGE_TXQ_NTYPES) ||
(label == SFXGE_TXQ_IP_TCP_UDP_CKSUM), ("unexpected txq label"));
index = (evq->index == 0) ? label : (evq->index - 1 + SFXGE_TXQ_NTYPES);
KASSERT((evq->sc->txq_dynamic_cksum_toggle_supported) ? (label == 0) :
((evq->index == 0 && label < SFXGE_TXQ_NTYPES) ||
(label == SFXGE_TXQ_IP_TCP_UDP_CKSUM)),
("unexpected txq label"));
index = (evq->index == 0) ?
label : (evq->index - 1 + SFXGE_EVQ0_N_TXQ(evq->sc));
return (evq->sc->txq[index]);
}
@ -442,30 +446,95 @@ sfxge_ev_wake_up(void *arg, uint32_t index)
#if EFSYS_OPT_QSTATS
static void
sfxge_evq_stat_update(struct sfxge_evq *evq)
{
clock_t now;
SFXGE_EVQ_LOCK(evq);
if (__predict_false(evq->init_state != SFXGE_EVQ_STARTED))
goto out;
now = ticks;
if ((unsigned int)(now - evq->stats_update_time) < (unsigned int)hz)
goto out;
evq->stats_update_time = now;
efx_ev_qstats_update(evq->common, evq->stats);
out:
SFXGE_EVQ_UNLOCK(evq);
}
static int
sfxge_evq_stat_handler(SYSCTL_HANDLER_ARGS)
{
struct sfxge_evq *evq = arg1;
struct sfxge_softc *sc = evq->sc;
unsigned int id = arg2;
SFXGE_ADAPTER_LOCK(sc);
sfxge_evq_stat_update(evq);
SFXGE_ADAPTER_UNLOCK(sc);
return (SYSCTL_OUT(req, &evq->stats[id], sizeof(evq->stats[id])));
}
static int
sfxge_evq_stat_init(struct sfxge_evq *evq)
{
struct sfxge_softc *sc = evq->sc;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
char name[16];
struct sysctl_oid *evq_stats_node;
unsigned int id;
snprintf(name, sizeof(name), "%u", evq->index);
evq_stats_node = SYSCTL_ADD_NODE(ctx,
SYSCTL_CHILDREN(sc->evqs_stats_node),
OID_AUTO, name, CTLFLAG_RD, NULL, "");
if (evq_stats_node == NULL)
return (ENOMEM);
for (id = 0; id < EV_NQSTATS; id++) {
SYSCTL_ADD_PROC(
ctx, SYSCTL_CHILDREN(evq_stats_node),
OID_AUTO, efx_ev_qstat_name(sc->enp, id),
CTLTYPE_U64|CTLFLAG_RD,
evq, id, sfxge_evq_stat_handler, "Q",
"");
}
return (0);
}
static void
sfxge_ev_stat_update(struct sfxge_softc *sc)
{
struct sfxge_evq *evq;
unsigned int index;
clock_t now;
unsigned int id;
SFXGE_ADAPTER_LOCK(sc);
if (__predict_false(sc->evq[0]->init_state != SFXGE_EVQ_STARTED))
goto out;
now = ticks;
if ((unsigned int)(now - sc->ev_stats_update_time) < (unsigned int)hz)
goto out;
sc->ev_stats_update_time = now;
/* Add event counts from each event queue in turn */
memset(sc->ev_stats, 0, sizeof(sc->ev_stats));
/* Update and add event counts from each event queue in turn */
for (index = 0; index < sc->evq_count; index++) {
evq = sc->evq[index];
SFXGE_EVQ_LOCK(evq);
efx_ev_qstats_update(evq->common, sc->ev_stats);
SFXGE_EVQ_UNLOCK(evq);
sfxge_evq_stat_update(evq);
for (id = 0; id < EV_NQSTATS; id++)
sc->ev_stats[id] += evq->stats[id];
}
out:
SFXGE_ADAPTER_UNLOCK(sc);
@ -672,7 +741,7 @@ sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
#if EFSYS_OPT_QSTATS
/* Add event counts before discarding the common evq state */
efx_ev_qstats_update(evq->common, sc->ev_stats);
efx_ev_qstats_update(evq->common, evq->stats);
#endif
efx_ev_qdestroy(evq->common);
@ -873,7 +942,24 @@ sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
evq->init_state = SFXGE_EVQ_INITIALIZED;
#if EFSYS_OPT_QSTATS
rc = sfxge_evq_stat_init(evq);
if (rc != 0)
goto fail_evq_stat_init;
#endif
return (0);
#if EFSYS_OPT_QSTATS
fail_evq_stat_init:
evq->init_state = SFXGE_EVQ_UNINITIALIZED;
SFXGE_EVQ_LOCK_DESTROY(evq);
sfxge_dma_free(esmp);
sc->evq[index] = NULL;
free(evq, M_SFXGE);
return (rc);
#endif
}
void
@ -922,6 +1008,16 @@ sfxge_ev_init(struct sfxge_softc *sc)
sc, 0, sfxge_int_mod_handler, "IU",
"sfxge interrupt moderation (us)");
#if EFSYS_OPT_QSTATS
sc->evqs_stats_node = SYSCTL_ADD_NODE(
device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(sc->stats_node),
OID_AUTO, "evq", CTLFLAG_RD, NULL, "Event queues stats");
if (sc->evqs_stats_node == NULL) {
rc = ENOMEM;
goto fail_evqs_stats_node;
}
#endif
/*
* Initialize the event queue(s) - one per interrupt.
*/
@ -940,6 +1036,9 @@ sfxge_ev_init(struct sfxge_softc *sc)
while (--index >= 0)
sfxge_ev_qfini(sc, index);
#if EFSYS_OPT_QSTATS
fail_evqs_stats_node:
#endif
sc->evq_count = 0;
return (rc);
}

View File

@ -35,7 +35,7 @@
/* Theory of operation:
*
* Tx queues allocation and mapping
* Tx queues allocation and mapping on Siena
*
* One Tx queue with enabled checksum offload is allocated per Rx channel
* (event queue). Also 2 Tx queues (one without checksum offload and one
@ -46,6 +46,17 @@
* if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES)
* else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
* See sfxge_get_txq_by_label() sfxge_ev.c
*
* Tx queue allocation and mapping on EF10
*
* One Tx queue with enabled checksum offload is allocated per Rx
* channel (event queue). Checksum offload on all Tx queues is enabled or
* disabled dynamically by inserting option descriptors, so the additional
* queues used on Siena are not required.
*
* TxQ label is always set to zero on EF10 hardware.
* So, event queue to Tx queue mapping is simple:
* TxQ-index = EvQ-index
*/
#include <sys/cdefs.h>
@ -139,25 +150,7 @@ static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
const bus_dma_segment_t *dma_seg, int n_dma_seg,
int vlan_tagged);
static int
sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf)
{
uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ?
mbuf->m_pkthdr.ether_vtag :
0);
if (this_tag == txq->hw_vlan_tci)
return (0);
efx_tx_qdesc_vlantci_create(txq->common,
bswap16(this_tag),
&txq->pend_desc[0]);
txq->n_pend_desc = 1;
txq->hw_vlan_tci = this_tag;
return (1);
}
int n_extra_descs);
static inline void
sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp)
@ -170,6 +163,61 @@ sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp)
(*pstmp)++;
}
static int
sfxge_tx_maybe_toggle_cksum_offload(struct sfxge_txq *txq, struct mbuf *mbuf,
struct sfxge_tx_mapping **pstmp)
{
uint16_t new_hw_cksum_flags;
efx_desc_t *desc;
if (mbuf->m_pkthdr.csum_flags &
(CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6 | CSUM_TSO)) {
/*
* We always set EFX_TXQ_CKSUM_IPV4 here because this
* configuration is the most useful, and this won't
* cause any trouble in case of IPv6 traffic anyway.
*/
new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
} else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4;
} else {
new_hw_cksum_flags = 0;
}
if (new_hw_cksum_flags == txq->hw_cksum_flags)
return (0);
desc = &txq->pend_desc[txq->n_pend_desc];
efx_tx_qdesc_checksum_create(txq->common, new_hw_cksum_flags, desc);
txq->hw_cksum_flags = new_hw_cksum_flags;
txq->n_pend_desc++;
sfxge_next_stmp(txq, pstmp);
return (1);
}
static int
sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf,
struct sfxge_tx_mapping **pstmp)
{
uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ?
mbuf->m_pkthdr.ether_vtag :
0);
efx_desc_t *desc;
if (this_tag == txq->hw_vlan_tci)
return (0);
desc = &txq->pend_desc[txq->n_pend_desc];
efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), desc);
txq->hw_vlan_tci = this_tag;
txq->n_pend_desc++;
sfxge_next_stmp(txq, pstmp);
return (1);
}
void
sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
@ -361,8 +409,9 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
int rc;
int i;
int eop;
uint16_t hw_cksum_flags_prev;
uint16_t hw_vlan_tci_prev;
int vlan_tagged;
int n_extra_descs;
KASSERT(!txq->blocked, ("txq->blocked"));
@ -413,14 +462,20 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
used_map = &stmp->map;
hw_cksum_flags_prev = txq->hw_cksum_flags;
hw_vlan_tci_prev = txq->hw_vlan_tci;
vlan_tagged = sfxge_tx_maybe_insert_tag(txq, mbuf);
if (vlan_tagged) {
sfxge_next_stmp(txq, &stmp);
}
/*
* The order of option descriptors, which are used to leverage VLAN tag
* and checksum offloads, might be important. Changing checksum offload
* between VLAN option and packet descriptors probably does not work.
*/
n_extra_descs = sfxge_tx_maybe_toggle_cksum_offload(txq, mbuf, &stmp);
n_extra_descs += sfxge_tx_maybe_insert_tag(txq, mbuf, &stmp);
if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg, vlan_tagged);
rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg,
n_extra_descs);
if (rc < 0)
goto reject_mapped;
stmp = &txq->stmp[(rc - 1) & txq->ptr_mask];
@ -431,7 +486,7 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
i = 0;
for (;;) {
desc = &txq->pend_desc[i + vlan_tagged];
desc = &txq->pend_desc[i + n_extra_descs];
eop = (i == n_dma_seg - 1);
efx_tx_qdesc_dma_create(txq->common,
dma_seg[i].ds_addr,
@ -443,7 +498,7 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
i++;
sfxge_next_stmp(txq, &stmp);
}
txq->n_pend_desc = n_dma_seg + vlan_tagged;
txq->n_pend_desc = n_dma_seg + n_extra_descs;
}
/*
@ -467,6 +522,7 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
reject_mapped:
txq->hw_vlan_tci = hw_vlan_tci_prev;
txq->hw_cksum_flags = hw_cksum_flags_prev;
bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
/* Drop the packet on the floor. */
@ -840,8 +896,9 @@ sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
("interface not up"));
/* Pick the desired transmit queue. */
if (m->m_pkthdr.csum_flags &
(CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO)) {
if (sc->txq_dynamic_cksum_toggle_supported |
(m->m_pkthdr.csum_flags &
(CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO))) {
int index = 0;
#ifdef RSS
@ -867,7 +924,9 @@ sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
if (m->m_pkthdr.csum_flags & CSUM_TSO)
sfxge_parse_tx_packet(m);
#endif
txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
index += (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) ?
SFXGE_TXQ_IP_TCP_UDP_CKSUM : 0;
txq = sc->txq[index];
} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
} else {
@ -1311,7 +1370,7 @@ static int tso_start_new_packet(struct sfxge_txq *txq,
static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
const bus_dma_segment_t *dma_seg, int n_dma_seg,
int vlan_tagged)
int n_extra_descs)
{
struct sfxge_tso_state tso;
unsigned int id;
@ -1328,7 +1387,7 @@ sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
tso.in_len = dma_seg->ds_len - (tso.header_len - skipped);
tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped);
id = (txq->added + vlan_tagged) & txq->ptr_mask;
id = (txq->added + n_extra_descs) & txq->ptr_mask;
if (__predict_false(tso_start_new_packet(txq, &tso, &id)))
return (-1);
@ -1492,6 +1551,8 @@ sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
EFX_TXQ_NBUFS(sc->txq_entries));
txq->hw_cksum_flags = 0;
SFXGE_EVQ_UNLOCK(evq);
SFXGE_TXQ_UNLOCK(txq);
}
@ -1513,6 +1574,10 @@ sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type,
unsigned int fa_tso_v1_max_descs = 0;
unsigned int fa_tso_v2_max_descs = 0;
/* Checksum offload Tx option descriptor may be required */
if (sc->txq_dynamic_cksum_toggle_supported)
max_descs++;
/* VLAN tagging Tx option descriptor may be required */
if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled)
max_descs++;
@ -1557,6 +1622,7 @@ sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
efsys_mem_t *esmp;
uint16_t flags;
unsigned int tso_fw_assisted;
unsigned int label;
struct sfxge_evq *evq;
unsigned int desc_index;
int rc;
@ -1598,8 +1664,10 @@ sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
break;
}
label = (sc->txq_dynamic_cksum_toggle_supported) ? 0 : txq->type;
/* Create the common code transmit queue. */
if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp,
sc->txq_entries, txq->buf_base_id, flags, evq->common,
&txq->common, &desc_index)) != 0) {
/* Retry if no FATSOv2 resources, otherwise fail */
@ -1609,7 +1677,7 @@ sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
/* Looks like all FATSOv2 contexts are used */
flags &= ~EFX_TXQ_FATSOV2;
tso_fw_assisted &= ~SFXGE_FATSOV2;
if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp,
sc->txq_entries, txq->buf_base_id, flags, evq->common,
&txq->common, &desc_index)) != 0)
goto fail;
@ -1632,6 +1700,9 @@ sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
txq->hw_vlan_tci = 0;
txq->hw_cksum_flags = flags &
(EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP);
SFXGE_TXQ_UNLOCK(txq);
return (0);
@ -1973,7 +2044,7 @@ sfxge_tx_init(struct sfxge_softc *sc)
goto fail_tx_dpl_put_max;
}
sc->txq_count = SFXGE_TXQ_NTYPES - 1 + sc->intr.n_alloc;
sc->txq_count = SFXGE_EVQ0_N_TXQ(sc) - 1 + sc->intr.n_alloc;
sc->tso_fw_assisted = sfxge_tso_fw_assisted;
if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) ||
@ -1993,18 +2064,20 @@ sfxge_tx_init(struct sfxge_softc *sc)
}
/* Initialize the transmit queues */
if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
SFXGE_TXQ_NON_CKSUM, 0)) != 0)
goto fail;
if (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) {
if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
SFXGE_TXQ_NON_CKSUM, 0)) != 0)
goto fail;
if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
SFXGE_TXQ_IP_CKSUM, 0)) != 0)
goto fail2;
if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
SFXGE_TXQ_IP_CKSUM, 0)) != 0)
goto fail2;
}
for (index = 0;
index < sc->txq_count - SFXGE_TXQ_NTYPES + 1;
index < sc->txq_count - SFXGE_EVQ0_N_TXQ(sc) + 1;
index++) {
if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NTYPES - 1 + index,
if ((rc = sfxge_tx_qinit(sc, SFXGE_EVQ0_N_TXQ(sc) - 1 + index,
SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
goto fail3;
}

View File

@ -139,6 +139,10 @@ enum sfxge_txq_type {
SFXGE_TXQ_NTYPES
};
#define SFXGE_EVQ0_N_TXQ(_sc) \
((_sc)->txq_dynamic_cksum_toggle_supported ? \
1 : SFXGE_TXQ_NTYPES)
#define SFXGE_TXQ_UNBLOCK_LEVEL(_entries) (EFX_TXQ_LIMIT(_entries) / 4)
#define SFXGE_TX_BATCH 64
@ -204,6 +208,9 @@ struct sfxge_txq {
unsigned int added;
unsigned int reaped;
/* The last (or constant) set of HW offloads requested on the queue */
uint16_t hw_cksum_flags;
/* The last VLAN TCI seen on the queue if FW-assisted tagging is
used */
uint16_t hw_vlan_tci;

View File

@ -4346,6 +4346,7 @@ product SITECOMEU RT3072_4 0x0048 RT3072
product SITECOMEU RT3072_5 0x004a RT3072
product SITECOMEU WL349V1 0x004b WL-349 v1
product SITECOMEU RT3072_6 0x004d RT3072
product SITECOMEU WLA1000 0x005b WLA-1000
product SITECOMEU RTL8188CU_1 0x0052 RTL8188CU
product SITECOMEU RTL8188CU_2 0x005c RTL8188CU
product SITECOMEU RTL8192CU 0x0061 RTL8192CU
@ -4611,6 +4612,7 @@ product TOSHIBA TRANSMEMORY 0x6545 USB ThumbDrive
product TPLINK T4U 0x0101 Archer T4U
product TPLINK WN822NV4 0x0108 TL-WN822N v4
product TPLINK WN823NV2 0x0109 TL-WN823N v2
product TPLINK WN722N 0x010c TL-WN722N
product TPLINK T4UV2 0x010d Archer T4U ver 2
product TPLINK T4UHV2 0x010e Archer T4UH ver 2
product TPLINK RTL8153 0x0601 RTL8153 USB 10/100/1000 LAN

View File

@ -114,6 +114,7 @@ static const STRUCT_USB_HOST_ID rsu_devs[] = {
RSU_HT_NOT_SUPPORTED) }
RSU_DEV(ASUS, RTL8192SU),
RSU_DEV(AZUREWAVE, RTL8192SU_4),
RSU_DEV(SITECOMEU, WLA1000),
RSU_DEV_HT(ACCTON, RTL8192SU),
RSU_DEV_HT(ASUS, USBN10),
RSU_DEV_HT(AZUREWAVE, RTL8192SU_1),

View File

@ -2847,6 +2847,8 @@ issignal(struct thread *td)
sig = ptracestop(td, sig, &ksi);
mtx_lock(&ps->ps_mtx);
td->td_si.si_signo = 0;
/*
* Keep looking if the debugger discarded or
* replaced the signal.

View File

@ -197,7 +197,7 @@ thread_ctor(void *mem, int size, void *arg, int flags)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
td->td_oncpu = NOCPU;
td->td_lastcpu = td->td_oncpu = NOCPU;
td->td_tid = tid_alloc();

View File

@ -644,14 +644,14 @@ blst_next_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
/*
* BLST_LEAF_ALLOC() - allocate at a leaf in the radix tree (a bitmap).
*
* This is the core of the allocator and is optimized for the
* BLIST_BMAP_RADIX block allocation case. Otherwise, execution
* time is proportional to log2(count) + bitpos time.
* This function is the core of the allocator. Its execution time is
* proportional to log(count), plus height of the tree if the allocation
* crosses a leaf boundary.
*/
static daddr_t
blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
{
u_daddr_t mask;
u_daddr_t cursor_mask, mask;
int count1, hi, lo, num_shifts, range1, range_ext;
range1 = 0;
@ -661,14 +661,14 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
while ((-mask & ~mask) != 0 && num_shifts > 0) {
/*
* If bit i is set in mask, then bits in [i, i+range1] are set
* in scan->bm_bitmap. The value of range1 is equal to
* count1 >> num_shifts. Grow range and reduce num_shifts to 0,
* while preserving these invariants. The updates to mask leave
* fewer bits set, but each bit that remains set represents a
* longer string of consecutive bits set in scan->bm_bitmap.
* If more updates to mask cannot clear more bits, because mask
* is partitioned with all 0 bits preceding all 1 bits, the loop
* terminates immediately.
* in scan->bm_bitmap. The value of range1 is equal to count1
* >> num_shifts. Grow range1 and reduce num_shifts to 0,
* while preserving these invariants. The updates to mask
* leave fewer bits set, but each bit that remains set
* represents a longer string of consecutive bits set in
* scan->bm_bitmap. If more updates to mask cannot clear more
* bits, because mask is partitioned with all 0 bits preceding
* all 1 bits, the loop terminates immediately.
*/
num_shifts--;
range_ext = range1 + ((count1 >> num_shifts) & 1);
@ -691,9 +691,22 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
}
/* Discard any candidates that appear before blk. */
mask &= (u_daddr_t)-1 << (blk & BLIST_BMAP_MASK);
if (mask == 0)
return (SWAPBLK_NONE);
if ((blk & BLIST_BMAP_MASK) != 0) {
cursor_mask = mask & bitrange(0, blk & BLIST_BMAP_MASK);
if (cursor_mask != 0) {
mask ^= cursor_mask;
if (mask == 0)
return (SWAPBLK_NONE);
/*
* Bighint change for last block allocation cannot
* assume that any other blocks are allocated, so the
* bighint cannot be reduced much.
*/
range1 = BLIST_MAX_ALLOC - 1;
}
blk &= ~BLIST_BMAP_MASK;
}
/*
* The least significant set bit in mask marks the start of the first
@ -734,7 +747,7 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
}
/* Clear the allocated bits from this leaf. */
scan->bm_bitmap &= ~mask;
return ((blk & ~BLIST_BMAP_MASK) + lo);
return (blk + lo);
}
/*

View File

@ -4196,8 +4196,8 @@ sys_getfhat(struct thread *td, struct getfhat_args *uap)
if ((uap->flags & ~(AT_SYMLINK_NOFOLLOW | AT_BENEATH)) != 0)
return (EINVAL);
return (kern_getfhat(td, uap->flags, uap->fd, uap->path ? uap->path : ".",
UIO_USERSPACE, uap->fhp));
return (kern_getfhat(td, uap->flags, uap->fd, uap->path, UIO_USERSPACE,
uap->fhp));
}
static int

View File

@ -1,24 +0,0 @@
# $FreeBSD$
# device.hints
hint.obio.0.at="nexus0"
hint.obio.0.maddr=0x0
hint.obio.0.msize=0x1fffffff
# host-to-pci bridge
hint.pcib.0.at="obio0"
hint.pcib.0.maddr=0x11400000
hint.pcib.0.msize=0x100000
hint.pcib.0.io=0x11500000
hint.pcib.0.iosize=0x100000
# on-board switch engine
hint.admsw.0.at="obio0"
hint.admsw.0.maddr=0x12000000
hint.admsw.0.msize=0x200000
hint.admsw.0.irq=9
# uart0
hint.uart.0.at="obio0"
hint.uart.0.maddr=0x12600000
hint.uart.0.msize=0x200000
hint.uart.0.irq=1

View File

@ -1,22 +0,0 @@
# $FreeBSD$
# device.hints
hint.obio.0.at="nexus0"
hint.obio.0.maddr=0x0
hint.obio.0.msize=0x1fffffff
# host-to-pci bridge
hint.pcib.0.at="obio0"
# hint.pcib.0.maddr=0x11400000
# hint.pcib.0.msize=0x100000
# hint.pcib.0.io=0x11500000
# hint.pcib.0.iosize=0x100000
# uart0
hint.uart.0.at="obio0"
hint.uart.0.maddr=0x18058000
hint.uart.0.msize=0x1C
hint.uart.0.irq=104
hint.kr.0.at="obio0"
hint.kr.0.maddr=0x18060000
hint.kr.0.msize=0x10000

View File

@ -755,4 +755,68 @@ atomic_thread_fence_seq_cst(void)
#define atomic_store_rel_ptr atomic_store_rel_long
#define atomic_readandclear_ptr atomic_readandclear_long
/*
 * atomic_swap_int():  atomically exchange *ptr with 'value' and return
 * the previous contents.  No native swap primitive is used here; the
 * exchange is emulated with a compare-and-set retry loop (fcmpset
 * refreshes the expected value on failure, per the fcmpset contract).
 */
static __inline unsigned int
atomic_swap_int(volatile unsigned int *ptr, const unsigned int value)
{
	unsigned int prev;

	prev = *ptr;
	do {
		/* Retry until the CAS installs 'value' over 'prev'. */
	} while (!atomic_fcmpset_int(ptr, &prev, value));

	return (prev);
}
/*
 * atomic_swap_32():  atomically exchange the 32-bit word at *ptr with
 * 'value', returning the old contents.  Implemented as an fcmpset
 * retry loop; 'old' is reloaded by fcmpset on each failed attempt.
 */
static __inline uint32_t
atomic_swap_32(volatile uint32_t *ptr, const uint32_t value)
{
	uint32_t old;

	for (old = *ptr; !atomic_fcmpset_32(ptr, &old, value);)
		continue;

	return (old);
}
#if defined(__mips_n64) || defined(__mips_n32)
/*
 * atomic_swap_64():  atomically exchange the 64-bit word at *ptr with
 * 'value', returning the old contents.  Only compiled for the n64/n32
 * ABIs (see the enclosing #if), where 64-bit fcmpset is available.
 */
static __inline uint64_t
atomic_swap_64(volatile uint64_t *ptr, const uint64_t value)
{
	uint64_t old;

	for (old = *ptr; !atomic_fcmpset_64(ptr, &old, value);)
		continue;

	return (old);
}
#endif
/*
 * atomic_swap_long():  atomically exchange *ptr with 'value' and
 * return the previous contents, emulated with an fcmpset retry loop.
 *
 * On the n64 ABI 'unsigned long' is 64 bits wide, so the exchange must
 * use the 64-bit fcmpset: the previous unconditional 32-bit cast only
 * swapped half of the word (and addressed the wrong half on big-endian
 * MIPS).  On O32/n32, long is 32 bits and the 32-bit path is correct.
 */
static __inline unsigned long
atomic_swap_long(volatile unsigned long *ptr, const unsigned long value)
{
	unsigned long retval;

	retval = *ptr;

#if defined(__mips_n64)
	while (!atomic_fcmpset_64((volatile uint64_t *)ptr,
	    (uint64_t *)&retval, value))
		;
#else
	while (!atomic_fcmpset_32((volatile uint32_t *)ptr,
	    (uint32_t *)&retval, value))
		;
#endif
	return (retval);
}
/*
 * atomic_swap_ptr():  atomically exchange the pointer-sized word at
 * *ptr with 'value' and return the previous contents.
 *
 * uintptr_t is 64 bits on the n64 ABI, so the 64-bit fcmpset must be
 * used there; the previous unconditional 32-bit cast exchanged only
 * half of the pointer.  n32 and O32 pointers are 32 bits wide and keep
 * the 32-bit path.
 */
static __inline uintptr_t
atomic_swap_ptr(volatile uintptr_t *ptr, const uintptr_t value)
{
	uintptr_t retval;

	retval = *ptr;

#if defined(__mips_n64)
	while (!atomic_fcmpset_64((volatile uint64_t *)ptr,
	    (uint64_t *)&retval, value))
		;
#else
	while (!atomic_fcmpset_32((volatile uint32_t *)ptr,
	    (uint32_t *)&retval, value))
		;
#endif
	return (retval);
}
#endif /* ! _MACHINE_ATOMIC_H_ */

View File

@ -524,7 +524,7 @@ _cxgbe= cxgbe
.endif
# These rely on 64bit atomics
.if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspc" && \
.if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \
${MACHINE_CPUARCH} != "mips"
_mps= mps
_mpr= mpr

View File

@ -366,6 +366,7 @@ aim_cpu_init(vm_offset_t toc)
bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize);
bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize);
#endif
bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -

View File

@ -549,14 +549,9 @@ bp_kernload:
add %r2,%r1,%r2
mtspr SPR_SPRG8, %r2
/* Get load offset */
ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */
subf %r31,%r31,%r2 /* Subtract from real TOC base to get base */
/* Set up the stack pointer */
ld %r1,TOC_REF(tmpstack)(%r2)
addi %r1,%r1,TMPSTACKSZ-96
add %r1,%r1,%r31
#else
/*
* Setup a temporary stack

View File

@ -852,6 +852,9 @@ atomic_swap_64(volatile u_long *p, u_long v)
#define atomic_fetchadd_64 atomic_fetchadd_long
#define atomic_swap_long atomic_swap_64
#define atomic_swap_ptr atomic_swap_64
#else
#define atomic_swap_long(p,v) atomic_swap_32((volatile u_int *)(p), v)
#define atomic_swap_ptr(p,v) atomic_swap_32((volatile u_int *)(p), v)
#endif
#undef __ATOMIC_REL

View File

@ -103,6 +103,9 @@
#define EXC_SPFPD 0x2f30 /* SPE Floating-point Data */
#define EXC_SPFPR 0x2f40 /* SPE Floating-point Round */
/* POWER8 */
#define EXC_SOFT_PATCH 0x1500 /* POWER8 Soft Patch Exception */
#define EXC_LAST 0x2f00 /* Last possible exception vector */
#define EXC_AST 0x3000 /* Fake AST vector */

View File

@ -347,7 +347,7 @@ mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pcpu *pc)
rel_va = rel_page + (rel_pa & PAGE_MASK);
pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
rel = (struct cpu_release *)rel_va;
bptr = ((vm_paddr_t)(uintptr_t)__boot_page - __startkernel) + kernload;
bptr = pmap_kextract((uintptr_t)__boot_page);
cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
rel->pir = pc->pc_cpuid; __asm __volatile("sync");
rel->entry_h = (bptr >> 32);
@ -416,7 +416,7 @@ mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc)
/* Flush caches to have our changes hit DRAM. */
cpu_flush_dcache(__boot_page, 4096);
bptr = ((vm_paddr_t)(uintptr_t)__boot_page - __startkernel) + kernload;
bptr = pmap_kextract((uintptr_t)__boot_page);
KASSERT((bptr & 0xfff) == 0,
("%s: boot page is not aligned (%#jx)", __func__, (uintmax_t)bptr));
if (mpc85xx_is_qoriq()) {

View File

@ -255,6 +255,7 @@ db_backtrace(struct thread *td, db_addr_t fp, int count)
case EXC_DECR: trapstr = "DECR"; break;
case EXC_PERF: trapstr = "PERF"; break;
case EXC_VSX: trapstr = "VSX"; break;
case EXC_SOFT_PATCH: trapstr = "SOFT_PATCH"; break;
default: trapstr = NULL; break;
}
if (trapstr != NULL) {

View File

@ -95,6 +95,7 @@ static void syscall(struct trapframe *frame);
void handle_kernel_slb_spill(int, register_t, register_t);
static int handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int n_slbs;
static void normalize_inputs(void);
#endif
extern vm_offset_t __startkernel;
@ -147,6 +148,7 @@ static struct powerpc_exception powerpc_exceptions[] = {
{ EXC_VECAST_G4, "altivec assist" },
{ EXC_THRM, "thermal management" },
{ EXC_RUNMODETRC, "run mode/trace" },
{ EXC_SOFT_PATCH, "soft patch exception" },
{ EXC_LAST, NULL }
};
@ -382,6 +384,17 @@ trap(struct trapframe *frame)
ucode = BUS_OBJERR;
break;
#if defined(__powerpc64__) && defined(AIM)
case EXC_SOFT_PATCH:
/*
* Point to the instruction that generated the exception to execute it again,
* and normalize the register values.
*/
frame->srr0 -= 4;
normalize_inputs();
break;
#endif
default:
trap_fatal(frame);
}
@ -909,6 +922,49 @@ fix_unaligned(struct thread *td, struct trapframe *frame)
return (-1);
}
#if defined(__powerpc64__) && defined(AIM)
#define MSKNSHL(x, m, n) "(((" #x ") & " #m ") << " #n ")"
#define MSKNSHR(x, m, n) "(((" #x ") & " #m ") >> " #n ")"
/* xvcpsgndp instruction, built in opcode format.
* This can be changed to use mnemonic after a toolchain update.
*/
#define XVCPSGNDP(xt, xa, xb) \
__asm __volatile(".long (" \
MSKNSHL(60, 0x3f, 26) " | " \
MSKNSHL(xt, 0x1f, 21) " | " \
MSKNSHL(xa, 0x1f, 16) " | " \
MSKNSHL(xb, 0x1f, 11) " | " \
MSKNSHL(240, 0xff, 3) " | " \
MSKNSHR(xa, 0x20, 3) " | " \
MSKNSHR(xa, 0x20, 4) " | " \
MSKNSHR(xa, 0x20, 5) ")")
/* Macros to normalize 1 or 10 VSX registers */
#define NORM(x) XVCPSGNDP(x, x, x)
#define NORM10(x) \
NORM(x ## 0); NORM(x ## 1); NORM(x ## 2); NORM(x ## 3); NORM(x ## 4); \
NORM(x ## 5); NORM(x ## 6); NORM(x ## 7); NORM(x ## 8); NORM(x ## 9)
/*
 * Rewrite every VSX register (vs0-vs63) with xvcpsgndp vsN,vsN,vsN via
 * the NORM/NORM10 macros above — NORM(0..9), NORM10(1..5) covering
 * vs10-vs59, and NORM(60..63).  Invoked from trap() on EXC_SOFT_PATCH,
 * which rewinds srr0 by one instruction so the faulting instruction
 * re-executes afterwards; presumably the self-copy forces the hardware
 * to renormalize denormal inputs — TODO confirm against the POWER8
 * soft-patch documentation.
 */
static void
normalize_inputs(void)
{
	unsigned long msr;

	/* enable VSX */
	msr = mfmsr();
	mtmsr(msr | PSL_VSX);

	NORM(0);   NORM(1);   NORM(2);   NORM(3);   NORM(4);
	NORM(5);   NORM(6);   NORM(7);   NORM(8);   NORM(9);
	NORM10(1); NORM10(2); NORM10(3); NORM10(4); NORM10(5);
	NORM(60);  NORM(61);  NORM(62);  NORM(63);

	/* restore MSR */
	mtmsr(msr);
}
#endif
#ifdef KDB
int
db_trap_glue(struct trapframe *frame)

View File

@ -213,8 +213,6 @@ __FBSDID("$FreeBSD$");
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");
struct pmap kernel_pmap_store;
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
@ -356,36 +354,6 @@ pmap_l3(pmap_t pmap, vm_offset_t va)
return (pmap_l2_to_l3(l2, va));
}
static __inline int
pmap_is_write(pt_entry_t entry)
{
return (entry & PTE_W);
}
static __inline int
pmap_l3_valid(pt_entry_t l3)
{
return (l3 & PTE_V);
}
static inline int
pmap_page_accessed(pt_entry_t pte)
{
return (pte & PTE_A);
}
/* Checks if the page is dirty. */
static inline int
pmap_page_dirty(pt_entry_t pte)
{
return (pte & PTE_D);
}
static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{
@ -898,7 +866,7 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
retry:
l3p = pmap_l3(pmap, va);
if (l3p != NULL && (l3 = pmap_load(l3p)) != 0) {
if ((pmap_is_write(l3)) || ((prot & VM_PROT_WRITE) == 0)) {
if ((l3 & PTE_W) != 0 || (prot & VM_PROT_WRITE) == 0) {
phys = PTE_TO_PHYS(l3);
if (vm_page_pa_tryrelock(pmap, phys, &pa))
goto retry;
@ -1777,7 +1745,7 @@ pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
if (old_l3 & PTE_SW_MANAGED) {
phys = PTE_TO_PHYS(old_l3);
m = PHYS_TO_VM_PAGE(phys);
if (pmap_page_dirty(old_l3))
if ((old_l3 & PTE_D) != 0)
vm_page_dirty(m);
if (old_l3 & PTE_A)
vm_page_aflag_set(m, PGA_REFERENCED);
@ -1935,7 +1903,7 @@ pmap_remove_all(vm_page_t m)
/*
* Update the vm_page_t clean and reference bits.
*/
if (pmap_page_dirty(tl3))
if ((tl3 & PTE_D) != 0)
vm_page_dirty(m);
pmap_unuse_l3(pmap, pv->pv_va, pmap_load(l2), &free);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
@ -1997,9 +1965,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
sva += L3_SIZE) {
l3 = pmap_load(l3p);
if (pmap_l3_valid(l3)) {
if ((l3 & PTE_V) != 0) {
entry = pmap_load(l3p);
entry &= ~(PTE_W);
entry &= ~PTE_W;
pmap_load_store(l3p, entry);
/* XXX: Use pmap_invalidate_range */
pmap_invalidate_page(pmap, sva);
@ -2186,7 +2154,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Is the specified virtual address already mapped?
*/
if (pmap_l3_valid(orig_l3)) {
if ((orig_l3 & PTE_V) != 0) {
/*
* Wiring change, just update stats. We don't worry about
* wiring PT pages as they remain resident as long as there
@ -2217,10 +2185,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* No, might be a protection or wiring change.
*/
if ((orig_l3 & PTE_SW_MANAGED) != 0) {
if (pmap_is_write(new_l3))
vm_page_aflag_set(m, PGA_WRITEABLE);
}
if ((orig_l3 & PTE_SW_MANAGED) != 0 &&
(new_l3 & PTE_W) != 0)
vm_page_aflag_set(m, PGA_WRITEABLE);
goto validate;
}
@ -2245,7 +2212,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* concurrent calls to pmap_page_test_mappings() and
* pmap_ts_referenced().
*/
if (pmap_page_dirty(orig_l3))
if ((orig_l3 & PTE_D) != 0)
vm_page_dirty(om);
if ((orig_l3 & PTE_A) != 0)
vm_page_aflag_set(om, PGA_REFERENCED);
@ -2278,7 +2245,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
if (pmap_is_write(new_l3))
if ((new_l3 & PTE_W) != 0)
vm_page_aflag_set(m, PGA_WRITEABLE);
}
@ -2298,8 +2265,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_invalidate_page(pmap, va);
KASSERT(PTE_TO_PHYS(orig_l3) == pa,
("pmap_enter: invalid update"));
if (pmap_page_dirty(orig_l3) &&
(orig_l3 & PTE_SW_MANAGED) != 0)
if ((orig_l3 & (PTE_D | PTE_SW_MANAGED)) ==
(PTE_D | PTE_SW_MANAGED))
vm_page_dirty(m);
} else {
pmap_load_store(l3, new_l3);
@ -2840,7 +2807,7 @@ pmap_remove_pages(pmap_t pmap)
/*
* Update the vm_page_t clean/reference bits.
*/
if (pmap_page_dirty(tl3))
if ((tl3 & PTE_D) != 0)
vm_page_dirty(m);
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
@ -3044,11 +3011,11 @@ pmap_remove_write(vm_page_t m)
retry:
oldl3 = pmap_load(l3);
if (pmap_is_write(oldl3)) {
newl3 = oldl3 & ~(PTE_W);
if ((oldl3 & PTE_W) != 0) {
newl3 = oldl3 & ~PTE_W;
if (!atomic_cmpset_long(l3, oldl3, newl3))
goto retry;
/* TODO: use pmap_page_dirty(oldl3) ? */
/* TODO: check for PTE_D? */
if ((oldl3 & PTE_A) != 0)
vm_page_dirty(m);
pmap_invalidate_page(pmap, pv->pv_va);
@ -3129,7 +3096,7 @@ pmap_ts_referenced(vm_page_t m)
l3 = pmap_l2_to_l3(l2, pv->pv_va);
old_l3 = pmap_load(l3);
if (pmap_page_dirty(old_l3))
if ((old_l3 & PTE_D) != 0)
vm_page_dirty(m);
if ((old_l3 & PTE_A) != 0) {
if (safe_to_clear_referenced(pmap, old_l3)) {
@ -3271,9 +3238,9 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
val = MINCORE_INCORE;
}
if (pmap_page_dirty(tpte))
if ((tpte & PTE_D) != 0)
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if (pmap_page_accessed(tpte))
if ((tpte & PTE_A) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
managed = (tpte & PTE_SW_MANAGED) == PTE_SW_MANAGED;
}

View File

@ -1071,9 +1071,23 @@ init_secondary_tail(void)
/* NOTREACHED */
}
/*******************************************************************
* local functions and data
*/
/*
 * Release each AP's bootstrap stack once that CPU's idle thread has
 * been scheduled at least once.  Both td_lastcpu and td_oncpu start as
 * NOCPU (set in thread_ctor); once either changes, the idle thread has
 * run on the AP, so the CPU is no longer executing on its boot stack
 * and the stack pages can be freed.  CPU 0 is skipped — the BSP does
 * not use a bootstacks[] entry.
 */
static void
smp_after_idle_runnable(void *arg __unused)
{
	struct thread *idle_td;
	int cpu;

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		idle_td = pcpu_find(cpu)->pc_idlethread;
		/* Spin until the scheduler has run the idle thread. */
		while (idle_td->td_lastcpu == NOCPU &&
		    idle_td->td_oncpu == NOCPU)
			cpu_spinwait();
		kmem_free((vm_offset_t)bootstacks[cpu], kstack_pages *
		    PAGE_SIZE);
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
smp_after_idle_runnable, NULL);
/*
* We tell the I/O APIC code about all the CPUs we want to receive

View File

@ -3772,6 +3772,78 @@ ATF_TC_BODY(ptrace__PT_CONTINUE_different_thread, tc)
}
#endif
/*
 * Verify that PT_LWPINFO doesn't return stale siginfo: after a stop
 * that is not signal-related (a syscall-entry trap), PL_FLAG_SI must
 * not still be set from the earlier SIGABRT stop.
 */
ATF_TC_WITHOUT_HEAD(ptrace__PT_LWPINFO_stale_siginfo);
ATF_TC_BODY(ptrace__PT_LWPINFO_stale_siginfo, tc)
{
	struct ptrace_lwpinfo pl;
	pid_t fpid, wpid;
	int events, status;

	ATF_REQUIRE((fpid = fork()) != -1);
	if (fpid == 0) {
		trace_me();
		raise(SIGABRT);
		exit(1);
	}

	/* The first wait() should report the stop from SIGSTOP. */
	wpid = waitpid(fpid, &status, 0);
	ATF_REQUIRE(wpid == fpid);
	ATF_REQUIRE(WIFSTOPPED(status));
	ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP);

	ATF_REQUIRE(ptrace(PT_CONTINUE, fpid, (caddr_t)1, 0) == 0);

	/* The next stop should report the SIGABRT in the child body. */
	wpid = waitpid(fpid, &status, 0);
	ATF_REQUIRE(wpid == fpid);
	ATF_REQUIRE(WIFSTOPPED(status));
	ATF_REQUIRE(WSTOPSIG(status) == SIGABRT);

	ATF_REQUIRE(ptrace(PT_LWPINFO, wpid, (caddr_t)&pl, sizeof(pl)) != -1);
	ATF_REQUIRE(pl.pl_flags & PL_FLAG_SI);
	ATF_REQUIRE(pl.pl_siginfo.si_signo == SIGABRT);

	/*
	 * Continue the process ignoring the signal, but enabling
	 * syscall traps.
	 */
	ATF_REQUIRE(ptrace(PT_SYSCALL, fpid, (caddr_t)1, 0) == 0);

	/*
	 * The next stop should report a system call entry from
	 * exit().  PL_FLAG_SI should not be set.
	 */
	wpid = waitpid(fpid, &status, 0);
	ATF_REQUIRE(wpid == fpid);
	ATF_REQUIRE(WIFSTOPPED(status));
	ATF_REQUIRE(WSTOPSIG(status) == SIGTRAP);

	ATF_REQUIRE(ptrace(PT_LWPINFO, wpid, (caddr_t)&pl, sizeof(pl)) != -1);
	ATF_REQUIRE(pl.pl_flags & PL_FLAG_SCE);
	ATF_REQUIRE((pl.pl_flags & PL_FLAG_SI) == 0);

	/* Disable syscall tracing and continue the child to let it exit. */
	ATF_REQUIRE(ptrace(PT_GET_EVENT_MASK, fpid, (caddr_t)&events,
	    sizeof(events)) == 0);
	events &= ~PTRACE_SYSCALL;
	ATF_REQUIRE(ptrace(PT_SET_EVENT_MASK, fpid, (caddr_t)&events,
	    sizeof(events)) == 0);
	ATF_REQUIRE(ptrace(PT_CONTINUE, fpid, (caddr_t)1, 0) == 0);

	/* The last event should be for the child process's exit. */
	wpid = waitpid(fpid, &status, 0);
	/* Check the pid as every other wait above does. */
	ATF_REQUIRE(wpid == fpid);
	ATF_REQUIRE(WIFEXITED(status));
	ATF_REQUIRE(WEXITSTATUS(status) == 1);

	wpid = wait(&status);
	ATF_REQUIRE(wpid == -1);
	ATF_REQUIRE(errno == ECHILD);
}
ATF_TP_ADD_TCS(tp)
{
@ -3831,6 +3903,7 @@ ATF_TP_ADD_TCS(tp)
#if defined(HAVE_BREAKPOINT) && defined(SKIP_BREAK)
ATF_TP_ADD_TC(tp, ptrace__PT_CONTINUE_different_thread);
#endif
ATF_TP_ADD_TC(tp, ptrace__PT_LWPINFO_stale_siginfo);
return (atf_no_error());
}

View File

@ -71,6 +71,7 @@ main(int ac, char **av)
struct trussinfo *trussinfo;
char *fname;
char **command;
const char *errstr;
pid_t pid;
int c;
@ -118,7 +119,9 @@ main(int ac, char **av)
fname = optarg;
break;
case 's': /* Specified string size */
trussinfo->strsize = atoi(optarg);
trussinfo->strsize = strtonum(optarg, 0, INT_MAX, &errstr);
if (errstr)
errx(1, "maximum string size is %s: %s", errstr, optarg);
break;
case 'S': /* Don't trace signals */
trussinfo->flags |= NOSIGS;