Merge from HEAD@222848.

This commit is contained in:
hrs 2011-06-08 12:03:34 +00:00
commit af69660e0e
211 changed files with 3650 additions and 1574 deletions

View File

@ -22,6 +22,33 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 9.x IS SLOW:
machines to maximize performance. (To disable malloc debugging, run
ln -s aj /etc/malloc.conf.)
20110608:
The following sysctls and tunables are retired on x86 platforms:
machdep.hlt_cpus
machdep.hlt_logical_cpus
The following sysctl is retired:
machdep.hyperthreading_allowed
The sysctls were intended to provide a way to dynamically offline and
online selected CPUs on x86 platforms, but the implementation has not
been reliable, especially with the SCHED_ULE scheduler.
The machdep.hyperthreading_allowed tunable is still available to ignore
hyperthreading CPUs at the OS level.
Individual CPUs can be disabled using the hint.lapic.X.disabled tunable,
where X is the APIC ID of a CPU.  Be advised, though, that disabling
CPUs in a non-uniform fashion will result in a non-uniform topology and
may lead to sub-optimal system performance with SCHED_ULE, which is the
default scheduler.
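For example, to keep a particular CPU offline from boot, set the
tunable in loader.conf (the APIC ID here is purely illustrative; check
the boot messages for the real IDs on your machine):

# /boot/loader.conf -- keep the CPU with APIC ID 3 offline
hint.lapic.3.disabled="1"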
20110607:
The cpumask_t type is retired and cpuset_t is now used to describe
a mask of CPUs.
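The mechanical consequence, visible throughout the diffs below, is that
direct bit arithmetic on an integer mask becomes CPU_*() macro calls
from <sys/cpuset.h>.  A minimal before/after sketch (function and
variable names are illustrative, not from the tree):

#include <sys/param.h>
#include <sys/cpuset.h>

static void
mark_cpu(cpuset_t *set, int cpu)
{
	/* old idiom: mask |= (1 << cpu);  -- limited to 32 CPUs */
	CPU_SET(cpu, set);
}

static int
cpu_is_marked(const cpuset_t *set, int cpu)
{
	/* old idiom: return ((mask & (1 << cpu)) != 0); */
	return (CPU_ISSET(cpu, set));
}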
20110531:
Changes to ifconfig(8) for dynamic address family detection require
that you be running a kernel of 20110525 or later.  Make sure to
follow the update procedure to boot a new kernel before installing
world.
20110513:
Support for the sun4v architecture has been officially dropped.

View File

@ -409,6 +409,7 @@ evalsubshell(union node *n, int flags)
struct job *jp;
int backgnd = (n->type == NBACKGND);
oexitstatus = exitstatus;
expredir(n->nredir.redirect);
if ((!backgnd && flags & EV_EXIT && !have_traps()) ||
forkshell(jp = makejob(n, 1), n, backgnd) == 0) {
@ -436,6 +437,7 @@ evalredir(union node *n, int flags)
struct jmploc *savehandler;
volatile int in_redirect = 1;
oexitstatus = exitstatus;
expredir(n->nredir.redirect);
savehandler = handler;
if (setjmp(jmploc.loc)) {
@ -478,7 +480,6 @@ expredir(union node *n)
for (redir = n ; redir ; redir = redir->nfile.next) {
struct arglist fn;
fn.lastp = &fn.list;
oexitstatus = exitstatus;
switch (redir->type) {
case NFROM:
case NTO:
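Hoisting the oexitstatus assignment out of expredir() and up into
evalsubshell()/evalredir() captures $? once, before any redirection
word is expanded.  A hypothetical sh fragment that depends on this:

false
(echo hi) > /tmp/out.$?    # the redirection sees $? = 1 from "false"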

View File

@ -543,6 +543,7 @@
.ds doc-operating-system-FreeBSD-7.3 7.3
.ds doc-operating-system-FreeBSD-8.0 8.0
.ds doc-operating-system-FreeBSD-8.1 8.1
.ds doc-operating-system-FreeBSD-8.2 8.2
.
.ds doc-operating-system-Darwin-8.0.0 8.0.0
.ds doc-operating-system-Darwin-8.1.0 8.1.0
@ -563,6 +564,17 @@
.ds doc-operating-system-Darwin-9.4.0 9.4.0
.ds doc-operating-system-Darwin-9.5.0 9.5.0
.ds doc-operating-system-Darwin-9.6.0 9.6.0
.ds doc-operating-system-Darwin-9.7.0 9.7.0
.ds doc-operating-system-Darwin-9.8.0 9.8.0
.ds doc-operating-system-Darwin-10.6.0 10.6.0
.ds doc-operating-system-Darwin-10.1.0 10.1.0
.ds doc-operating-system-Darwin-10.2.0 10.2.0
.ds doc-operating-system-Darwin-10.3.0 10.3.0
.ds doc-operating-system-Darwin-10.4.0 10.4.0
.ds doc-operating-system-Darwin-10.5.0 10.5.0
.ds doc-operating-system-Darwin-10.6.0 10.6.0
.ds doc-operating-system-Darwin-10.7.0 10.7.0
.ds doc-operating-system-Darwin-11.0.0 11.0.0
.
.ds doc-operating-system-DragonFly-1.0 1.0
.ds doc-operating-system-DragonFly-1.1 1.1

View File

@ -617,6 +617,8 @@
.\" POSIX Part 1: System API
.ds doc-str-St--p1003.1 \*[doc-Tn-font-size]\%IEEE\*[doc-str-St] Std 1003.1
.as doc-str-St--p1003.1 " (\*[Lq]\)\*[Px]\*[doc-str-St].1\*[Rq])
.ds doc-str-St--p1003.1b \*[doc-Tn-font-size]\%IEEE\*[doc-str-St] Std 1003.1b
.as doc-str-St--p1003.1b " (\*[Lq]\)\*[Px]\*[doc-str-St].1\*[Rq])
.ds doc-str-St--p1003.1-88 \*[doc-Tn-font-size]\%IEEE\*[doc-str-St] Std 1003.1-1988
.as doc-str-St--p1003.1-88 " (\*[Lq]\)\*[Px]\*[doc-str-St].1\*[Rq])
.ds doc-str-St--p1003.1-90 \*[doc-Tn-font-size]ISO/IEC\*[doc-str-St] 9945-1:1990
@ -754,6 +756,7 @@
.
.ds doc-str-Lb-libarm ARM Architecture Library (libarm, \-larm)
.ds doc-str-Lb-libarm32 ARM32 Architecture Library (libarm32, \-larm32)
.ds doc-str-Lb-libbsm Basic Security Module Library (libbsm, \-lbsm)
.ds doc-str-Lb-libc Standard C\~Library (libc, \-lc)
.ds doc-str-Lb-libcdk Curses Development Kit Library (libcdk, \-lcdk)
.ds doc-str-Lb-libcompat Compatibility Library (libcompat, \-lcompat)
@ -779,6 +782,7 @@
.ds doc-str-Lb-libpthread \*[Px] \*[doc-str-Lb]Threads Library (libpthread, \-lpthread)
.ds doc-str-Lb-libresolv DNS Resolver Library (libresolv, \-lresolv)
.ds doc-str-Lb-librt \*[Px] \*[doc-str-Lb]Real-time Library (librt, \-lrt)
.ds doc-str-Lb-libSystem System Library (libSystem, \-lSystem)
.ds doc-str-Lb-libtermcap Termcap Access Library (libtermcap, \-ltermcap)
.ds doc-str-Lb-libusbhid USB Human Interface Devices Library (libusbhid, \-lusbhid)
.ds doc-str-Lb-libutil System Utilities Library (libutil, \-lutil)

View File

@ -1197,8 +1197,14 @@
. if !\n[doc-arg-limit] \
. ds doc-macro-name Aq
.
. ds doc-quote-left <
. ds doc-quote-right >
. ie "\*[doc-macro-name]"An" \{\
. ds doc-quote-left <
. ds doc-quote-right >
. \}
. el \{\
. ds doc-quote-left \[la]
. ds doc-quote-right \[ra]
. \}
.
. doc-enclose-string \$@
..
@ -1527,7 +1533,10 @@
. if !\n[doc-arg-limit] \
. ds doc-macro-name Ao
.
. ds doc-quote-left \[la]
. ie "\*[doc-macro-name]"An" \
. ds doc-quote-left <
. el \
. ds doc-quote-left \[la]
.
. doc-enclose-open \$@
..
@ -1546,7 +1555,10 @@
. if !\n[doc-arg-limit] \
. ds doc-macro-name Ac
.
. ds doc-quote-right \[ra]
. ie "\*[doc-macro-name]"An" \
. ds doc-quote-right >
. el \
. ds doc-quote-right \[ra]
.
. doc-enclose-close \$@
..

View File

@ -100,25 +100,19 @@ ifconfig_up()
# inet6 specific
if afexists inet6; then
if ipv6if $1; then
if checkyesno ipv6_gateway_enable; then
_ipv6_opts="-accept_rtadv"
fi
else
if checkyesno ipv6_activate_all_interfaces; then
_ipv6_opts="-ifdisabled"
else
_ipv6_opts="ifdisabled"
fi
# backward compatibility: $ipv6_enable
case $ipv6_enable in
[Yy][Ee][Ss]|[Tt][Rr][Uu][Ee]|[Oo][Nn]|1)
_ipv6_opts="${_ipv6_opts} accept_rtadv"
;;
esac
if checkyesno ipv6_activate_all_interfaces; then
_ipv6_opts="-ifdisabled"
elif [ "$1" != "lo0" ]; then
_ipv6_opts="ifdisabled"
fi
# backward compatibility: $ipv6_enable
case $ipv6_enable in
[Yy][Ee][Ss]|[Tt][Rr][Uu][Ee]|[Oo][Nn]|1)
_ipv6_opts="${_ipv6_opts} accept_rtadv"
;;
esac
if [ -n "${_ipv6_opts}" ]; then
ifconfig $1 inet6 ${_ipv6_opts}
fi
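In rc.conf terms, the rewritten block means a non-loopback interface
stays ifdisabled unless the administrator opts in; for example (values
illustrative):

# /etc/rc.conf
ipv6_activate_all_interfaces="YES"   # clear ifdisabled on all interfaces
#ipv6_enable="YES"                   # legacy knob; also requests accept_rtadv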

View File

@ -28,6 +28,7 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/signal.h>
@ -37,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <defs.h>
#include <frame-unwind.h>
@ -48,7 +50,7 @@ static CORE_ADDR dumppcb;
static int dumptid;
static CORE_ADDR stoppcbs;
static __cpumask_t stopped_cpus;
static cpuset_t stopped_cpus;
static struct kthr *first;
struct kthr *curkthr;
@ -76,6 +78,7 @@ kgdb_thr_init(void)
{
struct proc p;
struct thread td;
long cpusetsize;
struct kthr *kt;
CORE_ADDR addr;
uintptr_t paddr;
@ -102,10 +105,11 @@ kgdb_thr_init(void)
dumptid = -1;
addr = kgdb_lookup("stopped_cpus");
if (addr != 0)
kvm_read(kvm, addr, &stopped_cpus, sizeof(stopped_cpus));
else
stopped_cpus = 0;
CPU_ZERO(&stopped_cpus);
cpusetsize = sysconf(_SC_CPUSET_SIZE);
if (cpusetsize != -1 && (u_long)cpusetsize <= sizeof(cpuset_t) &&
addr != 0)
kvm_read(kvm, addr, &stopped_cpus, cpusetsize);
stoppcbs = kgdb_lookup("stoppcbs");
@ -126,8 +130,8 @@ kgdb_thr_init(void)
kt->kaddr = addr;
if (td.td_tid == dumptid)
kt->pcb = dumppcb;
else if (td.td_state == TDS_RUNNING && ((1 << td.td_oncpu) & stopped_cpus)
&& stoppcbs != 0)
else if (td.td_state == TDS_RUNNING && stoppcbs != 0 &&
CPU_ISSET(td.td_oncpu, &stopped_cpus))
kt->pcb = (uintptr_t) stoppcbs + sizeof(struct pcb) * td.td_oncpu;
else
kt->pcb = (uintptr_t)td.td_pcb;
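This file, like the libkvm and libmemstat changes below, guards the kvm
read with sysconf(_SC_CPUSET_SIZE): the binary only copies the kernel's
stopped_cpus set when it fits in the cpuset_t the binary was compiled
with.  The check in isolation (a sketch, not the tree's code):

#include <sys/types.h>
#include <sys/cpuset.h>

#include <unistd.h>

/*
 * Return the kernel's cpuset size in bytes, or -1 when this binary's
 * cpuset_t is too small to hold it.
 */
static long
kernel_cpusetsize(void)
{
	long sz;

	sz = sysconf(_SC_CPUSET_SIZE);
	if (sz == -1 || (u_long)sz > sizeof(cpuset_t))
		return (-1);
	return (sz);
}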

View File

@ -34,7 +34,6 @@
.\" FreeBSD .Lb values
.ds doc-str-Lb-libarchive Streaming Archive Library (libarchive, \-larchive)
.ds doc-str-Lb-libbluetooth Bluetooth User Library (libbluetooth, \-lbluetooth)
.ds doc-str-Lb-libbsm Basic Security Module User Library (libbsm, \-lbsm)
.ds doc-str-Lb-libc_r Reentrant C\~Library (libc_r, \-lc_r)
.ds doc-str-Lb-libcalendar Calendar Arithmetic Library (libcalendar, \-lcalendar)
.ds doc-str-Lb-libcam Common Access Method User Library (libcam, \-lcam)
@ -75,7 +74,7 @@
.
.\" FreeBSD releases not found in doc-common
.ds doc-operating-system-FreeBSD-7.4 7.4
.ds doc-operating-system-FreeBSD-8.2 8.2
.ds doc-operating-system-FreeBSD-8.3 8.3
.ds doc-operating-system-FreeBSD-9.0 9.0
.
.\" Definitions not (yet) in doc-syms

View File

@ -19,7 +19,6 @@ SRCS= citrus_bcs.c citrus_bcs_strtol.c citrus_bcs_strtoul.c \
citrus_module.c citrus_none.c citrus_pivot_factory.c \
citrus_prop.c citrus_stdenc.c iconv.c
WARNS?= 6
CFLAGS+= --param max-inline-insns-single=128 -I ${.CURDIR}/../../include -I${.CURDIR}/../libc/include
.include <bsd.lib.mk>

View File

@ -39,11 +39,13 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/pcpu.h>
#include <sys/sysctl.h>
#include <kvm.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include "kvm_private.h"
@ -118,6 +120,9 @@ _kvm_pcpu_clear(void)
void *
kvm_getpcpu(kvm_t *kd, int cpu)
{
long kcpusetsize;
ssize_t nbytes;
uintptr_t readptr;
char *buf;
if (kd == NULL) {
@ -125,6 +130,10 @@ kvm_getpcpu(kvm_t *kd, int cpu)
return (NULL);
}
kcpusetsize = sysconf(_SC_CPUSET_SIZE);
if (kcpusetsize == -1 || (u_long)kcpusetsize > sizeof(cpuset_t))
return ((void *)-1);
if (maxcpu == 0)
if (_kvm_pcpu_init(kd) < 0)
return ((void *)-1);
@ -137,8 +146,26 @@ kvm_getpcpu(kvm_t *kd, int cpu)
_kvm_err(kd, kd->program, "out of memory");
return ((void *)-1);
}
if (kvm_read(kd, (uintptr_t)pcpu_data[cpu], buf, sizeof(struct pcpu)) !=
sizeof(struct pcpu)) {
nbytes = sizeof(struct pcpu) - 2 * kcpusetsize;
readptr = (uintptr_t)pcpu_data[cpu];
if (kvm_read(kd, readptr, buf, nbytes) != nbytes) {
_kvm_err(kd, kd->program, "unable to read per-CPU data");
free(buf);
return ((void *)-1);
}
/* Fetch the valid cpuset_t objects. */
CPU_ZERO((cpuset_t *)(buf + nbytes));
CPU_ZERO((cpuset_t *)(buf + nbytes + sizeof(cpuset_t)));
readptr += nbytes;
if (kvm_read(kd, readptr, buf + nbytes, kcpusetsize) != kcpusetsize) {
_kvm_err(kd, kd->program, "unable to read per-CPU data");
free(buf);
return ((void *)-1);
}
readptr += kcpusetsize;
if (kvm_read(kd, readptr, buf + nbytes + sizeof(cpuset_t),
kcpusetsize) != kcpusetsize) {
_kvm_err(kd, kd->program, "unable to read per-CPU data");
free(buf);
return ((void *)-1);

View File

@ -27,6 +27,7 @@
*/
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/sysctl.h>
#define LIBMEMSTAT /* Cause vm_page.h not to include opt_vmpage.h */
@ -44,6 +45,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "memstat.h"
#include "memstat_internal.h"
@ -313,7 +315,8 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
struct uma_keg *kzp, kz;
int hint_dontsearch, i, mp_maxid, ret;
char name[MEMTYPE_MAXNAME];
__cpumask_t all_cpus;
cpuset_t all_cpus;
long cpusetsize;
kvm_t *kvm;
kvm = (kvm_t *)kvm_handle;
@ -337,7 +340,13 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
list->mtl_error = ret;
return (-1);
}
ret = kread_symbol(kvm, X_ALL_CPUS, &all_cpus, sizeof(all_cpus), 0);
cpusetsize = sysconf(_SC_CPUSET_SIZE);
if (cpusetsize == -1 || (u_long)cpusetsize > sizeof(cpuset_t)) {
list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
return (-1);
}
CPU_ZERO(&all_cpus);
ret = kread_symbol(kvm, X_ALL_CPUS, &all_cpus, cpusetsize, 0);
if (ret != 0) {
list->mtl_error = ret;
return (-1);
@ -407,7 +416,7 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
if (kz.uk_flags & UMA_ZFLAG_INTERNAL)
goto skip_percpu;
for (i = 0; i < mp_maxid + 1; i++) {
if ((all_cpus & (1 << i)) == 0)
if (!CPU_ISSET(i, &all_cpus))
continue;
ucp = &ucp_array[i];
mtp->mt_numallocs += ucp->uc_allocs;

View File

@ -180,7 +180,7 @@ notify(struct utmpx *utp, char file[], off_t offset, int folder)
dsyslog(LOG_DEBUG, "%s: wrong mode on %s", utp->ut_user, tty);
return;
}
dsyslog(LOG_DEBUG, "notify %s on %s\n", utp->ut_user, tty);
dsyslog(LOG_DEBUG, "notify %s on %s", utp->ut_user, tty);
switch (fork()) {
case -1:
syslog(LOG_NOTICE, "fork failed (%m)");

View File

@ -48,28 +48,32 @@ EFIPART=efipart.sys
if [ $bootable = yes ]; then
EFISZ=65536
MNT=/mnt
dd if=/dev/zero of=$BASE/$EFIPART count=$EFISZ
md=`mdconfig -a -t vnode -f $BASE/$EFIPART`
dd if=/dev/zero of=$EFIPART count=$EFISZ
md=`mdconfig -a -t vnode -f $EFIPART`
newfs_msdos -F 12 -S 512 -h 4 -o 0 -s $EFISZ -u 16 $md
mount -t msdosfs /dev/$md $MNT
mkdir -p $MNT/efi/boot $MNT/boot $MNT/boot/kernel
cp -R $BASE/boot/defaults $MNT/boot
cp $BASE/boot/kernel/kernel $MNT/boot/kernel
cp $BASE/boot/kernel/ispfw.ko $MNT/boot/kernel
if [ -s $BASE/boot/kernel/ispfw.ko ]; then
cp $BASE/boot/kernel/ispfw.ko $MNT/boot/kernel
fi
cp $BASE/boot/device.hints $MNT/boot
cp $BASE/boot/loader.* $MNT/boot
cp $BASE/boot/mfsroot.gz $MNT/boot
if [ -s $BASE/boot/mfsroot.gz ]; then
cp $BASE/boot/mfsroot.gz $MNT/boot
fi
cp $BASE/boot/support.4th $MNT/boot
mv $MNT/boot/loader.efi $MNT/efi/boot/bootia64.efi
umount $MNT
mdconfig -d -u $md
BOOTOPTS="-b bootimage=i386;$EFIPART -o no-emul-boot"
BOOTOPTS="-o bootimage=i386;$EFIPART -o no-emul-boot"
else
BOOTOPTS=""
fi
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > $1/etc/fstab
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > $BASE/etc/fstab
makefs -t cd9660 $BOOTOPTS -o rockridge -o label=$LABEL $NAME $BASE $*
rm -f $BASE/$EFIPART
rm -f $EFIPART
rm $1/etc/fstab
exit 0

View File

@ -341,9 +341,10 @@ gpart_autofill_resize(struct gctl_req *req)
errc(EXIT_FAILURE, error, "Invalid alignment param");
if (alignment == 0)
errx(EXIT_FAILURE, "Invalid alignment param");
} else {
lba = pp->lg_stripesize / pp->lg_sectorsize;
if (lba > 0)
alignment = g_lcm(lba, alignment);
alignment = lba;
}
error = gctl_delete_param(req, "alignment");
if (error)
@ -491,13 +492,9 @@ gpart_autofill(struct gctl_req *req)
if (has_size && has_start && !has_alignment)
goto done;
/*
* If stripesize is not zero, then recalculate alignment value.
* Use LCM from stripesize and user specified alignment.
*/
len = pp->lg_stripesize / pp->lg_sectorsize;
if (len > 0 )
alignment = g_lcm(len, alignment);
if (len > 0 && !has_alignment)
alignment = len;
/* Adjust parameters to stripeoffset */
offset = pp->lg_stripeoffset / pp->lg_sectorsize;

View File

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd May 30, 2011
.Dd June 6, 2011
.Dt GPART 8
.Os
.Sh NAME
@ -530,16 +530,17 @@ about its use.
.El
.\"
.Sh PARTITION TYPES
Partition types are identified on disk by particular strings or magic
values.
The
.Nm
utility uses symbolic names for common partition types to avoid that the
user needs to know what the partitioning scheme in question is and what
the actual number or identification needs to be used for a particular
type.
utility uses symbolic names for common partition types to avoid the
user needing to know these values or other details of the partitioning
scheme in question.
The
.Nm
utility also allows the user to specify scheme-specific partition types
for partition types that do not have symbol names.
for partition types that do not have symbolic names.
The symbolic names currently understood are:
.Bl -tag -width ".Cm freebsd-vinum"
.It Cm bios-boot
@ -740,30 +741,30 @@ action or reverted with the
.Cm undo
action.
.Sh RECOVERING
The GEOM class PART supports recovering of partition tables only for GPT.
The GEOM PART class supports recovering of partition tables only for GPT.
The GUID partition table has a primary and secondary (backup) copy of
metadata for redundance.
They are stored in the begining and in the end of device respectively.
Therefore it is acceptable to have some corruptions in the metadata that
are not fatal to work with GPT.
When kernel detects corrupt metadata it marks this table as corrupt and
reports about corruption.
Any changes in corrupt table are prohibited except
metadata for redundancy; these are stored at the beginning and the end
of the device, respectively.
As a result of having two copies, it is acceptable to have some corruption
within the metadata that is not fatal to the working of GPT.
When the kernel detects corrupt metadata it marks this table as corrupt and
reports the corruption.
Any operations on corrupt tables are prohibited except for
.Cm destroy
and
.Cm recover .
.Pp
In case when only first sector is corrupt kernel can not detect GPT even
if partition table is not corrupt.
You can write protective MBR with
If the first sector of a provider is corrupt, the kernel cannot detect the
GPT even if the partition table itself is not corrupt.
You can rewrite the protective MBR using the
.Xr dd 1
command to restore ability of GPT detection.
The copy of protective MBR is usually located in the
command, to restore the ability to detect the GPT.
The copy of the protective MBR is usually located in the
.Pa /boot/pmbr
file.
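For example, assuming the damaged disk is
.Pa da0
(the device name is purely illustrative):
.Bd -literal -offset indent
dd if=/boot/pmbr of=/dev/da0
.Ed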
.Pp
In case when some of metadata is corrupt you will get to know about this
from kernel's messages like these:
If one GPT header appears to be corrupt but the other copy remains intact,
the kernel will log the following:
.Bd -literal -offset indent
GEOM: provider: the primary GPT table is corrupt or invalid.
GEOM: provider: using the secondary instead -- recovery strongly advised.
@ -777,32 +778,31 @@ GEOM: provider: using the primary only -- recovery suggested.
.Pp
Also
.Nm
commands like
commands such as
.Cm show , status
and
.Cm list
will report about corrupt table.
will report about corrupt tables.
.Pp
In case when the size of device has changed (e.g.\& volume expansion) the
secondary GPT header will become located not in the last sector.
If the size of the device has changed (e.g.\& volume expansion) the
secondary GPT header will no longer be located in the last sector.
This is not a metadata corruption, but it is dangerous because any
corruption of the primary GPT will lead to lost of partition table.
Kernel reports about this problem with message:
corruption of the primary GPT will lead to loss of the partition table.
This problem is reported by the kernel with the message:
.Bd -literal -offset indent
GEOM: provider: the secondary GPT header is not in the last LBA.
.Ed
.Pp
A corrupt table can be recovered with
This situation can be recovered with the
.Cm recover
command.
This command does reconstruction of corrupt metadata using
known valid metadata.
Also it can relocate secondary GPT to the end of device.
This command reconstructs the corrupt metadata using known valid
metadata and relocates the secondary GPT to the end of the device.
.Pp
.Em NOTE :
The GEOM class PART can detect the same partition table on different GEOM
providers and some of them will be marked as corrupt.
Be careful when choosing a provider for recovering.
The GEOM PART class can detect the same partition table visible through
different GEOM providers, and some of them will be marked as corrupt.
Be careful when choosing a provider for recovery.
If you choose incorrectly you can destroy the metadata of another GEOM class,
e.g.\& GEOM MIRROR or GEOM LABEL.
.Sh SYSCTL VARIABLES
@ -815,11 +815,11 @@ The default value is shown next to each variable.
.Bl -tag -width indent
.It Va kern.geom.part.check_integrity : No 1
This variable controls the behaviour of metadata integrity checks.
When integrity checks are enabled
When integrity checks are enabled, the
.Nm PART
GEOM class verifies all generic partition parameters that it gets from the
GEOM class verifies all generic partition parameters obtained from the
disk metadata.
If some inconsistency is detected, partition table will be
If some inconsistency is detected, the partition table will be
rejected with a diagnostic message:
.Sy "GEOM_PART: Integrity check failed (provider, scheme)" .
.El

View File

@ -5,6 +5,4 @@
GEOM_CLASS= sched
WARNS?= 6
.include <bsd.lib.mk>

View File

@ -499,8 +499,8 @@ static struct cmd inet6_cmds[] = {
DEF_CMD("-autoconf", -IN6_IFF_AUTOCONF, setip6flags),
DEF_CMD("accept_rtadv", ND6_IFF_ACCEPT_RTADV, setnd6flags),
DEF_CMD("-accept_rtadv",-ND6_IFF_ACCEPT_RTADV, setnd6flags),
DEF_CMD("defroute_rtadv",ND6_IFF_DEFROUTE_RTADV,setnd6flags),
DEF_CMD("-defroute_rtadv",-ND6_IFF_DEFROUTE_RTADV,setnd6flags),
DEF_CMD("no_radr", ND6_IFF_NO_RADR, setnd6flags),
DEF_CMD("-no_radr", -ND6_IFF_NO_RADR, setnd6flags),
DEF_CMD("defaultif", 1, setnd6defif),
DEF_CMD("-defaultif", -1, setnd6defif),
DEF_CMD("ifdisabled", ND6_IFF_IFDISABLED, setnd6flags),

View File

@ -58,7 +58,7 @@ static const char rcsid[] =
#define MAX_SYSCTL_TRY 5
#define ND6BITS "\020\001PERFORMNUD\002ACCEPT_RTADV\003PREFER_SOURCE" \
"\004IFDISABLED\005DONT_SET_IFROUTE\006AUTO_LINKLOCAL" \
"\007DEFROUTE_RTADV\020DEFAULTIF"
"\007NO_RADR\020DEFAULTIF"
static int isnd6defif(int);
void setnd6flags(const char *, int, int, const struct afswtch *);
@ -159,7 +159,6 @@ nd6_status(int s)
}
isdefif = isnd6defif(s6);
close(s6);
if (nd.ndi.flags == 0 && !isdefif)
return;
printb("\tnd6 options",

View File

@ -356,6 +356,7 @@ ipfw_main(int oldac, char **oldav)
*/
co.do_nat = 0;
co.do_pipe = 0;
co.use_set = 0;
if (!strncmp(*av, "nat", strlen(*av)))
co.do_nat = 1;
else if (!strncmp(*av, "pipe", strlen(*av)))
@ -444,7 +445,7 @@ static void
ipfw_readfile(int ac, char *av[])
{
#define MAX_ARGS 32
char buf[BUFSIZ];
char buf[4096];
char *progname = av[0]; /* original program name */
const char *cmd = NULL; /* preprocessor name, if any */
const char *filename = av[ac-1]; /* file to read */
@ -552,7 +553,7 @@ ipfw_readfile(int ac, char *av[])
}
}
while (fgets(buf, BUFSIZ, f)) { /* read commands */
while (fgets(buf, sizeof(buf), f)) { /* read commands */
char linename[20];
char *args[2];
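The buffer change above also swaps the fgets() size argument to
sizeof(buf), so the limit can never drift from the array's real size.
A generic sketch of the idiom (not ipfw code):

#include <stdio.h>

static int
count_reads(FILE *f)
{
	char buf[4096];
	int n;

	n = 0;
	/*
	 * sizeof(buf) tracks the array automatically; each successful
	 * fgets() reads up to sizeof(buf) - 1 bytes or one line.
	 */
	while (fgets(buf, sizeof(buf), f) != NULL)
		n++;
	return (n);
}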

View File

@ -28,7 +28,7 @@
.\" @(#)mount.8 8.8 (Berkeley) 6/16/94
.\" $FreeBSD$
.\"
.Dd April 28, 2011
.Dd June 6, 2011
.Dt MOUNT 8
.Os
.Sh NAME
@ -348,7 +348,6 @@ option) may be passed as a comma separated list; these options are
distinguished by a leading
.Dq \&-
(dash).
Options that take a value are specified using the syntax -option=value.
For example, the
.Nm
command:
@ -363,6 +362,16 @@ to execute the equivalent of:
/sbin/mount_cd9660 -e /dev/cd0 /cdrom
.Ed
.Pp
Options that take a value are specified using the -option=value syntax:
.Bd -literal -offset indent
mount -t msdosfs -o -u=fred,-g=wheel /dev/da0s1 /mnt
.Ed
.Pp
is equivalent to
.Bd -literal -offset indent
/sbin/mount_msdosfs -u fred -g wheel /dev/da0s1 /mnt
.Ed
.Pp
Additional options specific to file system types
which are not internally known
(see the description of the

View File

@ -243,7 +243,7 @@ main(int argc, char *argv[])
const char *mntfromname, **vfslist, *vfstype;
struct fstab *fs;
struct statfs *mntbuf;
int all, ch, i, init_flags, late, mntsize, rval, have_fstab, ro;
int all, ch, i, init_flags, late, failok, mntsize, rval, have_fstab, ro;
char *cp, *ep, *options;
all = init_flags = late = 0;
@ -328,6 +328,10 @@ main(int argc, char *argv[])
continue;
if (hasopt(fs->fs_mntops, "late") && !late)
continue;
if (hasopt(fs->fs_mntops, "failok"))
failok = 1;
else
failok = 0;
if (!(init_flags & MNT_UPDATE) &&
ismounted(fs, mntbuf, mntsize))
continue;
@ -335,7 +339,7 @@ main(int argc, char *argv[])
mntbuf->f_flags);
if (mountfs(fs->fs_vfstype, fs->fs_spec,
fs->fs_file, init_flags, options,
fs->fs_mntops))
fs->fs_mntops) && !failok)
rval = 1;
}
} else if (fstab_style) {
@ -717,6 +721,14 @@ mangle(char *options, struct cpa *a)
* before mountd starts.
*/
continue;
} else if (strcmp(p, "failok") == 0) {
/*
* "failok" is used to prevent certain file
* systems from causing the system to
* drop into single user mode in the boot
* cycle, and is not a real mount option.
*/
continue;
} else if (strncmp(p, "mountprog", 9) == 0) {
/*
* "mountprog" is used to force the use of

View File

@ -31,7 +31,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 9, 2008
.Dd June 6, 2011
.Dt RCORDER 8
.Os
.Sh NAME
@ -89,6 +89,12 @@ and
lines may appear, but all such lines must appear in a sequence without
any intervening lines, as once a line that does not follow the format
is reached, parsing stops.
Note that for historical reasons,
.Dq Li REQUIRES ,
.Dq Li PROVIDES ,
and
.Dq Li KEYWORDS
are also accepted in addition to the above.
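For example, a dependency block at the top of a script might look like
this (the names are illustrative):
.Bd -literal -offset indent
# PROVIDE: foo
# REQUIRE: NETWORKING syslogd
# KEYWORD: nojail
.Ed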
.Pp
The options are as follows:
.Bl -tag -width indent

View File

@ -25,12 +25,12 @@
.\"
.\" $FreeBSD$
.\"
.Dd November 30, 2009
.Dd June 7, 2011
.Dt AMDSBWD 4
.Os
.Sh NAME
.Nm amdsbwd
.Nd device driver for the AMD SB600/SB700/SB710/SB750 watchdog timer
.Nd device driver for the AMD SB600/SB7xx/SB8xx watchdog timers
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following line in your
@ -51,7 +51,7 @@ The
driver provides
.Xr watchdog 4
support for the watchdog timers present on
AMD SB600 and SB7xx south bridge chips.
AMD SB600, SB7xx and SB8xx southbridges.
.Sh SEE ALSO
.Xr watchdog 4 ,
.Xr watchdog 8 ,
@ -61,12 +61,14 @@ AMD SB600 and SB7xx south bridge chips.
The
.Nm
driver first appeared in
.Fx 9.0 .
.Fx 7.3
and
.Fx 8.1 .
.Sh AUTHORS
.An -nosplit
The
.Nm
driver was written by
.An Andiry Gapon Aq avg@FreeBSD.org .
.An Andriy Gapon Aq avg@FreeBSD.org .
This manual page was written by
.An Andiry Gapon Aq avg@FreeBSD.org .
.An Andriy Gapon Aq avg@FreeBSD.org .

View File

@ -26,7 +26,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd May 20, 2011
.Dd January 29, 2008
.Dt ATKBD 4
.Os
.Sh NAME
@ -176,11 +176,6 @@ When this option is given, the
.Nm
driver will not test the keyboard port during the probe routine.
Some machines hang during boot when this test is performed.
.It bit 4 (PROBE_TYPEMATIC)
When this option is given, the
.Nm
driver will try to probe the keyboard typematic rate on boot.
Some machines hang during boot when this test is performed.
.El
.\".Sh FILES
.Sh EXAMPLES

View File

@ -32,7 +32,7 @@
.\" @(#)fstab.5 8.1 (Berkeley) 6/5/93
.\" $FreeBSD$
.\"
.Dd November 23, 2008
.Dd June 7, 2011
.Dt FSTAB 5
.Os
.Sh NAME
@ -70,7 +70,8 @@ remote file system to be mounted.
The second field,
.Pq Fa fs_file ,
describes the mount point for the file system.
For swap partitions, this field should be specified as ``none''.
For swap partitions, this field should be specified as
.Dq none .
.Pp
The third field,
.Pq Fa fs_vfstype ,
@ -125,7 +126,11 @@ sync,noatime,-m=644,-M=755,-u=foo,-g=bar
in the option field of
.Nm .
.Pp
If the options ``userquota'' and/or ``groupquota'' are specified,
If the options
.Dq userquota
and/or
.Dq groupquota
are specified,
the file system is automatically processed by the
.Xr quotacheck 8
command, and user and/or group disk quotas are enabled with
@ -147,7 +152,18 @@ this location can be specified as:
userquota=/var/quotas/tmp.user
.Ed
.Pp
If the option ``noauto'' is specified, the file system will not be automatically
If the option
.Dq failok
is specified,
the system will ignore any error which occurs during the mount of that file system,
which would otherwise cause the system to drop into single user mode.
This option is implemented by the
.Xr mount 8
command and will not be passed to the kernel.
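For example, the following entry (device and mount point illustrative)
lets the boot proceed even if the mount fails:
.Bd -literal -offset indent
/dev/da1s1	/data	ufs	rw,failok	2	2
.Ed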
.Pp
If the option
.Dq noauto
is specified, the file system will not be automatically
mounted at system startup.
Note that, for network file systems
of third party types
@ -170,13 +186,19 @@ field (it is not deleted from the
field).
If
.Fa fs_type
is ``rw'' or ``ro'' then the file system whose name is given in the
is
.Dq rw
or
.Dq ro
then the file system whose name is given in the
.Fa fs_file
field is normally mounted read-write or read-only on the
specified special file.
If
.Fa fs_type
is ``sw'' then the special file is made available as a piece of swap
is
.Dq sw
then the special file is made available as a piece of swap
space by the
.Xr swapon 8
command at the end of the system reboot procedure.
@ -187,7 +209,9 @@ and
are unused.
If
.Fa fs_type
is specified as ``xx'' the entry is ignored.
is specified as
.Dq xx
the entry is ignored.
This is useful to show disk partitions which are currently unused.
.Pp
The fifth field,

View File

@ -97,6 +97,7 @@ itetcu [label="Ion-Mihai Tetcu\nitetcu@FreeBSD.org\n2006/06/07"]
jacula [label="Giuseppe Pilichi\njacula@FreeBSD.org\n2010/04/05"]
jadawin [label="Philippe Audeoud\njadawin@FreeBSD.org\n2008/03/02"]
jkim [label="Jung-uk Kim\njkim@FreeBSD.org\n2007/09/12"]
jlaffaye [label="Julien Laffaye\njlaffaye@FreeBSD.org\n2011/06/06"]
jmelo [label="Jean Milanez Melo\njmelo@FreeBSD.org\n2006/03/31"]
joerg [label="Joerg Wunsch\njoerg@FreeBSD.org\n1994/08/22"]
johans [label="Johan Selst\njohans@FreeBSD.org\n2006/04/01"]
@ -204,6 +205,8 @@ arved -> stefan
asami -> obrien
bapt -> jlaffaye
beat -> decke
beech -> glarkin
@ -401,6 +404,7 @@ tabthorpe -> dhn
tabthorpe -> fluffy
tabthorpe -> jacula
tabthorpe -> jadawin
tabthorpe -> jlaffaye
tabthorpe -> pgj
tabthorpe -> rene

View File

@ -78,7 +78,7 @@ static void acpi_stop_beep(void *);
#ifdef SMP
static int acpi_wakeup_ap(struct acpi_softc *, int);
static void acpi_wakeup_cpus(struct acpi_softc *, cpumask_t);
static void acpi_wakeup_cpus(struct acpi_softc *, const cpuset_t *);
#endif
#define WAKECODE_VADDR(sc) ((sc)->acpi_wakeaddr + (3 * PAGE_SIZE))
@ -173,7 +173,7 @@ acpi_wakeup_ap(struct acpi_softc *sc, int cpu)
#define BIOS_WARM (0x0a)
static void
acpi_wakeup_cpus(struct acpi_softc *sc, cpumask_t wakeup_cpus)
acpi_wakeup_cpus(struct acpi_softc *sc, const cpuset_t *wakeup_cpus)
{
uint32_t mpbioswarmvec;
int cpu;
@ -192,7 +192,7 @@ acpi_wakeup_cpus(struct acpi_softc *sc, cpumask_t wakeup_cpus)
/* Wake up each AP. */
for (cpu = 1; cpu < mp_ncpus; cpu++) {
if ((wakeup_cpus & (1 << cpu)) == 0)
if (!CPU_ISSET(cpu, wakeup_cpus))
continue;
if (acpi_wakeup_ap(sc, cpu) == 0) {
/* restore the warmstart vector */
@ -214,7 +214,7 @@ int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
#ifdef SMP
cpumask_t wakeup_cpus;
cpuset_t wakeup_cpus;
#endif
register_t cr3, rf;
ACPI_STATUS status;
@ -244,10 +244,9 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
if (savectx(susppcbs[0])) {
#ifdef SMP
if (wakeup_cpus != 0 && suspend_cpus(wakeup_cpus) == 0) {
device_printf(sc->acpi_dev,
"Failed to suspend APs: CPU mask = 0x%jx\n",
(uintmax_t)(wakeup_cpus & ~stopped_cpus));
if (!CPU_EMPTY(&wakeup_cpus) &&
suspend_cpus(wakeup_cpus) == 0) {
device_printf(sc->acpi_dev, "Failed to suspend APs\n");
goto out;
}
#endif
@ -282,8 +281,8 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
PCPU_SET(switchtime, 0);
PCPU_SET(switchticks, ticks);
#ifdef SMP
if (wakeup_cpus != 0)
acpi_wakeup_cpus(sc, wakeup_cpus);
if (!CPU_EMPTY(&wakeup_cpus))
acpi_wakeup_cpus(sc, &wakeup_cpus);
#endif
acpi_resync_clock(sc);
ret = 0;
@ -291,7 +290,7 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
out:
#ifdef SMP
if (wakeup_cpus != 0)
if (!CPU_EMPTY(&wakeup_cpus))
restart_cpus(wakeup_cpus);
#endif

View File

@ -443,8 +443,7 @@ DB_SHOW_COMMAND(irqs, db_show_irqs)
* allocate CPUs round-robin.
*/
/* The BSP is always a valid target. */
static cpumask_t intr_cpus = (1 << 0);
static cpuset_t intr_cpus;
static int current_cpu;
/*
@ -466,7 +465,7 @@ intr_next_cpu(void)
current_cpu++;
if (current_cpu > mp_maxid)
current_cpu = 0;
} while (!(intr_cpus & (1 << current_cpu)));
} while (!CPU_ISSET(current_cpu, &intr_cpus));
mtx_unlock_spin(&icu_lock);
return (apic_id);
}
@ -497,7 +496,7 @@ intr_add_cpu(u_int cpu)
printf("INTR: Adding local APIC %d as a target\n",
cpu_apic_ids[cpu]);
intr_cpus |= (1 << cpu);
CPU_SET(cpu, &intr_cpus);
}
/*
@ -510,6 +509,9 @@ intr_shuffle_irqs(void *arg __unused)
struct intsrc *isrc;
int i;
/* The BSP is always a valid target. */
CPU_SETOF(0, &intr_cpus);
/* Don't bother on UP. */
if (mp_ncpus == 1)
return;

View File

@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_sched.h"
#include "opt_kdtrace.h"
@ -116,6 +117,7 @@ __FBSDID("$FreeBSD$");
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/proc.h>
@ -734,9 +736,8 @@ cpu_idle(int busy)
CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
busy, curcpu);
#ifdef SMP
if (mp_grab_cpu_hlt())
return;
#ifdef MP_WATCHDOG
ap_watchdog(PCPU_GET(cpuid));
#endif
/* If we are busy - try to use fast methods. */
if (busy) {

View File

@ -29,13 +29,13 @@ __FBSDID("$FreeBSD$");
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"
#include "opt_smp.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
@ -63,7 +63,6 @@ __FBSDID("$FreeBSD$");
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
@ -125,7 +124,7 @@ extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
* Local data and functions.
*/
static volatile cpumask_t ipi_nmi_pending;
static volatile cpuset_t ipi_nmi_pending;
/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;
@ -159,11 +158,8 @@ static int start_all_aps(void);
static int start_ap(int apic_id);
static void release_aps(void *dummy);
static int hlt_logical_cpus;
static u_int hyperthreading_cpus; /* logical cpus sharing L1 cache */
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;
static void
@ -241,8 +237,11 @@ topo_probe_0x4(void)
* logical processors that belong to the same core
* as BSP thus deducing number of threads per core.
*/
cpuid_count(0x04, 0, p);
max_cores = ((p[0] >> 26) & 0x3f) + 1;
if (cpu_high >= 0x4) {
cpuid_count(0x04, 0, p);
max_cores = ((p[0] >> 26) & 0x3f) + 1;
} else
max_cores = 1;
core_id_bits = mask_width(max_logical/max_cores);
if (core_id_bits < 0)
return;
@ -334,7 +333,7 @@ topo_probe(void)
if (cpu_topo_probed)
return;
logical_cpus_mask = 0;
CPU_ZERO(&logical_cpus_mask);
if (mp_ncpus <= 1)
cpu_cores = cpu_logical = 1;
else if (cpu_vendor_id == CPU_VENDOR_AMD)
@ -478,7 +477,7 @@ cpu_mp_probe(void)
* Always record BSP in CPU map so that the mbuf init code works
* correctly.
*/
all_cpus = 1;
CPU_SETOF(0, &all_cpus);
if (mp_ncpus == 0) {
/*
* No CPUs were found, so this must be a UP system. Setup
@ -605,6 +604,7 @@ cpu_mp_announce(void)
void
init_secondary(void)
{
cpuset_t tcpuset, tallcpus;
struct pcpu *pc;
struct nmi_pcpu *np;
u_int64_t msr, cr0;
@ -736,19 +736,17 @@ init_secondary(void)
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
tcpuset = PCPU_GET(cpumask);
/* Determine if we are a logical CPU. */
/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
logical_cpus_mask |= PCPU_GET(cpumask);
/* Determine if we are a hyperthread. */
if (hyperthreading_cpus > 1 &&
PCPU_GET(apic_id) % hyperthreading_cpus != 0)
hyperthreading_cpus_mask |= PCPU_GET(cpumask);
CPU_OR(&logical_cpus_mask, &tcpuset);
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
tallcpus = all_cpus;
CPU_NAND(&tallcpus, &tcpuset);
PCPU_SET(other_cpus, tallcpus);
if (bootverbose)
lapic_dump("AP");
@ -835,7 +833,7 @@ assign_cpu_ids(void)
if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
cpu_info[i].cpu_hyperthread = 1;
#if defined(SCHED_ULE)
/*
* Don't use HT CPU if it has been disabled by a
* tunable.
@ -844,7 +842,6 @@ assign_cpu_ids(void)
cpu_info[i].cpu_disabled = 1;
continue;
}
#endif
}
/* Don't use this CPU if it has been disabled by a tunable. */
@ -854,6 +851,11 @@ assign_cpu_ids(void)
}
}
if (hyperthreading_allowed == 0 && hyperthreading_cpus > 1) {
hyperthreading_cpus = 0;
cpu_logical = 1;
}
/*
* Assign CPU IDs to local APIC IDs and disable any CPUs
* beyond MAXCPU. CPU 0 is always assigned to the BSP.
@ -891,6 +893,7 @@ assign_cpu_ids(void)
static int
start_all_aps(void)
{
cpuset_t tallcpus, tcpuset;
vm_offset_t va = boot_address + KERNBASE;
u_int64_t *pt4, *pt3, *pt2;
u_int32_t mpbioswarmvec;
@ -955,11 +958,14 @@ start_all_aps(void)
panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
}
all_cpus |= (1 << cpu); /* record AP in CPU map */
CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
}
/* build our map of 'other' CPUs */
PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
tallcpus = all_cpus;
tcpuset = PCPU_GET(cpumask);
CPU_NAND(&tallcpus, &tcpuset);
PCPU_SET(other_cpus, tallcpus);
/* restore the warmstart vector */
*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
@ -1087,6 +1093,30 @@ SYSCTL_UINT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
&ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */
/*
* Send an IPI to specified CPU handling the bitmap logic.
*/
static void
ipi_send_cpu(int cpu, u_int ipi)
{
u_int bitmap, old_pending, new_pending;
KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
if (IPI_IS_BITMAPED(ipi)) {
bitmap = 1 << ipi;
ipi = IPI_BITMAP_VECTOR;
do {
old_pending = cpu_ipi_pending[cpu];
new_pending = old_pending | bitmap;
} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
old_pending, new_pending));
if (old_pending)
return;
}
lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
}
/*
* Flush the TLB on all other CPU's
*/
@ -1111,28 +1141,19 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
int ncpu, othercpus;
int cpu, ncpu, othercpus;
othercpus = mp_ncpus - 1;
if (mask == (cpumask_t)-1) {
ncpu = othercpus;
if (ncpu < 1)
if (CPU_ISFULLSET(&mask)) {
if (othercpus < 1)
return;
} else {
mask &= ~PCPU_GET(cpumask);
if (mask == 0)
return;
ncpu = bitcount32(mask);
if (ncpu > othercpus) {
/* XXX this should be a panic offence */
printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
ncpu, othercpus);
ncpu = othercpus;
}
/* XXX should be a panic, implied by mask == 0 above */
if (ncpu < 1)
sched_pin();
CPU_NAND(&mask, PCPU_PTR(cpumask));
sched_unpin();
if (CPU_EMPTY(&mask))
return;
}
if (!(read_rflags() & PSL_I))
@ -1141,39 +1162,25 @@ smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_o
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
if (mask == (cpumask_t)-1)
if (CPU_ISFULLSET(&mask)) {
ncpu = othercpus;
ipi_all_but_self(vector);
else
ipi_selected(mask, vector);
} else {
ncpu = 0;
while ((cpu = cpusetobj_ffs(&mask)) != 0) {
cpu--;
CPU_CLR(cpu, &mask);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
cpu, vector);
ipi_send_cpu(cpu, vector);
ncpu++;
}
}
while (smp_tlb_wait < ncpu)
ia32_pause();
mtx_unlock_spin(&smp_ipi_mtx);
}
/*
* Send an IPI to specified CPU handling the bitmap logic.
*/
static void
ipi_send_cpu(int cpu, u_int ipi)
{
u_int bitmap, old_pending, new_pending;
KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
if (IPI_IS_BITMAPED(ipi)) {
bitmap = 1 << ipi;
ipi = IPI_BITMAP_VECTOR;
do {
old_pending = cpu_ipi_pending[cpu];
new_pending = old_pending | bitmap;
} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
old_pending, new_pending));
if (old_pending)
return;
}
lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
}
void
smp_cache_flush(void)
{
@ -1220,7 +1227,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
smp_masked_invltlb(cpumask_t mask)
smp_masked_invltlb(cpuset_t mask)
{
if (smp_started) {
@ -1232,7 +1239,7 @@ smp_masked_invltlb(cpumask_t mask)
}
void
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
{
if (smp_started) {
@ -1244,7 +1251,7 @@ smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
}
void
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@ -1297,7 +1304,7 @@ ipi_bitmap_handler(struct trapframe frame)
* send an IPI to a set of cpus.
*/
void
ipi_selected(cpumask_t cpus, u_int ipi)
ipi_selected(cpuset_t cpus, u_int ipi)
{
int cpu;
@ -1307,12 +1314,12 @@ ipi_selected(cpumask_t cpus, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, cpus);
CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);
CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
while ((cpu = ffs(cpus)) != 0) {
while ((cpu = cpusetobj_ffs(&cpus)) != 0) {
cpu--;
cpus &= ~(1 << cpu);
CPU_CLR(cpu, &cpus);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
}
}
@ -1330,7 +1337,7 @@ ipi_cpu(int cpu, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, 1 << cpu);
CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
@ -1343,8 +1350,10 @@ void
ipi_all_but_self(u_int ipi)
{
sched_pin();
if (IPI_IS_BITMAPED(ipi)) {
ipi_selected(PCPU_GET(other_cpus), ipi);
sched_unpin();
return;
}
@ -1354,7 +1363,8 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
CPU_OR_ATOMIC(&ipi_nmi_pending, PCPU_PTR(other_cpus));
sched_unpin();
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
@ -1363,7 +1373,7 @@ ipi_all_but_self(u_int ipi)
int
ipi_nmi_handler()
{
cpumask_t cpumask;
cpuset_t cpumask;
/*
* As long as there is not a simple way to know about a NMI's
@ -1371,11 +1381,13 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
sched_pin();
cpumask = PCPU_GET(cpumask);
if ((ipi_nmi_pending & cpumask) == 0)
sched_unpin();
if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
return (1);
atomic_clear_int(&ipi_nmi_pending, cpumask);
CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
cpustop_handler();
return (0);
}
@ -1387,23 +1399,25 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
cpumask_t cpumask;
cpuset_t cpumask;
u_int cpu;
sched_pin();
cpu = PCPU_GET(cpuid);
cpumask = PCPU_GET(cpumask);
sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
atomic_set_int(&stopped_cpus, cpumask);
CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
/* Wait for restart */
while (!(started_cpus & cpumask))
while (!CPU_OVERLAP(&started_cpus, &cpumask))
ia32_pause();
atomic_clear_int(&started_cpus, cpumask);
atomic_clear_int(&stopped_cpus, cpumask);
CPU_NAND_ATOMIC(&started_cpus, &cpumask);
CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
@ -1418,7 +1432,7 @@ cpustop_handler(void)
void
cpususpend_handler(void)
{
cpumask_t cpumask;
cpuset_t cpumask;
register_t cr3, rf;
u_int cpu;
@ -1430,7 +1444,7 @@ cpususpend_handler(void)
if (savectx(susppcbs[cpu])) {
wbinvd();
atomic_set_int(&stopped_cpus, cpumask);
CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
} else {
pmap_init_pat();
PCPU_SET(switchtime, 0);
@ -1438,11 +1452,11 @@ cpususpend_handler(void)
}
/* Wait for resume */
while (!(started_cpus & cpumask))
while (!CPU_OVERLAP(&started_cpus, &cpumask))
ia32_pause();
atomic_clear_int(&started_cpus, cpumask);
atomic_clear_int(&stopped_cpus, cpumask);
CPU_NAND_ATOMIC(&started_cpus, &cpumask);
CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
/* Restore CR3 and enable interrupts */
load_cr3(cr3);
@ -1467,158 +1481,6 @@ release_aps(void *dummy __unused)
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
cpumask_t mask;
int error;
mask = hlt_cpus_mask;
error = sysctl_handle_int(oidp, &mask, 0, req);
if (error || !req->newptr)
return (error);
if (logical_cpus_mask != 0 &&
(mask & logical_cpus_mask) == logical_cpus_mask)
hlt_logical_cpus = 1;
else
hlt_logical_cpus = 0;
if (! hyperthreading_allowed)
mask |= hyperthreading_cpus_mask;
if ((mask & all_cpus) == all_cpus)
mask &= ~(1<<0);
hlt_cpus_mask = mask;
return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
0, 0, sysctl_hlt_cpus, "IU",
"Bitmap of CPUs to halt. 101 (binary) will halt CPUs 0 and 2.");
static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
int disable, error;
disable = hlt_logical_cpus;
error = sysctl_handle_int(oidp, &disable, 0, req);
if (error || !req->newptr)
return (error);
if (disable)
hlt_cpus_mask |= logical_cpus_mask;
else
hlt_cpus_mask &= ~logical_cpus_mask;
if (! hyperthreading_allowed)
hlt_cpus_mask |= hyperthreading_cpus_mask;
if ((hlt_cpus_mask & all_cpus) == all_cpus)
hlt_cpus_mask &= ~(1<<0);
hlt_logical_cpus = disable;
return (error);
}
static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
int allowed, error;
allowed = hyperthreading_allowed;
error = sysctl_handle_int(oidp, &allowed, 0, req);
if (error || !req->newptr)
return (error);
#ifdef SCHED_ULE
/*
* SCHED_ULE doesn't allow enabling/disabling HT cores at
* run-time.
*/
if (allowed != hyperthreading_allowed)
return (ENOTSUP);
return (error);
#endif
if (allowed)
hlt_cpus_mask &= ~hyperthreading_cpus_mask;
else
hlt_cpus_mask |= hyperthreading_cpus_mask;
if (logical_cpus_mask != 0 &&
(hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
hlt_logical_cpus = 1;
else
hlt_logical_cpus = 0;
if ((hlt_cpus_mask & all_cpus) == all_cpus)
hlt_cpus_mask &= ~(1<<0);
hyperthreading_allowed = allowed;
return (error);
}
static void
cpu_hlt_setup(void *dummy __unused)
{
if (logical_cpus_mask != 0) {
TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
&hlt_logical_cpus);
sysctl_ctx_init(&logical_cpu_clist);
SYSCTL_ADD_PROC(&logical_cpu_clist,
SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
"hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
sysctl_hlt_logical_cpus, "IU", "");
SYSCTL_ADD_UINT(&logical_cpu_clist,
SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
"logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
&logical_cpus_mask, 0, "");
if (hlt_logical_cpus)
hlt_cpus_mask |= logical_cpus_mask;
/*
* If necessary for security purposes, force
* hyperthreading off, regardless of the value
* of hlt_logical_cpus.
*/
if (hyperthreading_cpus_mask) {
SYSCTL_ADD_PROC(&logical_cpu_clist,
SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
"hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
0, 0, sysctl_hyperthreading_allowed, "IU", "");
if (! hyperthreading_allowed)
hlt_cpus_mask |= hyperthreading_cpus_mask;
}
}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
int
mp_grab_cpu_hlt(void)
{
cpumask_t mask;
#ifdef MP_WATCHDOG
u_int cpuid;
#endif
int retval;
mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
cpuid = PCPU_GET(cpuid);
ap_watchdog(cpuid);
#endif
retval = 0;
while (mask & hlt_cpus_mask) {
retval = 1;
__asm __volatile("sti; hlt" : : : "memory");
}
return (retval);
}
#ifdef COUNT_IPIS
/*
* Setup interrupt counters for IPI handlers.

View File

@ -123,6 +123,8 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#ifdef SMP
#include <sys/smp.h>
#else
#include <sys/cpuset.h>
#endif
#include <vm/vm.h>
@ -581,7 +583,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
PMAP_LOCK_INIT(kernel_pmap);
kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
kernel_pmap->pm_root = NULL;
kernel_pmap->pm_active = -1; /* don't allow deactivation */
CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvchunk);
/*
@ -923,19 +925,20 @@ pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invlpg(va);
smp_invlpg(va);
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
invlpg(va);
if (pmap->pm_active & other_cpus)
smp_masked_invlpg(pmap->pm_active & other_cpus, va);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invlpg(other_cpus, va);
}
sched_unpin();
}
@ -943,23 +946,23 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
vm_offset_t addr;
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
smp_invlpg_range(sva, eva);
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
if (pmap->pm_active & other_cpus)
smp_masked_invlpg_range(pmap->pm_active & other_cpus,
sva, eva);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invlpg_range(other_cpus, sva, eva);
}
sched_unpin();
}
@ -967,19 +970,20 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
void
pmap_invalidate_all(pmap_t pmap)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invltlb();
smp_invltlb();
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
invltlb();
if (pmap->pm_active & other_cpus)
smp_masked_invltlb(pmap->pm_active & other_cpus);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invltlb(other_cpus);
}
sched_unpin();
}
@ -995,8 +999,8 @@ pmap_invalidate_cache(void)
}
struct pde_action {
cpumask_t store; /* processor that updates the PDE */
cpumask_t invalidate; /* processors that invalidate their TLB */
cpuset_t store; /* processor that updates the PDE */
cpuset_t invalidate; /* processors that invalidate their TLB */
vm_offset_t va;
pd_entry_t *pde;
pd_entry_t newpde;
@ -1007,8 +1011,12 @@ pmap_update_pde_action(void *arg)
{
struct pde_action *act = arg;
if (act->store == PCPU_GET(cpumask))
sched_pin();
if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
sched_unpin();
pde_store(act->pde, act->newpde);
} else
sched_unpin();
}
static void
@ -1016,8 +1024,12 @@ pmap_update_pde_teardown(void *arg)
{
struct pde_action *act = arg;
if ((act->invalidate & PCPU_GET(cpumask)) != 0)
sched_pin();
if (CPU_OVERLAP(&act->invalidate, PCPU_PTR(cpumask))) {
sched_unpin();
pmap_update_pde_invalidate(act->va, act->newpde);
} else
sched_unpin();
}
/*
@ -1032,26 +1044,28 @@ static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
struct pde_action act;
cpumask_t active, cpumask;
cpuset_t active, cpumask, other_cpus;
sched_pin();
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap == kernel_pmap)
active = all_cpus;
else
active = pmap->pm_active;
if ((active & PCPU_GET(other_cpus)) != 0) {
if (CPU_OVERLAP(&active, &other_cpus)) {
act.store = cpumask;
act.invalidate = active;
act.va = va;
act.pde = pde;
act.newpde = newpde;
smp_rendezvous_cpus(cpumask | active,
CPU_OR(&cpumask, &active);
smp_rendezvous_cpus(cpumask,
smp_no_rendevous_barrier, pmap_update_pde_action,
pmap_update_pde_teardown, &act);
} else {
pde_store(pde, newpde);
if ((active & cpumask) != 0)
if (CPU_OVERLAP(&active, &cpumask))
pmap_update_pde_invalidate(va, newpde);
}
sched_unpin();
@ -1065,7 +1079,7 @@ PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invlpg(va);
}
@ -1074,7 +1088,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
vm_offset_t addr;
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
}
@ -1083,7 +1097,7 @@ PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invltlb();
}
@ -1099,7 +1113,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
pde_store(pde, newpde);
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
pmap_update_pde_invalidate(va, newpde);
}
#endif /* !SMP */
@ -1607,7 +1621,7 @@ pmap_pinit0(pmap_t pmap)
PMAP_LOCK_INIT(pmap);
pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
pmap->pm_root = NULL;
pmap->pm_active = 0;
CPU_ZERO(&pmap->pm_active);
PCPU_SET(curpmap, pmap);
TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@ -1649,7 +1663,7 @@ pmap_pinit(pmap_t pmap)
pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
pmap->pm_root = NULL;
pmap->pm_active = 0;
CPU_ZERO(&pmap->pm_active);
TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@ -5087,11 +5101,11 @@ pmap_activate(struct thread *td)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
#ifdef SMP
atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
#else
oldpmap->pm_active &= ~PCPU_GET(cpumask);
pmap->pm_active |= PCPU_GET(cpumask);
CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
#endif
cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
td->td_pcb->pcb_cr3 = cr3;

View File

@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
@ -70,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
@ -512,11 +514,13 @@ cpu_set_user_tls(struct thread *td, void *tls_base)
static void
cpu_reset_proxy()
{
cpuset_t tcrp;
cpu_reset_proxy_active = 1;
while (cpu_reset_proxy_active == 1)
; /* Wait for other cpu to see that we've started */
stop_cpus((1<<cpu_reset_proxyid));
CPU_SETOF(cpu_reset_proxyid, &tcrp);
stop_cpus(tcrp);
printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
DELAY(1000000);
cpu_reset_real();
@ -527,24 +531,28 @@ void
cpu_reset()
{
#ifdef SMP
cpumask_t map;
cpuset_t map;
u_int cnt;
if (smp_active) {
map = PCPU_GET(other_cpus) & ~stopped_cpus;
if (map != 0) {
sched_pin();
map = PCPU_GET(other_cpus);
CPU_NAND(&map, &stopped_cpus);
if (!CPU_EMPTY(&map)) {
printf("cpu_reset: Stopping other CPUs\n");
stop_cpus(map);
}
if (PCPU_GET(cpuid) != 0) {
cpu_reset_proxyid = PCPU_GET(cpuid);
sched_unpin();
cpustop_restartfunc = cpu_reset_proxy;
cpu_reset_proxy_active = 0;
printf("cpu_reset: Restarting BSP\n");
/* Restart CPU #0. */
atomic_store_rel_int(&started_cpus, 1 << 0);
CPU_SETOF(0, &started_cpus);
wmb();
cnt = 0;
while (cpu_reset_proxy_active == 0 && cnt < 10000000)
@ -556,7 +564,8 @@ cpu_reset()
while (1);
/* NOTREACHED */
}
} else
sched_unpin();
DELAY(1000000);
}

View File

@ -61,7 +61,6 @@ typedef unsigned long __uint64_t;
* Standard type definitions.
*/
typedef __int32_t __clock_t; /* clock()... */
typedef unsigned int __cpumask_t;
typedef __int64_t __critical_t;
typedef double __double_t;
typedef float __float_t;

View File

@ -152,6 +152,7 @@
#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
@ -251,7 +252,7 @@ struct pmap {
struct mtx pm_mtx;
pml4_entry_t *pm_pml4; /* KVA of level 4 page table */
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
cpumask_t pm_active; /* active on cpus */
cpuset_t pm_active; /* active on cpus */
/* spare u_int here due to padding */
struct pmap_statistics pm_stats; /* pmap statistics */
vm_page_t pm_root; /* spare page table pages */

View File

@ -63,17 +63,16 @@ void ipi_all_but_self(u_int ipi);
void ipi_bitmap_handler(struct trapframe frame);
void ipi_cpu(int cpu, u_int ipi);
int ipi_nmi_handler(void);
void ipi_selected(cpumask_t cpus, u_int ipi);
void ipi_selected(cpuset_t cpus, u_int ipi);
u_int mp_bootaddress(u_int);
int mp_grab_cpu_hlt(void);
void smp_cache_flush(void);
void smp_invlpg(vm_offset_t addr);
void smp_masked_invlpg(cpumask_t mask, vm_offset_t addr);
void smp_masked_invlpg(cpuset_t mask, vm_offset_t addr);
void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
void smp_masked_invlpg_range(cpumask_t mask, vm_offset_t startva,
void smp_masked_invlpg_range(cpuset_t mask, vm_offset_t startva,
vm_offset_t endva);
void smp_invltlb(void);
void smp_masked_invltlb(cpumask_t mask);
void smp_masked_invltlb(cpuset_t mask);
#endif /* !LOCORE */
#endif /* SMP */
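
Since the x86 IPI entry points above now take a cpuset_t by value, callers build the mask with the cpuset macros instead of shifting. A hedged sketch of a caller (the function name and CPU numbers are invented; smp_masked_invlpg() is the prototype declared above):

#include <sys/param.h>
#include <sys/cpuset.h>
#include <machine/smp.h>

static void
shoot_two_cpus(int cpu1, int cpu2, vm_offset_t va)
{
	cpuset_t mask;

	CPU_ZERO(&mask);		/* start from the empty set */
	CPU_SET(cpu1, &mask);		/* was: mask = (1 << cpu1) | (1 << cpu2); */
	CPU_SET(cpu2, &mask);
	smp_masked_invlpg(mask, va);	/* invalidate va on exactly those CPUs */
}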

View File

@ -2395,7 +2395,7 @@ pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt
cpu_cpwait();
PMAP_LOCK_INIT(kernel_pmap);
kernel_pmap->pm_active = -1;
CPU_FILL(&kernel_pmap->pm_active);
kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
TAILQ_INIT(&kernel_pmap->pm_pvlist);
@ -3826,7 +3826,7 @@ pmap_pinit(pmap_t pmap)
pmap_alloc_l1(pmap);
bzero(pmap->pm_l2, sizeof(pmap->pm_l2));
pmap->pm_active = 0;
CPU_ZERO(&pmap->pm_active);
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

View File

@ -67,7 +67,6 @@ typedef unsigned long long __uint64_t;
* Standard type definitions.
*/
typedef __uint32_t __clock_t; /* clock()... */
typedef unsigned int __cpumask_t;
typedef __int32_t __critical_t;
typedef double __double_t;
typedef double __float_t;

View File

@ -62,6 +62,7 @@
#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
@ -134,7 +135,7 @@ struct pmap {
struct l1_ttable *pm_l1;
struct l2_dtable *pm_l2[L2_SIZE];
pd_entry_t *pm_pdir; /* KVA of page directory */
cpumask_t pm_active; /* active on cpus */
cpuset_t pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statistics */
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
};

View File

@ -6,7 +6,7 @@ MK_SSP= no
LIB= ia64
INTERNALLIB=
SRCS= autoload.c bootinfo.c copy.c devicename.c exec.c
SRCS= autoload.c bootinfo.c copy.c devicename.c exec.c icache.c
CFLAGS+= -I${.CURDIR}/../../efi/include
CFLAGS+= -I${.CURDIR}/../../efi/include/${MACHINE_CPUARCH}

View File

@ -258,6 +258,8 @@ ia64_loadseg(Elf_Ehdr *eh, Elf_Phdr *ph, uint64_t delta)
if (ph->p_flags & PF_X) {
ia64_text_start = ph->p_vaddr + delta;
ia64_text_size = ph->p_memsz;
ia64_sync_icache(ia64_text_start, ia64_text_size);
} else {
ia64_data_start = ph->p_vaddr + delta;
ia64_data_size = ph->p_memsz;

View File

@ -0,0 +1,51 @@
/*-
* Copyright (c) 2011 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <stand.h>
#include <machine/ia64_cpu.h>
#include "libia64.h"
void
ia64_sync_icache(vm_offset_t va, size_t sz)
{
uintptr_t pa;
size_t cnt, max;
while (sz > 0) {
max = sz;
pa = (uintptr_t)ia64_va2pa(va, &max);
for (cnt = 0; cnt < max; cnt += 32)
ia64_fc_i(pa + cnt);
ia64_sync_i();
va += max;
sz -= max;
}
ia64_srlz_i();
}

View File

@ -64,6 +64,7 @@ void ia64_loadseg(void *, void *, uint64_t);
ssize_t ia64_copyin(const void *, vm_offset_t, size_t);
ssize_t ia64_copyout(vm_offset_t, void *, size_t);
void ia64_sync_icache(vm_offset_t, size_t);
ssize_t ia64_readin(int, vm_offset_t, size_t);
void *ia64_va2pa(vm_offset_t, size_t *);

View File

@ -230,3 +230,35 @@ ia64_platform_enter(const char *kernel)
return (0);
}
COMMAND_SET(pbvm, "pbvm", "show PBVM details", command_pbvm);
static int
command_pbvm(int argc, char *argv[])
{
uint64_t limit, pg, start;
u_int idx;
printf("Page table @ %p, size %x\n", ia64_pgtbl, ia64_pgtblsz);
if (ia64_pgtbl == NULL)
return (0);
limit = ~0;
start = ~0;
idx = 0;
while (ia64_pgtbl[idx] != 0) {
pg = ia64_pgtbl[idx];
if (pg != limit) {
if (start != ~0)
printf("%#lx-%#lx\n", start, limit);
start = pg;
}
limit = pg + IA64_PBVM_PAGE_SIZE;
idx++;
}
if (start != ~0)
printf("%#lx-%#lx\n", start, limit);
return (0);
}

View File

@ -153,9 +153,7 @@ main(int argc, CHAR16 *argv[])
*/
cons_probe();
printf("\n");
printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
printf("(%s, %s)\n", bootprog_maker, bootprog_date);
printf("\n%s, Revision %s\n", bootprog_name, bootprog_rev);
find_pal_proc();
@ -214,6 +212,18 @@ static int
command_quit(int argc, char *argv[])
{
exit(0);
/* NOTREACHED */
return (CMD_OK);
}
COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);
static int
command_reboot(int argc, char *argv[])
{
RS->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, NULL);
/* NOTREACHED */
return (CMD_OK);
}
@ -585,3 +595,24 @@ command_hcdp(int argc, char *argv[])
printf("<EOT>\n");
return (CMD_OK);
}
COMMAND_SET(about, "about", "about the loader", command_about);
extern uint64_t _start_plabel[];
static int
command_about(int argc, char *argv[])
{
EFI_LOADED_IMAGE *img;
printf("%s\n", bootprog_name);
printf("revision %s\n", bootprog_rev);
printf("built by %s\n", bootprog_maker);
printf("built on %s\n", bootprog_date);
printf("\n");
BS->HandleProtocol(IH, &imgid, (VOID**)&img);
printf("image loaded at %p\n", img->ImageBase);
printf("entry at %#lx (%#lx)\n", _start_plabel[0], _start_plabel[1]);
return (CMD_OK);
}

View File

@ -3,6 +3,8 @@ $FreeBSD$
NOTE ANY CHANGES YOU MAKE TO THE BOOTBLOCKS HERE. The format of this
file is important. Make sure the current version number is on line 6.
3.1: Add the about, reboot and pbvm commands.
I-cache coherency is maintained.
3.0: Add support for PBVM.
2.2: Create direct mapping based on start address instead of mapping
first 256M.

View File

@ -40,8 +40,6 @@
extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
extern void atomic_dec_64(volatile uint64_t *target);
#endif
#ifndef __LP64__
#endif
#ifndef __sparc64__
extern uint32_t atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
uint32_t newval);

View File

@ -500,9 +500,11 @@ spa_history_log_version(spa_t *spa, history_internal_events_t event)
utsname.nodename, utsname.release, utsname.version,
utsname.machine);
}
#if 0
cmn_err(CE_CONT, "!%s version %llu pool %s using %llu",
event == LOG_POOL_IMPORT ? "imported" :
event == LOG_POOL_CREATE ? "created" : "accessed",
(u_longlong_t)current_vers, spa_name(spa), SPA_VERSION);
#endif
#endif
}

View File

@ -123,7 +123,9 @@ reprogram(cyb_arg_t arg __unused, hrtime_t exp)
static void xcall(cyb_arg_t arg __unused, cpu_t *c, cyc_func_t func,
void *param)
{
cpuset_t cpus;
smp_rendezvous_cpus((cpumask_t)1 << c->cpuid,
CPU_SETOF(c->cpuid, &cpus);
smp_rendezvous_cpus(cpus,
smp_no_rendevous_barrier, func, smp_no_rendevous_barrier, param);
}

View File

@ -113,12 +113,12 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
cpumask_t cpus;
cpuset_t cpus;
if (cpu == DTRACE_CPUALL)
cpus = all_cpus;
else
cpus = (cpumask_t)1 << cpu;
CPU_SETOF(cpu, &cpus);
smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
smp_no_rendevous_barrier, arg);
@ -374,7 +374,7 @@ dtrace_gethrtime_init(void *arg)
{
struct pcpu *pc;
uint64_t tsc_f;
cpumask_t map;
cpuset_t map;
int i;
/*
@ -412,7 +412,8 @@ dtrace_gethrtime_init(void *arg)
continue;
pc = pcpu_find(i);
map = PCPU_GET(cpumask) | pc->pc_cpumask;
map = PCPU_GET(cpumask);
CPU_OR(&map, &pc->pc_cpumask);
smp_rendezvous_cpus(map, NULL,
dtrace_gethrtime_init_cpu,

View File

@ -30,6 +30,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
@ -113,12 +114,12 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
cpumask_t cpus;
cpuset_t cpus;
if (cpu == DTRACE_CPUALL)
cpus = all_cpus;
else
cpus = (cpumask_t)1 << cpu;
CPU_SETOF(cpu, &cpus);
smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
smp_no_rendevous_barrier, arg);
@ -372,9 +373,9 @@ dtrace_gethrtime_init_cpu(void *arg)
static void
dtrace_gethrtime_init(void *arg)
{
cpuset_t map;
struct pcpu *pc;
uint64_t tsc_f;
cpumask_t map;
int i;
/*
@ -412,7 +413,8 @@ dtrace_gethrtime_init(void *arg)
continue;
pc = pcpu_find(i);
map = PCPU_GET(cpumask) | pc->pc_cpumask;
map = PCPU_GET(cpumask);
CPU_OR(&map, &pc->pc_cpumask);
smp_rendezvous_cpus(map, NULL,
dtrace_gethrtime_init_cpu,

View File

@ -432,7 +432,10 @@ options KTRACE_REQUEST_POOL=101
# defined by the KTR_* constants in <sys/ktr.h>. KTR_MASK defines the
# initial value of the ktr_mask variable which determines at runtime
# what events to trace. KTR_CPUMASK determines which CPU's log
# events, with bit X corresponding to CPU X. KTR_VERBOSE enables
# events, with bit X corresponding to CPU X. The layout of the string
# passed as KTR_CPUMASK must match a series of bitmasks, each of them
# separated by the ", " characters (i.e.:
# KTR_CPUMASK=("0xAF, 0xFFFFFFFFFFFFFFFF")). KTR_VERBOSE enables
# dumping of KTR events to the console by default. This functionality
# can be toggled via the debug.ktr_verbose sysctl and defaults to off
# if KTR_VERBOSE is not defined. See ktr(4) and ktrdump(8) for details.
@ -441,7 +444,7 @@ options KTR
options KTR_ENTRIES=1024
options KTR_COMPILE=(KTR_INTR|KTR_PROC)
options KTR_MASK=KTR_INTR
options KTR_CPUMASK=0x3
options KTR_CPUMASK=("0x3")
options KTR_VERBOSE
#
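
As an example, the comment above spells out how a mask wider than one word is written in a kernel config file: a single quoted string of comma-separated words, e.g.

options 	KTR_CPUMASK=("0xAF, 0xFFFFFFFFFFFFFFFF")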

View File

@ -2748,6 +2748,7 @@ netinet/ip_gre.c optional gre inet
netinet/ip_id.c optional inet
netinet/in_mcast.c optional inet
netinet/in_pcb.c optional inet | inet6
netinet/in_pcbgroup.c optional inet pcbgroup | inet6 pcbgroup
netinet/in_proto.c optional inet | inet6 \
compile-with "${NORMAL_C} -I$S/contrib/pf"
netinet/in_rmx.c optional inet
@ -2825,6 +2826,7 @@ netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6
netinet6/in6_ifattach.c optional inet6
netinet6/in6_mcast.c optional inet6
netinet6/in6_pcb.c optional inet6
netinet6/in6_pcbgroup.c optional inet6 pcbgroup
netinet6/in6_proto.c optional inet6
netinet6/in6_rmx.c optional inet6
netinet6/in6_src.c optional inet6

View File

@ -419,6 +419,7 @@ MROUTING opt_mrouting.h
NCP
NETATALK opt_atalk.h
NFSLOCKD
PCBGROUP opt_pcbgroup.h
RADIX_MPATH opt_mpath.h
ROUTETABLES opt_route.h
SLIP_IFF_OPTS opt_slip.h

View File

@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/cons.h>
#include <sys/conf.h>
#include <sys/watchdog.h>
#include <sys/kernel.h>
@ -64,6 +65,7 @@ db_addr_t db_last_addr;
db_addr_t db_prev;
db_addr_t db_next;
static db_cmdfcn_t db_dump;
static db_cmdfcn_t db_fncall;
static db_cmdfcn_t db_gdb;
static db_cmdfcn_t db_halt;
@ -102,6 +104,7 @@ static struct command db_cmds[] = {
{ "w", db_write_cmd, CS_MORE|CS_SET_DOT, 0 },
{ "delete", db_delete_cmd, 0, 0 },
{ "d", db_delete_cmd, 0, 0 },
{ "dump", db_dump, 0, 0 },
{ "break", db_breakpoint_cmd, 0, 0 },
{ "b", db_breakpoint_cmd, 0, 0 },
{ "dwatch", db_deletewatch_cmd, 0, 0 },
@ -526,6 +529,27 @@ db_error(s)
kdb_reenter();
}
static void
db_dump(db_expr_t dummy, boolean_t dummy2, db_expr_t dummy3, char *dummy4)
{
int error;
error = doadump(FALSE);
if (error) {
db_printf("Cannot dump: ");
switch (error) {
case EBUSY:
db_printf("debugger got invoked while dumping.\n");
break;
case ENXIO:
db_printf("no dump device specified.\n");
break;
default:
db_printf("unknown error (error=%d).\n", error);
break;
}
}
}
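
With the new db_cmds entry, a crash dump can be requested straight from the debugger prompt. A hypothetical session on a machine without a configured dump device would produce exactly the ENXIO message above:

db> dump
Cannot dump: no dump device specified.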
/*
* Call random function:

View File

@ -25,8 +25,8 @@
*/
/*
* This is a driver for watchdog timer present in AMD SB600/SB7xx
* south bridges and other watchdog timers advertised via WDRT ACPI table.
* This is a driver for watchdog timer present in AMD SB600/SB7xx/SB8xx
* southbridges.
* Please see the following specifications for the descriptions of the
* registers and flags:
* - AMD SB600 Register Reference Guide, Public Version, Rev. 3.03 (SB600 RRG)
@ -35,11 +35,13 @@
* http://developer.amd.com/assets/43009_sb7xx_rrg_pub_1.00.pdf
* - AMD SB700/710/750 Register Programming Requirements (RPR)
* http://developer.amd.com/assets/42413_sb7xx_rpr_pub_1.00.pdf
* - AMD SB800-Series Southbridges Register Reference Guide (RRG)
* http://support.amd.com/us/Embedded_TechDocs/45482.pdf
* Please see the following for Watchdog Resource Table specification:
* - Watchdog Timer Hardware Requirements for Windows Server 2003 (WDRT)
* http://www.microsoft.com/whdc/system/sysinternals/watchdog.mspx
* AMD SB600/SB7xx watchdog hardware seems to conform to the above,
* but my system doesn't provide the table.
* AMD SB600/SB7xx/SB8xx watchdog hardware seems to conform to the above
* specifications, but the table hasn't been spotted in the wild yet.
*/
#include <sys/cdefs.h>
@ -59,15 +61,15 @@ __FBSDID("$FreeBSD$");
#include <dev/pci/pcivar.h>
#include <isa/isavar.h>
/* RRG 2.3.3.1.1, page 161. */
/* SB7xx RRG 2.3.3.1.1. */
#define AMDSB_PMIO_INDEX 0xcd6
#define AMDSB_PMIO_DATA (AMDSB_PMIO_INDEX + 1)
#define AMDSB_PMIO_WIDTH 2
/* RRG 2.3.3.2, page 181. */
/* SB7xx RRG 2.3.3.2. */
#define AMDSB_PM_RESET_STATUS0 0x44
#define AMDSB_PM_RESET_STATUS1 0x45
#define AMDSB_WD_RST_STS 0x02
/* RRG 2.3.3.2, page 188; RPR 2.36, page 30. */
/* SB7xx RRG 2.3.3.2, RPR 2.36. */
#define AMDSB_PM_WDT_CTRL 0x69
#define AMDSB_WDT_DISABLE 0x01
#define AMDSB_WDT_RES_MASK (0x02 | 0x04)
@ -77,7 +79,18 @@ __FBSDID("$FreeBSD$");
#define AMDSB_WDT_RES_1S 0x06
#define AMDSB_PM_WDT_BASE_LSB 0x6c
#define AMDSB_PM_WDT_BASE_MSB 0x6f
/* RRG 2.3.4, page 223, WDRT. */
/* SB8xx RRG 2.3.3. */
#define AMDSB8_PM_WDT_EN 0x48
#define AMDSB8_WDT_DEC_EN 0x01
#define AMDSB8_WDT_DISABLE 0x02
#define AMDSB8_PM_WDT_CTRL 0x4c
#define AMDSB8_WDT_32KHZ 0x00
#define AMDSB8_WDT_1HZ 0x03
#define AMDSB8_WDT_RES_MASK 0x03
#define AMDSB8_PM_RESET_STATUS0 0xC0
#define AMDSB8_PM_RESET_STATUS1 0xC1
#define AMDSB8_WD_RST_STS 0x20
/* SB7xx RRG 2.3.4, WDRT. */
#define AMDSB_WD_CTRL 0x00
#define AMDSB_WD_RUN 0x01
#define AMDSB_WD_FIRED 0x02
@ -90,8 +103,9 @@ __FBSDID("$FreeBSD$");
#define AMDSB_WDIO_REG_WIDTH 4
/* WDRT */
#define MAXCOUNT_MIN_VALUE 511
/* RRG 2.3.1.1, page 122; SB600 RRG 2.3.1.1, page 97. */
#define AMDSB7xx_SMBUS_DEVID 0x43851002
/* SB7xx RRG 2.3.1.1, SB600 RRG 2.3.1.1, SB8xx RRG 2.3.1. */
#define AMDSB_SMBUS_DEVID 0x43851002
#define AMDSB8_SMBUS_REVID 0x40
#define amdsbwd_verbose_printf(dev, ...) \
do { \
@ -265,7 +279,7 @@ amdsbwd_identify(driver_t *driver, device_t parent)
smb_dev = pci_find_bsf(0, 20, 0);
if (smb_dev == NULL)
return;
if (pci_get_devid(smb_dev) != AMDSB7xx_SMBUS_DEVID)
if (pci_get_devid(smb_dev) != AMDSB_SMBUS_DEVID)
return;
child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "amdsbwd", -1);
@ -273,15 +287,102 @@ amdsbwd_identify(driver_t *driver, device_t parent)
device_printf(parent, "add amdsbwd child failed\n");
}
static void
amdsbwd_probe_sb7xx(device_t dev, struct resource *pmres, uint32_t *addr)
{
uint32_t val;
int i;
/* Report cause of previous reset for user's convenience. */
val = pmio_read(pmres, AMDSB_PM_RESET_STATUS0);
if (val != 0)
amdsbwd_verbose_printf(dev, "ResetStatus0 = %#04x\n", val);
val = pmio_read(pmres, AMDSB_PM_RESET_STATUS1);
if (val != 0)
amdsbwd_verbose_printf(dev, "ResetStatus1 = %#04x\n", val);
if ((val & AMDSB_WD_RST_STS) != 0)
device_printf(dev, "Previous Reset was caused by Watchdog\n");
/* Find base address of memory mapped WDT registers. */
for (*addr = 0, i = 0; i < 4; i++) {
*addr <<= 8;
*addr |= pmio_read(pmres, AMDSB_PM_WDT_BASE_MSB - i);
}
/* Set watchdog timer tick to 10ms. */
val = pmio_read(pmres, AMDSB_PM_WDT_CTRL);
val &= ~AMDSB_WDT_RES_MASK;
val |= AMDSB_WDT_RES_10MS;
pmio_write(pmres, AMDSB_PM_WDT_CTRL, val);
/* Enable watchdog device (in stopped state). */
val = pmio_read(pmres, AMDSB_PM_WDT_CTRL);
val &= ~AMDSB_WDT_DISABLE;
pmio_write(pmres, AMDSB_PM_WDT_CTRL, val);
/*
* XXX TODO: Ensure that watchdog decode is enabled
* (register 0x41, bit 3).
*/
device_set_desc(dev, "AMD SB600/SB7xx Watchdog Timer");
}
static void
amdsbwd_probe_sb8xx(device_t dev, struct resource *pmres, uint32_t *addr)
{
uint32_t val;
int i;
/* Report cause of previous reset for user's convenience. */
val = pmio_read(pmres, AMDSB8_PM_RESET_STATUS0);
if (val != 0)
amdsbwd_verbose_printf(dev, "ResetStatus0 = %#04x\n", val);
val = pmio_read(pmres, AMDSB8_PM_RESET_STATUS1);
if (val != 0)
amdsbwd_verbose_printf(dev, "ResetStatus1 = %#04x\n", val);
if ((val & AMDSB8_WD_RST_STS) != 0)
device_printf(dev, "Previous Reset was caused by Watchdog\n");
/* Find base address of memory mapped WDT registers. */
for (*addr = 0, i = 0; i < 4; i++) {
*addr <<= 8;
*addr |= pmio_read(pmres, AMDSB8_PM_WDT_EN + 3 - i);
}
*addr &= ~0x07u;
/* Set watchdog timer tick to 1s. */
val = pmio_read(pmres, AMDSB8_PM_WDT_CTRL);
val &= ~AMDSB8_WDT_RES_MASK;
val |= AMDSB8_WDT_1HZ;
pmio_write(pmres, AMDSB8_PM_WDT_CTRL, val);
#ifdef AMDSBWD_DEBUG
val = pmio_read(pmres, AMDSB8_PM_WDT_CTRL);
amdsbwd_verbose_printf(dev, "AMDSB8_PM_WDT_CTRL value = %#02x\n", val);
#endif
/*
* Enable watchdog device (in stopped state)
* and decoding of its address.
*/
val = pmio_read(pmres, AMDSB8_PM_WDT_EN);
val &= ~AMDSB8_WDT_DISABLE;
val |= AMDSB8_WDT_DEC_EN;
pmio_write(pmres, AMDSB8_PM_WDT_EN, val);
#ifdef AMDSBWD_DEBUG
val = pmio_read(pmres, AMDSB8_PM_WDT_EN);
device_printf(dev, "AMDSB8_PM_WDT_EN value = %#02x\n", val);
#endif
device_set_desc(dev, "AMD SB8xx Watchdog Timer");
}
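
The two tick rates matter because the counter stays 16 bits wide (max_ticks = UINT16_MAX in the attach code below, ms_per_tick = 10 or 1000 depending on the revision), so the maximum programmable timeout works out to roughly:

SB600/SB7xx: 65535 ticks * 10 ms = ~655 seconds (about 11 minutes)
SB8xx:       65535 ticks * 1 s   = ~65535 seconds (about 18 hours)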
static int
amdsbwd_probe(device_t dev)
{
struct resource *res;
device_t smb_dev;
uint32_t addr;
uint32_t val;
int rid;
int rc;
int i;
/* Do not claim some ISA PnP device by accident. */
if (isa_get_logicalid(dev) != 0)
@ -301,21 +402,16 @@ amdsbwd_probe(device_t dev)
return (ENXIO);
}
/* Report cause of previous reset for user's convenience. */
val = pmio_read(res, AMDSB_PM_RESET_STATUS0);
if (val != 0)
amdsbwd_verbose_printf(dev, "ResetStatus0 = %#04x\n", val);
val = pmio_read(res, AMDSB_PM_RESET_STATUS1);
if (val != 0)
amdsbwd_verbose_printf(dev, "ResetStatus1 = %#04x\n", val);
if ((val & AMDSB_WD_RST_STS) != 0)
device_printf(dev, "Previous Reset was caused by Watchdog\n");
smb_dev = pci_find_bsf(0, 20, 0);
KASSERT(smb_dev != NULL, ("can't find SMBus PCI device\n"));
if (pci_get_revid(smb_dev) < AMDSB8_SMBUS_REVID)
amdsbwd_probe_sb7xx(dev, res, &addr);
else
amdsbwd_probe_sb8xx(dev, res, &addr);
bus_release_resource(dev, SYS_RES_IOPORT, rid, res);
bus_delete_resource(dev, SYS_RES_IOPORT, rid);
/* Find base address of memory mapped WDT registers. */
for (addr = 0, i = 0; i < 4; i++) {
addr <<= 8;
addr |= pmio_read(res, AMDSB_PM_WDT_BASE_MSB - i);
}
amdsbwd_verbose_printf(dev, "memory base address = %#010x\n", addr);
rc = bus_set_resource(dev, SYS_RES_MEMORY, 0, addr + AMDSB_WD_CTRL,
AMDSB_WDIO_REG_WIDTH);
@ -330,36 +426,25 @@ amdsbwd_probe(device_t dev)
return (ENXIO);
}
/* Set watchdog timer tick to 10ms. */
val = pmio_read(res, AMDSB_PM_WDT_CTRL);
val &= ~AMDSB_WDT_RES_MASK;
val |= AMDSB_WDT_RES_10MS;
pmio_write(res, AMDSB_PM_WDT_CTRL, val);
/* Enable watchdog device (in stopped state). */
val = pmio_read(res, AMDSB_PM_WDT_CTRL);
val &= ~AMDSB_WDT_DISABLE;
pmio_write(res, AMDSB_PM_WDT_CTRL, val);
/*
* XXX TODO: Ensure that watchdog decode is enabled
* (register 0x41, bit 3).
*/
bus_release_resource(dev, SYS_RES_IOPORT, rid, res);
bus_delete_resource(dev, SYS_RES_IOPORT, rid);
device_set_desc(dev, "AMD SB600/SB7xx Watchdog Timer");
return (0);
}
static int
amdsbwd_attach_sb(device_t dev, struct amdsbwd_softc *sc)
{
device_t smb_dev;
sc->max_ticks = UINT16_MAX;
sc->ms_per_tick = 10;
sc->rid_ctrl = 0;
sc->rid_count = 1;
smb_dev = pci_find_bsf(0, 20, 0);
KASSERT(smb_dev != NULL, ("can't find SMBus PCI device\n"));
if (pci_get_revid(smb_dev) < AMDSB8_SMBUS_REVID)
sc->ms_per_tick = 10;
else
sc->ms_per_tick = 1000;
sc->res_ctrl = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->rid_ctrl, RF_ACTIVE);
if (sc->res_ctrl == NULL) {
@ -388,6 +473,11 @@ amdsbwd_attach(device_t dev)
if (rc != 0)
goto fail;
#ifdef AMDSBWD_DEBUG
device_printf(dev, "wd ctrl = %#04x\n", wdctrl_read(sc));
device_printf(dev, "wd count = %#04x\n", wdcount_read(sc));
#endif
/* Setup initial state of Watchdog Control. */
wdctrl_write(sc, AMDSB_WD_FIRED);

View File

@ -745,6 +745,17 @@ typedef enum {
HAL_QUIET_ADD_SWBA_RESP_TIME = 0x4, /* add beacon response time to next_start offset */
} HAL_QUIET_FLAG;
#define HAL_DFS_EVENT_PRICH 0x0000001
struct dfs_event {
uint64_t re_full_ts; /* 64-bit full timestamp from interrupt time */
uint32_t re_ts; /* Original 15 bit recv timestamp */
uint8_t re_rssi; /* rssi of radar event */
uint8_t re_dur; /* duration of radar pulse */
uint32_t re_flags; /* Flags (see above) */
};
typedef struct dfs_event HAL_DFS_EVENT;
/*
* Hardware Access Layer (HAL) API.
*
@ -928,6 +939,9 @@ struct ath_hal {
HAL_PHYERR_PARAM *pe);
void __ahdecl(*ah_getDfsThresh)(struct ath_hal *ah,
HAL_PHYERR_PARAM *pe);
HAL_BOOL __ahdecl(*ah_procRadarEvent)(struct ath_hal *ah,
struct ath_rx_status *rxs, uint64_t fulltsf,
const char *buf, HAL_DFS_EVENT *event);
/* Key Cache Functions */
uint32_t __ahdecl(*ah_getKeyCacheSize)(struct ath_hal*);

View File

@ -622,5 +622,8 @@ extern HAL_BOOL ar5212IsNFCalInProgress(struct ath_hal *ah);
extern HAL_BOOL ar5212WaitNFCalComplete(struct ath_hal *ah, int i);
extern void ar5212EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe);
extern void ar5212GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe);
extern HAL_BOOL ar5212ProcessRadarEvent(struct ath_hal *ah,
struct ath_rx_status *rxs, uint64_t fulltsf, const char *buf,
HAL_DFS_EVENT *event);
#endif /* _ATH_AR5212_H_ */

View File

@ -132,6 +132,7 @@ static const struct ath_hal_private ar5212hal = {{
/* DFS Functions */
.ah_enableDfs = ar5212EnableDfs,
.ah_getDfsThresh = ar5212GetDfsThresh,
.ah_procRadarEvent = ar5212ProcessRadarEvent,
/* Key Cache Functions */
.ah_getKeyCacheSize = ar5212GetKeyCacheSize,

View File

@ -21,9 +21,7 @@
#include "ah.h"
#include "ah_internal.h"
#include "ah_devid.h"
#ifdef AH_DEBUG
#include "ah_desc.h" /* NB: for HAL_PHYERR* */
#endif
#include "ar5212/ar5212.h"
#include "ar5212/ar5212reg.h"
@ -1180,3 +1178,47 @@ ar5212GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe)
pe->pe_extchannel = AH_FALSE;
}
/*
* Process the radar phy error and extract the pulse duration.
*/
HAL_BOOL
ar5212ProcessRadarEvent(struct ath_hal *ah, struct ath_rx_status *rxs,
uint64_t fulltsf, const char *buf, HAL_DFS_EVENT *event)
{
uint8_t dur;
uint8_t rssi;
/* Check whether the given phy error is a radar event */
if ((rxs->rs_phyerr != HAL_PHYERR_RADAR) &&
(rxs->rs_phyerr != HAL_PHYERR_FALSE_RADAR_EXT))
return AH_FALSE;
/*
* The first byte is the pulse width - if there's
* no data, simply set the duration to 0
*/
if (rxs->rs_datalen >= 1)
/* The pulse width is byte 0 of the data */
dur = ((uint8_t) buf[0]) & 0xff;
else
dur = 0;
/* Pulse RSSI is the normal reported RSSI */
rssi = (uint8_t) rxs->rs_rssi;
/* 0 duration/rssi is not a valid radar event */
if (dur == 0 && rssi == 0)
return AH_FALSE;
HALDEBUG(ah, HAL_DEBUG_DFS, "%s: rssi=%d, dur=%d\n",
__func__, rssi, dur);
/* Record the event */
event->re_full_ts = fulltsf;
event->re_ts = rxs->rs_tstamp;
event->re_rssi = rssi;
event->re_dur = dur;
event->re_flags = HAL_DFS_EVENT_PRICH;
return AH_TRUE;
}
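
A hedged sketch of how a driver's RX path might consume the new method through the ath_hal_procradarevent() glue added further down in this commit; rxs, tsf and buf stand in for the driver's receive-completion state, and dfs_queue_event() is a hypothetical consumer:

HAL_DFS_EVENT ev;

if (ath_hal_procradarevent(sc->sc_ah, rxs, tsf, buf, &ev)) {
	/* ev.re_dur and ev.re_rssi now describe one radar pulse. */
	dfs_queue_event(sc, &ev);	/* hypothetical: feed the DFS matcher */
}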

View File

@ -205,6 +205,9 @@ extern HAL_BOOL ar5416SetRifsDelay(struct ath_hal *ah,
const struct ieee80211_channel *chan, HAL_BOOL enable);
extern void ar5416EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe);
extern void ar5416GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe);
extern HAL_BOOL ar5416ProcessRadarEvent(struct ath_hal *ah,
struct ath_rx_status *rxs, uint64_t fulltsf, const char *buf,
HAL_DFS_EVENT *event);
extern HAL_BOOL ar5416SetPowerMode(struct ath_hal *ah, HAL_POWER_MODE mode,
int setChip);

View File

@ -147,6 +147,7 @@ ar5416InitState(struct ath_hal_5416 *ahp5416, uint16_t devid, HAL_SOFTC sc,
/* DFS Functions */
ah->ah_enableDfs = ar5416EnableDfs;
ah->ah_getDfsThresh = ar5416GetDfsThresh;
ah->ah_procRadarEvent = ar5416ProcessRadarEvent;
/* Power Management Functions */
ah->ah_setPowerMode = ar5416SetPowerMode;

View File

@ -692,3 +692,19 @@ ar5416EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe)
OS_REG_WRITE(ah, AR_PHY_RADAR_1, val);
}
}
/*
* Extract the radar event information from the given phy error.
*
* Returns AH_TRUE if the phy error was actually a radar event,
* AH_FALSE if it wasn't.
*/
HAL_BOOL
ar5416ProcessRadarEvent(struct ath_hal *ah, struct ath_rx_status *rxs,
uint64_t fulltsf, const char *buf, HAL_DFS_EVENT *event)
{
/*
* For now, this isn't implemented.
*/
return AH_FALSE;
}

View File

@ -709,6 +709,8 @@ void ath_intr(void *);
((*(_ah)->ah_enableDfs)((_ah), (_param)))
#define ath_hal_getdfsthresh(_ah, _param) \
((*(_ah)->ah_getDfsThresh)((_ah), (_param)))
#define ath_hal_procradarevent(_ah, _rxs, _fulltsf, _buf, _event) \
((*(_ah)->ah_procRadarEvent)((_ah), (_rxs), (_fulltsf), (_buf), (_event)))
#define ath_hal_gpioCfgOutput(_ah, _gpio, _type) \
((*(_ah)->ah_gpioCfgOutput)((_ah), (_gpio), (_type)))

View File

@ -1097,10 +1097,8 @@ get_typematic(keyboard_t *kbd)
x86regs_t regs;
uint8_t *p;
if (!(kbd->kb_config & KB_CONF_PROBE_TYPEMATIC))
return (ENODEV);
if (x86bios_get_intr(0x15) == 0 || x86bios_get_intr(0x16) == 0)
if (x86bios_get_intr(0x15) != 0xf000f859 ||
x86bios_get_intr(0x16) != 0xf000e82e)
return (ENODEV);
/* Is BIOS system configuration table supported? */

View File

@ -36,7 +36,6 @@
#define KB_CONF_NO_RESET (1 << 1) /* don't reset the keyboard */
#define KB_CONF_ALT_SCANCODESET (1 << 2) /* assume the XT type keyboard */
#define KB_CONF_NO_PROBE_TEST (1 << 3) /* don't test keyboard during probe */
#define KB_CONF_PROBE_TYPEMATIC (1 << 4) /* probe keyboard typematic */
#ifdef _KERNEL

View File

@ -324,7 +324,7 @@ decode_tuple_bar(device_t cbdev, device_t child, int id,
* hint when the cardbus bridge is a child of pci0 (the main
* bus). The PC Card spec seems to indicate that this should
* only be done on x86 based machines, which suggests that on
* non-x86 machines the adddresses can be anywhere. Since the
* non-x86 machines the addresses can be anywhere. Since the
* hardware can do it on non-x86 machines, it should be able
* to do it on x86 machines too. Therefore, we can and should
* ignore this hint. Furthermore, the PC Card spec recommends
@ -430,7 +430,6 @@ cardbus_read_tuple_finish(device_t cbdev, device_t child, int rid,
{
if (res != CIS_CONFIG_SPACE) {
bus_release_resource(child, SYS_RES_MEMORY, rid, res);
bus_delete_resource(child, SYS_RES_MEMORY, rid);
}
}
@ -467,7 +466,7 @@ cardbus_read_tuple_init(device_t cbdev, device_t child, uint32_t *start,
}
/* allocate the memory space to read CIS */
res = bus_alloc_resource(child, SYS_RES_MEMORY, rid, 0, ~0, 1,
res = bus_alloc_resource_any(child, SYS_RES_MEMORY, rid,
rman_make_alignment_flags(4096) | RF_ACTIVE);
if (res == NULL) {
device_printf(cbdev, "Unable to allocate resource "

View File

@ -1991,7 +1991,7 @@ pmc_hook_handler(struct thread *td, int function, void *arg)
* had already processed the interrupt). We don't
* lose the interrupt sample.
*/
atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmc_cpumask);
pmc_process_samples(PCPU_GET(cpuid));
break;
@ -4083,7 +4083,7 @@ pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
done:
/* mark CPU as needing processing */
atomic_set_int(&pmc_cpumask, (1 << cpu));
CPU_SET_ATOMIC(cpu, &pmc_cpumask);
return (error);
}
@ -4193,7 +4193,7 @@ pmc_process_samples(int cpu)
break;
if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
/* Need a rescan at a later time. */
atomic_set_int(&pmc_cpumask, (1 << cpu));
CPU_SET_ATOMIC(cpu, &pmc_cpumask);
break;
}
@ -4782,7 +4782,7 @@ pmc_cleanup(void)
PMCDBG(MOD,INI,0, "%s", "cleanup");
/* switch off sampling */
pmc_cpumask = 0;
CPU_ZERO(&pmc_cpumask);
pmc_intr = NULL;
sx_xlock(&pmc_sx);

View File

@ -1405,8 +1405,8 @@ pccard_ccr_read_impl(device_t brdev, device_t child, uint32_t offset,
struct pccard_ivar *devi = PCCARD_IVAR(child);
*val = pccard_ccr_read(devi->pf, offset);
device_printf(child, "ccr_read of %#x (%#x) is %#x\n", offset,
devi->pf->pf_ccr_offset, *val);
DEVPRINTF((child, "ccr_read of %#x (%#x) is %#x\n", offset,
devi->pf->pf_ccr_offset, *val));
return 0;
}
@ -1421,8 +1421,8 @@ pccard_ccr_write_impl(device_t brdev, device_t child, uint32_t offset,
* Can't use pccard_ccr_write since client drivers may access
* registers not contained in the 'mask' if they are non-standard.
*/
device_printf(child, "ccr_write of %#x to %#x (%#x)\n", val, offset,
devi->pf->pf_ccr_offset);
DEVPRINTF((child, "ccr_write of %#x to %#x (%#x)\n", val, offset,
devi->pf->pf_ccr_offset));
bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + offset,
val);
return 0;

View File

@ -2576,6 +2576,17 @@ pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
uint16_t cmd;
struct resource *res;
/*
* The BAR may already exist if the device is a CardBus card
* whose CIS is stored in this BAR.
*/
pm = pci_find_bar(dev, reg);
if (pm != NULL) {
maprange = pci_maprange(pm->pm_value);
barlen = maprange == 64 ? 2 : 1;
return (barlen);
}
pci_read_bar(dev, reg, &map, &testval);
if (PCI_BAR_MEM(map)) {
type = SYS_RES_MEMORY;

View File

@ -51,12 +51,12 @@ static puc_config_f puc_config_amc;
static puc_config_f puc_config_diva;
static puc_config_f puc_config_exar;
static puc_config_f puc_config_icbook;
static puc_config_f puc_config_oxford_pcie;
static puc_config_f puc_config_quatech;
static puc_config_f puc_config_syba;
static puc_config_f puc_config_siig;
static puc_config_f puc_config_timedia;
static puc_config_f puc_config_titan;
static puc_config_f puc_config_oxford_pcie;
const struct puc_cfg puc_pci_devices[] = {
@ -1366,14 +1366,12 @@ puc_config_oxford_pcie(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
bar = puc_get_bar(sc, cfg->rid);
if (bar == NULL)
return (ENXIO);
for (idx = 0; idx < sc->sc_nports; idx++) {
value = bus_read_1(bar->b_res, 0x1000 + (idx << 9)
+ 0x92);
value = bus_read_1(bar->b_res, 0x1000 + (idx << 9) +
0x92);
bus_write_1(bar->b_res, 0x1000 + (idx << 9) + 0x92,
value | 0x10);
value | 0x10);
}
return (0);
case PUC_CFG_GET_LEN:
*res = 0x200;

View File

@ -51,7 +51,7 @@ int pcm_veto_load = 1;
int snd_unit = -1;
TUNABLE_INT("hw.snd.default_unit", &snd_unit);
static int snd_unit_auto = 0;
static int snd_unit_auto = -1;
TUNABLE_INT("hw.snd.default_auto", &snd_unit_auto);
SYSCTL_INT(_hw_snd, OID_AUTO, default_auto, CTLFLAG_RW,
&snd_unit_auto, 0, "assign default unit to a newly attached device");
@ -443,6 +443,7 @@ sysctl_hw_snd_default_unit(SYSCTL_HANDLER_ARGS)
if (!PCM_REGISTERED(d) || CHN_EMPTY(d, channels.pcm))
return EINVAL;
snd_unit = unit;
snd_unit_auto = 0;
}
return (error);
}
@ -737,6 +738,32 @@ pcm_killchan(device_t dev)
return (pcm_chn_destroy(ch));
}
static int
pcm_best_unit(int old)
{
struct snddev_info *d;
int i, best, bestprio, prio;
best = -1;
bestprio = -100;
for (i = 0; pcm_devclass != NULL &&
i < devclass_get_maxunit(pcm_devclass); i++) {
d = devclass_get_softc(pcm_devclass, i);
if (!PCM_REGISTERED(d))
continue;
prio = 0;
if (d->playcount == 0)
prio -= 10;
if (d->reccount == 0)
prio -= 2;
if (prio > bestprio || (prio == bestprio && i == old)) {
best = i;
bestprio = prio;
}
}
return (best);
}
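
The scoring works out as: a device with both playback and capture channels keeps prio 0, a playback-only device drops to -2, a capture-only one to -10, and a tie goes to the previous default unit (i == old), so an equally capable newcomer never steals the default.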
int
pcm_setstatus(device_t dev, char *str)
{
@ -770,8 +797,12 @@ pcm_setstatus(device_t dev, char *str)
PCM_UNLOCK(d);
if (snd_unit < 0 || snd_unit_auto != 0)
if (snd_unit_auto < 0)
snd_unit_auto = (snd_unit < 0) ? 1 : 0;
if (snd_unit < 0 || snd_unit_auto > 1)
snd_unit = device_get_unit(dev);
else if (snd_unit_auto == 1)
snd_unit = pcm_best_unit(snd_unit);
return (0);
}
@ -1113,7 +1144,6 @@ pcm_unregister(device_t dev)
struct snddev_info *d;
struct pcm_channel *ch;
struct thread *td;
int i;
td = curthread;
d = device_get_softc(dev);
@ -1216,21 +1246,9 @@ pcm_unregister(device_t dev)
sndstat_release(td);
if (snd_unit == device_get_unit(dev)) {
/*
* Reassign default unit to the next available dev, but
* first, reset snd_unit to something ridiculous.
*/
snd_unit = -1;
for (i = 0; pcm_devclass != NULL &&
i < devclass_get_maxunit(pcm_devclass); i++) {
if (device_get_unit(dev) == i)
continue;
d = devclass_get_softc(pcm_devclass, i);
if (PCM_REGISTERED(d)) {
snd_unit = i;
break;
}
}
snd_unit = pcm_best_unit(-1);
if (snd_unit_auto == 0)
snd_unit_auto = 1;
}
return (0);

View File

@ -187,6 +187,8 @@ struct usb_device {
struct usb_host_endpoint *linux_endpoint_end;
uint16_t devnum;
#endif
uint32_t clear_stall_errors; /* number of clear-stall failures */
};
/* globals */

View File

@ -66,6 +66,7 @@
#define USB_HUB_MAX_DEPTH 5
#define USB_EP0_BUFSIZE 1024 /* bytes */
#define USB_CS_RESET_LIMIT 20 /* failures = 20 * 50 ms = 1 sec */
typedef uint32_t usb_timeout_t; /* milliseconds */
typedef uint32_t usb_frlength_t; /* bytes */

View File

@ -966,10 +966,8 @@ ugen_re_enumerate(struct usb_fifo *f)
/* ignore any errors */
DPRINTFN(6, "no FIFOs\n");
}
if (udev->re_enumerate_wait == 0) {
udev->re_enumerate_wait = 1;
usb_needs_explore(udev->bus, 0);
}
/* start re-enumeration of device */
usbd_start_re_enumerate(udev);
return (0);
}

View File

@ -242,9 +242,14 @@ uhub_explore_sub(struct uhub_softc *sc, struct usb_port *up)
if (child->flags.usb_mode == USB_MODE_HOST) {
usbd_enum_lock(child);
if (child->re_enumerate_wait) {
err = usbd_set_config_index(child, USB_UNCONFIG_INDEX);
if (err == 0)
err = usbd_req_re_enumerate(child, NULL);
err = usbd_set_config_index(child,
USB_UNCONFIG_INDEX);
if (err != 0) {
DPRINTF("Unconfigure failed: "
"%s: Ignored.\n",
usbd_errstr(err));
}
err = usbd_req_re_enumerate(child, NULL);
if (err == 0)
err = usbd_set_config_index(child, 0);
if (err == 0) {
@ -2471,3 +2476,19 @@ usbd_filter_power_mode(struct usb_device *udev, uint8_t power_mode)
/* use fixed power mode given by hardware driver */
return (temp);
}
/*------------------------------------------------------------------------*
* usbd_start_re_enumerate
*
* This function starts re-enumeration of the given USB device. This
* function does not need to be called BUS-locked. This function does
* not wait until the re-enumeration is completed.
*------------------------------------------------------------------------*/
void
usbd_start_re_enumerate(struct usb_device *udev)
{
if (udev->re_enumerate_wait == 0) {
udev->re_enumerate_wait = 1;
usb_needs_explore(udev->bus, 0);
}
}

View File

@ -238,6 +238,10 @@ usb_do_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
/* reset error counter */
udev->clear_stall_errors = 0;
if (ep == NULL)
goto tr_setup; /* device was unconfigured */
if (ep->edesc &&
@ -289,8 +293,23 @@ usb_do_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
goto tr_setup;
default:
if (xfer->error == USB_ERR_CANCELLED) {
if (error == USB_ERR_CANCELLED)
break;
DPRINTF("Clear stall failed.\n");
if (udev->clear_stall_errors == USB_CS_RESET_LIMIT)
goto tr_setup;
if (error == USB_ERR_TIMEOUT) {
udev->clear_stall_errors = USB_CS_RESET_LIMIT;
DPRINTF("Trying to re-enumerate.\n");
usbd_start_re_enumerate(udev);
} else {
udev->clear_stall_errors++;
if (udev->clear_stall_errors == USB_CS_RESET_LIMIT) {
DPRINTF("Trying to re-enumerate.\n");
usbd_start_re_enumerate(udev);
}
}
goto tr_setup;
}
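
The escalation policy in the error path above: a timeout forces the counter straight to USB_CS_RESET_LIMIT and triggers re-enumeration immediately, while any other failure merely increments the counter until the limit of 20 is hit, roughly one second of retries at the 50 ms pace noted beside USB_CS_RESET_LIMIT earlier in this commit.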
@ -1936,6 +1955,23 @@ usbd_req_re_enumerate(struct usb_device *udev, struct mtx *mtx)
return (USB_ERR_INVAL);
}
retry:
/*
* Try to reset the High Speed parent HUB of a LOW- or FULL-
* speed device, if any.
*/
if (udev->parent_hs_hub != NULL &&
udev->speed != USB_SPEED_HIGH) {
DPRINTF("Trying to reset parent High Speed TT.\n");
err = usbd_req_reset_tt(udev->parent_hs_hub, NULL,
udev->hs_port_no);
if (err) {
DPRINTF("Resetting parent High "
"Speed TT failed (%s).\n",
usbd_errstr(err));
}
}
/* Try to reset the parent HUB port. */
err = usbd_req_reset_port(parent_hub, mtx, udev->port_no);
if (err) {
DPRINTFN(0, "addr=%d, port reset failed, %s\n",
@ -2033,3 +2069,65 @@ usbd_req_set_device_feature(struct usb_device *udev, struct mtx *mtx,
USETW(req.wLength, 0);
return (usbd_do_request(udev, mtx, &req, 0));
}
/*------------------------------------------------------------------------*
* usbd_req_reset_tt
*
* Returns:
* 0: Success
* Else: Failure
*------------------------------------------------------------------------*/
usb_error_t
usbd_req_reset_tt(struct usb_device *udev, struct mtx *mtx,
uint8_t port)
{
struct usb_device_request req;
/* For single TT HUBs the port should be 1 */
if (udev->ddesc.bDeviceClass == UDCLASS_HUB &&
udev->ddesc.bDeviceProtocol == UDPROTO_HSHUBSTT)
port = 1;
req.bmRequestType = UT_WRITE_CLASS_OTHER;
req.bRequest = UR_RESET_TT;
USETW(req.wValue, 0);
req.wIndex[0] = port;
req.wIndex[1] = 0;
USETW(req.wLength, 0);
return (usbd_do_request(udev, mtx, &req, 0));
}
/*------------------------------------------------------------------------*
* usbd_req_clear_tt_buffer
*
* For single TT HUBs the port should be 1.
*
* Returns:
* 0: Success
* Else: Failure
*------------------------------------------------------------------------*/
usb_error_t
usbd_req_clear_tt_buffer(struct usb_device *udev, struct mtx *mtx,
uint8_t port, uint8_t addr, uint8_t type, uint8_t endpoint)
{
struct usb_device_request req;
uint16_t wValue;
/* For single TT HUBs the port should be 1 */
if (udev->ddesc.bDeviceClass == UDCLASS_HUB &&
udev->ddesc.bDeviceProtocol == UDPROTO_HSHUBSTT)
port = 1;
wValue = (endpoint & 0xF) | ((addr & 0x7F) << 4) |
((endpoint & 0x80) << 8) | ((type & 3) << 12);
req.bmRequestType = UT_WRITE_CLASS_OTHER;
req.bRequest = UR_CLEAR_TT_BUFFER;
USETW(req.wValue, wValue);
req.wIndex[0] = port;
req.wIndex[1] = 0;
USETW(req.wLength, 0);
return (usbd_do_request(udev, mtx, &req, 0));
}
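
A worked example of the wValue packing, with endpoint, address and type values chosen purely for illustration: clearing the TT buffer for bulk IN endpoint 0x81 of the device at address 3 (UE_BULK == 2) yields

(0x81 & 0xF)         = 0x0001	/* endpoint number */
((3 & 0x7F) << 4)    = 0x0030	/* device address */
((0x81 & 0x80) << 8) = 0x8000	/* IN direction bit */
((2 & 3) << 12)      = 0x2000	/* endpoint type */
wValue               = 0xa031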

View File

@ -85,5 +85,9 @@ usb_error_t usbd_req_set_hub_u2_timeout(struct usb_device *udev,
struct mtx *mtx, uint8_t port, uint8_t timeout);
usb_error_t usbd_req_set_hub_depth(struct usb_device *udev,
struct mtx *mtx, uint16_t depth);
usb_error_t usbd_req_reset_tt(struct usb_device *udev, struct mtx *mtx,
uint8_t port);
usb_error_t usbd_req_clear_tt_buffer(struct usb_device *udev, struct mtx *mtx,
uint8_t port, uint8_t addr, uint8_t type, uint8_t endpoint);
#endif /* _USB_REQUEST_H_ */

View File

@ -2927,6 +2927,11 @@ usbd_ctrl_transfer_setup(struct usb_device *udev)
*/
usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
/*
* Reset clear stall error counter.
*/
udev->clear_stall_errors = 0;
/*
* Try to setup a new USB transfer for the
* default control endpoint:

View File

@ -542,6 +542,7 @@ void usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len);
void usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
usb_frlength_t len);
void usbd_start_re_enumerate(struct usb_device *udev);
int usb_fifo_attach(struct usb_device *udev, void *priv_sc,
struct mtx *priv_mtx, struct usb_fifo_methods *pm,

View File

@ -203,24 +203,29 @@ xctrl_suspend()
unsigned long max_pfn, start_info_mfn;
#ifdef SMP
cpumask_t map;
struct thread *td;
cpuset_t map;
/*
* Bind us to CPU 0 and stop any other VCPUs.
*/
thread_lock(curthread);
sched_bind(curthread, 0);
thread_unlock(curthread);
td = curthread;
thread_lock(td);
sched_bind(td, 0);
thread_unlock(td);
KASSERT(PCPU_GET(cpuid) == 0, ("xen_suspend: not running on cpu 0"));
map = PCPU_GET(other_cpus) & ~stopped_cpus;
if (map)
sched_pin();
map = PCPU_GET(other_cpus);
sched_unpin();
CPU_NAND(&map, &stopped_cpus);
if (!CPU_EMPTY(&map))
stop_cpus(map);
#endif
if (DEVICE_SUSPEND(root_bus) != 0) {
printf("xen_suspend: device_suspend failed\n");
#ifdef SMP
if (map)
if (!CPU_EMPTY(&map))
restart_cpus(map);
#endif
return;
@ -289,7 +294,7 @@ xctrl_suspend()
thread_lock(curthread);
sched_unbind(curthread);
thread_unlock(curthread);
if (map)
if (!CPU_EMPTY(&map))
restart_cpus(map);
#endif
}

View File

@ -401,10 +401,10 @@ int nfsrpc_readdirplus(vnode_t, struct uio *, nfsuint64 *,
int nfsrpc_commit(vnode_t, u_quad_t, int, struct ucred *,
NFSPROC_T *, u_char *, struct nfsvattr *, int *, void *);
int nfsrpc_advlock(vnode_t, off_t, int, struct flock *, int,
struct ucred *, NFSPROC_T *);
struct ucred *, NFSPROC_T *, void *, int);
int nfsrpc_lockt(struct nfsrv_descript *, vnode_t,
struct nfsclclient *, u_int64_t, u_int64_t, struct flock *,
struct ucred *, NFSPROC_T *);
struct ucred *, NFSPROC_T *, void *, int);
int nfsrpc_lock(struct nfsrv_descript *, struct nfsmount *, vnode_t,
u_int8_t *, int, struct nfscllockowner *, int, int, u_int64_t,
u_int64_t, short, struct ucred *, NFSPROC_T *, int);
@ -439,16 +439,16 @@ struct nfsclclient *nfscl_findcl(struct nfsmount *);
void nfscl_clientrelease(struct nfsclclient *);
void nfscl_freelock(struct nfscllock *, int);
int nfscl_getbytelock(vnode_t, u_int64_t, u_int64_t, short,
struct ucred *, NFSPROC_T *, struct nfsclclient *, int, u_int8_t *,
u_int8_t *, struct nfscllockowner **, int *, int *);
struct ucred *, NFSPROC_T *, struct nfsclclient *, int, void *, int,
u_int8_t *, u_int8_t *, struct nfscllockowner **, int *, int *);
int nfscl_relbytelock(vnode_t, u_int64_t, u_int64_t,
struct ucred *, NFSPROC_T *, int, struct nfsclclient *,
struct nfscllockowner **, int *);
void *, int, struct nfscllockowner **, int *);
int nfscl_checkwritelocked(vnode_t, struct flock *,
struct ucred *, NFSPROC_T *);
struct ucred *, NFSPROC_T *, void *, int);
void nfscl_lockrelease(struct nfscllockowner *, int, int);
void nfscl_fillclid(u_int64_t, char *, u_int8_t *, u_int16_t);
void nfscl_filllockowner(NFSPROC_T *, u_int8_t *);
void nfscl_filllockowner(void *, u_int8_t *, int);
void nfscl_freeopen(struct nfsclopen *, int);
void nfscl_umount(struct nfsmount *, NFSPROC_T *);
void nfscl_renewthread(struct nfsclclient *, NFSPROC_T *);
@ -466,9 +466,10 @@ void nfscl_lockexcl(struct nfsv4lock *, void *);
void nfscl_lockunlock(struct nfsv4lock *);
void nfscl_lockderef(struct nfsv4lock *);
void nfscl_docb(struct nfsrv_descript *, NFSPROC_T *);
void nfscl_releasealllocks(struct nfsclclient *, vnode_t, NFSPROC_T *);
void nfscl_releasealllocks(struct nfsclclient *, vnode_t, NFSPROC_T *, void *,
int);
int nfscl_lockt(vnode_t, struct nfsclclient *, u_int64_t,
u_int64_t, struct flock *, NFSPROC_T *);
u_int64_t, struct flock *, NFSPROC_T *, void *, int);
int nfscl_mustflush(vnode_t);
int nfscl_nodeleg(vnode_t, int);
int nfscl_removedeleg(vnode_t, NFSPROC_T *, nfsv4stateid_t *);

View File

@ -500,7 +500,7 @@ nfscl_fillclid(u_int64_t clval, char *uuid, u_int8_t *cp, u_int16_t idlen)
* Fill in a lock owner name. For now, pid + the process's creation time.
*/
void
nfscl_filllockowner(struct thread *td, u_int8_t *cp)
nfscl_filllockowner(void *id, u_int8_t *cp, int flags)
{
union {
u_int32_t lval;
@ -508,37 +508,35 @@ nfscl_filllockowner(struct thread *td, u_int8_t *cp)
} tl;
struct proc *p;
if (td == NULL) {
printf("NULL td\n");
bzero(cp, 12);
return;
}
p = td->td_proc;
if (p == NULL) {
printf("NULL pid\n");
bzero(cp, 12);
return;
}
tl.lval = p->p_pid;
*cp++ = tl.cval[0];
*cp++ = tl.cval[1];
*cp++ = tl.cval[2];
*cp++ = tl.cval[3];
if (p->p_stats == NULL) {
printf("pstats null\n");
bzero(cp, 8);
return;
}
tl.lval = p->p_stats->p_start.tv_sec;
*cp++ = tl.cval[0];
*cp++ = tl.cval[1];
*cp++ = tl.cval[2];
*cp++ = tl.cval[3];
tl.lval = p->p_stats->p_start.tv_usec;
*cp++ = tl.cval[0];
*cp++ = tl.cval[1];
*cp++ = tl.cval[2];
*cp = tl.cval[3];
if (id == NULL) {
printf("NULL id\n");
bzero(cp, NFSV4CL_LOCKNAMELEN);
return;
}
if ((flags & F_POSIX) != 0) {
p = (struct proc *)id;
tl.lval = p->p_pid;
*cp++ = tl.cval[0];
*cp++ = tl.cval[1];
*cp++ = tl.cval[2];
*cp++ = tl.cval[3];
tl.lval = p->p_stats->p_start.tv_sec;
*cp++ = tl.cval[0];
*cp++ = tl.cval[1];
*cp++ = tl.cval[2];
*cp++ = tl.cval[3];
tl.lval = p->p_stats->p_start.tv_usec;
*cp++ = tl.cval[0];
*cp++ = tl.cval[1];
*cp++ = tl.cval[2];
*cp = tl.cval[3];
} else if ((flags & F_FLOCK) != 0) {
bcopy(&id, cp, sizeof(id));
bzero(&cp[sizeof(id)], NFSV4CL_LOCKNAMELEN - sizeof(id));
} else {
printf("nfscl_filllockowner: not F_POSIX or F_FLOCK\n");
bzero(cp, NFSV4CL_LOCKNAMELEN);
}
}
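
The net effect: a POSIX (fcntl) lock owner is still named by pid plus the process start time, while a flock() owner is now named by the pointer value handed in as id, zero-padded to NFSV4CL_LOCKNAMELEN; the nfs_advlock() hunk below shows the caller passing ap->a_id and ap->a_flags through for exactly this purpose.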
/*
@ -943,6 +941,7 @@ nfscl_getmyip(struct nfsmount *nmp, int *isinet6p)
sad.sin_family = AF_INET;
sad.sin_len = sizeof (struct sockaddr_in);
sad.sin_addr.s_addr = sin->sin_addr.s_addr;
CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
rt = rtalloc1((struct sockaddr *)&sad, 0, 0UL);
if (rt != NULL) {
if (rt->rt_ifp != NULL &&
@ -956,6 +955,7 @@ nfscl_getmyip(struct nfsmount *nmp, int *isinet6p)
}
RTFREE_LOCKED(rt);
}
CURVNET_RESTORE();
#ifdef INET6
} else if (nmp->nm_nam->sa_family == AF_INET6) {
struct sockaddr_in6 sad6, *sin6;
@ -966,6 +966,7 @@ nfscl_getmyip(struct nfsmount *nmp, int *isinet6p)
sad6.sin6_family = AF_INET6;
sad6.sin6_len = sizeof (struct sockaddr_in6);
sad6.sin6_addr = sin6->sin6_addr;
CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
rt = rtalloc1((struct sockaddr *)&sad6, 0, 0UL);
if (rt != NULL) {
if (rt->rt_ifp != NULL &&
@ -980,6 +981,7 @@ nfscl_getmyip(struct nfsmount *nmp, int *isinet6p)
}
RTFREE_LOCKED(rt);
}
CURVNET_RESTORE();
#endif
}
return (retp);

View File

@ -3459,7 +3459,7 @@ nfsrpc_commit(vnode_t vp, u_quad_t offset, int cnt, struct ucred *cred,
*/
APPLESTATIC int
nfsrpc_advlock(vnode_t vp, off_t size, int op, struct flock *fl,
int reclaim, struct ucred *cred, NFSPROC_T *p)
int reclaim, struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
struct nfscllockowner *lp;
struct nfsclclient *clp;
@ -3511,11 +3511,11 @@ nfsrpc_advlock(vnode_t vp, off_t size, int op, struct flock *fl,
error = nfscl_getcl(vp, cred, p, &clp);
if (error)
return (error);
error = nfscl_lockt(vp, clp, off, len, fl, p);
error = nfscl_lockt(vp, clp, off, len, fl, p, id, flags);
if (!error) {
clidrev = clp->nfsc_clientidrev;
error = nfsrpc_lockt(nd, vp, clp, off, len, fl, cred,
p);
p, id, flags);
} else if (error == -1) {
error = 0;
}
@ -3530,7 +3530,7 @@ nfsrpc_advlock(vnode_t vp, off_t size, int op, struct flock *fl,
return (error);
do {
error = nfscl_relbytelock(vp, off, len, cred, p, callcnt,
clp, &lp, &dorpc);
clp, id, flags, &lp, &dorpc);
/*
* If it returns a NULL lp, we're done.
*/
@ -3538,7 +3538,7 @@ nfsrpc_advlock(vnode_t vp, off_t size, int op, struct flock *fl,
if (callcnt == 0)
nfscl_clientrelease(clp);
else
nfscl_releasealllocks(clp, vp, p);
nfscl_releasealllocks(clp, vp, p, id, flags);
return (error);
}
if (nmp->nm_clp != NULL)
@ -3572,10 +3572,10 @@ nfsrpc_advlock(vnode_t vp, off_t size, int op, struct flock *fl,
}
callcnt++;
} while (error == 0 && nd->nd_repstat == 0);
nfscl_releasealllocks(clp, vp, p);
nfscl_releasealllocks(clp, vp, p, id, flags);
} else if (op == F_SETLK) {
error = nfscl_getbytelock(vp, off, len, fl->l_type, cred, p,
NULL, 0, NULL, NULL, &lp, &newone, &donelocally);
NULL, 0, id, flags, NULL, NULL, &lp, &newone, &donelocally);
if (error || donelocally) {
return (error);
}
@ -3625,7 +3625,7 @@ nfsrpc_advlock(vnode_t vp, off_t size, int op, struct flock *fl,
APPLESTATIC int
nfsrpc_lockt(struct nfsrv_descript *nd, vnode_t vp,
struct nfsclclient *clp, u_int64_t off, u_int64_t len, struct flock *fl,
struct ucred *cred, NFSPROC_T *p)
struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
u_int32_t *tl;
int error, type, size;
@ -3643,7 +3643,7 @@ nfsrpc_lockt(struct nfsrv_descript *nd, vnode_t vp,
tl += 2;
*tl++ = clp->nfsc_clientid.lval[0];
*tl = clp->nfsc_clientid.lval[1];
nfscl_filllockowner(p, own);
nfscl_filllockowner(id, own, flags);
(void) nfsm_strtom(nd, own, NFSV4CL_LOCKNAMELEN);
error = nfscl_request(nd, vp, p, cred, NULL);
if (error)

View File

@ -226,7 +226,7 @@ nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
* If none found, add the new one or return error, depending upon
* "create".
*/
nfscl_filllockowner(p, own);
nfscl_filllockowner(p->td_proc, own, F_POSIX);
NFSLOCKCLSTATE();
dp = NULL;
/* First check the delegation list */
@ -521,7 +521,7 @@ nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
* If p != NULL, we want to search the parentage tree
* for a matching OpenOwner and use that.
*/
nfscl_filllockowner(p, own);
nfscl_filllockowner(p->td_proc, own, F_POSIX);
error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, NULL, p,
mode, NULL, &op);
if (error == 0) {
@ -596,7 +596,7 @@ nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
op = NULL;
while (op == NULL && (nproc != NULL || rown != NULL)) {
if (nproc != NULL) {
nfscl_filllockowner(nproc, own);
nfscl_filllockowner(nproc->td_proc, own, F_POSIX);
ownp = own;
} else {
ownp = rown;
@ -881,7 +881,7 @@ nfscl_clientrelease(struct nfsclclient *clp)
APPLESTATIC int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
int recovery, u_int8_t *rownp, u_int8_t *ropenownp,
int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
struct nfscllockowner *lp;
@ -942,7 +942,7 @@ nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
if (recovery) {
ownp = rownp;
} else {
nfscl_filllockowner(p, own);
nfscl_filllockowner(id, own, flags);
ownp = own;
}
if (!recovery) {
@ -1079,7 +1079,8 @@ nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
APPLESTATIC int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
__unused struct ucred *cred, NFSPROC_T *p, int callcnt,
struct nfsclclient *clp, struct nfscllockowner **lpp, int *dorpcp)
struct nfsclclient *clp, void *id, int flags,
struct nfscllockowner **lpp, int *dorpcp)
{
struct nfscllockowner *lp;
struct nfsclowner *owp;
@ -1116,7 +1117,7 @@ nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
*other_lop = *nlop;
}
nfscl_filllockowner(p, own);
nfscl_filllockowner(id, own, flags);
dp = NULL;
NFSLOCKCLSTATE();
if (callcnt == 0)
@ -1188,7 +1189,8 @@ nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
* Release all lockowners marked in progress for this process and file.
*/
APPLESTATIC void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p)
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
void *id, int flags)
{
struct nfsclowner *owp;
struct nfsclopen *op;
@ -1197,7 +1199,7 @@ nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p)
u_int8_t own[NFSV4CL_LOCKNAMELEN];
np = VTONFS(vp);
nfscl_filllockowner(p, own);
nfscl_filllockowner(id, own, flags);
NFSLOCKCLSTATE();
LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
@ -1226,7 +1228,7 @@ nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p)
*/
APPLESTATIC int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
struct ucred *cred, NFSPROC_T *p)
struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
struct nfsclowner *owp;
struct nfscllockowner *lp;
@ -1266,7 +1268,7 @@ nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
error = nfscl_getcl(vp, cred, p, &clp);
if (error)
return (1);
nfscl_filllockowner(p, own);
nfscl_filllockowner(id, own, flags);
NFSLOCKCLSTATE();
/*
@ -1641,7 +1643,7 @@ nfscl_cleanup(NFSPROC_T *p)
if (!nfscl_inited)
return;
nfscl_filllockowner(p, own);
nfscl_filllockowner(p->td_proc, own, F_POSIX);
NFSLOCKCLSTATE();
/*
@ -3322,7 +3324,7 @@ nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
*/
APPLESTATIC int
nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
u_int64_t len, struct flock *fl, NFSPROC_T *p)
u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
{
struct nfscllock *lop, nlck;
struct nfscldeleg *dp;
@ -3340,7 +3342,7 @@ nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
return (NFSERR_INVAL);
}
np = VTONFS(vp);
nfscl_filllockowner(p, own);
nfscl_filllockowner(id, own, flags);
NFSLOCKCLSTATE();
dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
@ -3615,7 +3617,7 @@ nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
off = lop->nfslo_first;
len = lop->nfslo_end - lop->nfslo_first;
error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
clp, 1, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone,
clp, 1, NULL, 0, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone,
&donelocally);
if (error || donelocally)
return (error);

View File

@ -2884,8 +2884,11 @@ nfs_advlock(struct vop_advlock_args *ap)
int ret, error = EOPNOTSUPP;
u_quad_t size;
if (NFS_ISV4(vp) && (ap->a_flags & F_POSIX)) {
cred = p->p_ucred;
if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) {
if ((ap->a_flags & F_POSIX) != 0)
cred = p->p_ucred;
else
cred = td->td_ucred;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_iflag & VI_DOOMED) {
VOP_UNLOCK(vp, 0);
@ -2898,7 +2901,8 @@ nfs_advlock(struct vop_advlock_args *ap)
* RFC3530 Sec. 9.3.2.
*/
if (ap->a_op == F_UNLCK &&
nfscl_checkwritelocked(vp, ap->a_fl, cred, td))
nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id,
ap->a_flags))
(void) ncl_flush(vp, MNT_WAIT, cred, td, 1, 0);
/*
@ -2907,7 +2911,7 @@ nfs_advlock(struct vop_advlock_args *ap)
*/
do {
ret = nfsrpc_advlock(vp, np->n_size, ap->a_op,
ap->a_fl, 0, cred, td);
ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags);
if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
ap->a_op == F_SETLK) {
VOP_UNLOCK(vp, 0);

View File

@ -672,7 +672,7 @@ static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
return ((hlt_cpus_mask & (1 << cpu)) != 0);
return (CPU_ISSET(cpu, &hlt_cpus_mask));
#else
return (0);
#endif

View File

@ -409,8 +409,7 @@ DB_SHOW_COMMAND(irqs, db_show_irqs)
* allocate CPUs round-robin.
*/
/* The BSP is always a valid target. */
static cpumask_t intr_cpus = (1 << 0);
static cpuset_t intr_cpus;
static int current_cpu;
/*
@ -432,7 +431,7 @@ intr_next_cpu(void)
current_cpu++;
if (current_cpu > mp_maxid)
current_cpu = 0;
} while (!(intr_cpus & (1 << current_cpu)));
} while (!CPU_ISSET(current_cpu, &intr_cpus));
mtx_unlock_spin(&icu_lock);
return (apic_id);
}
@ -463,7 +462,7 @@ intr_add_cpu(u_int cpu)
printf("INTR: Adding local APIC %d as a target\n",
cpu_apic_ids[cpu]);
intr_cpus |= (1 << cpu);
CPU_SET(cpu, &intr_cpus);
}
/*
@ -483,6 +482,9 @@ intr_shuffle_irqs(void *arg __unused)
return;
#endif
/* The BSP is always a valid target. */
CPU_SETOF(0, &intr_cpus);
/* Don't bother on UP. */
if (mp_ncpus == 1)
return;
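
The seeding moved to runtime because a cpuset_t is a structure and cannot take the old integer initializer (1 << 0); intr_cpus starts out zero-filled in BSS and the BSP is added here before the mask is first consulted for round-robin assignment.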

View File

@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_xbox.h"
@ -118,6 +119,7 @@ __FBSDID("$FreeBSD$");
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
@ -1357,9 +1359,8 @@ cpu_idle(int busy)
CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
busy, curcpu);
#if defined(SMP) && !defined(XEN)
if (mp_grab_cpu_hlt())
return;
#if defined(MP_WATCHDOG) && !defined(XEN)
ap_watchdog(PCPU_GET(cpuid));
#endif
#ifndef XEN
/* If we are busy - try to use fast methods. */

View File

@ -29,7 +29,6 @@ __FBSDID("$FreeBSD$");
#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_pmap.h"
#include "opt_sched.h"
#include "opt_smp.h"
@ -51,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h> /* cngetc() */
#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
@ -77,7 +77,6 @@ __FBSDID("$FreeBSD$");
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
@ -173,7 +172,7 @@ static u_long *ipi_hardclock_counts[MAXCPU];
* Local data and functions.
*/
static volatile cpumask_t ipi_nmi_pending;
static volatile cpuset_t ipi_nmi_pending;
/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;
@ -208,11 +207,8 @@ static int start_all_aps(void);
static int start_ap(int apic_id);
static void release_aps(void *dummy);
static int hlt_logical_cpus;
static u_int hyperthreading_cpus; /* logical cpus sharing L1 cache */
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
static struct sysctl_ctx_list logical_cpu_clist;
static void
mem_range_AP_init(void)
@ -289,8 +285,11 @@ topo_probe_0x4(void)
* logical processors that belong to the same core
* as BSP thus deducing number of threads per core.
*/
cpuid_count(0x04, 0, p);
max_cores = ((p[0] >> 26) & 0x3f) + 1;
if (cpu_high >= 0x4) {
cpuid_count(0x04, 0, p);
max_cores = ((p[0] >> 26) & 0x3f) + 1;
} else
max_cores = 1;
core_id_bits = mask_width(max_logical/max_cores);
if (core_id_bits < 0)
return;
@ -382,7 +381,7 @@ topo_probe(void)
if (cpu_topo_probed)
return;
logical_cpus_mask = 0;
CPU_ZERO(&logical_cpus_mask);
if (mp_ncpus <= 1)
cpu_cores = cpu_logical = 1;
else if (cpu_vendor_id == CPU_VENDOR_AMD)
@ -524,7 +523,7 @@ cpu_mp_probe(void)
* Always record BSP in CPU map so that the mbuf init code works
* correctly.
*/
all_cpus = 1;
CPU_SETOF(0, &all_cpus);
if (mp_ncpus == 0) {
/*
* No CPUs were found, so this must be a UP system. Setup
@ -659,6 +658,7 @@ cpu_mp_announce(void)
void
init_secondary(void)
{
cpuset_t tcpuset, tallcpus;
struct pcpu *pc;
vm_offset_t addr;
int gsel_tss;
@ -783,19 +783,17 @@ init_secondary(void)
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
tcpuset = PCPU_GET(cpumask);
/* Determine if we are a logical CPU. */
/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
logical_cpus_mask |= PCPU_GET(cpumask);
/* Determine if we are a hyperthread. */
if (hyperthreading_cpus > 1 &&
PCPU_GET(apic_id) % hyperthreading_cpus != 0)
hyperthreading_cpus_mask |= PCPU_GET(cpumask);
CPU_OR(&logical_cpus_mask, &tcpuset);
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
tallcpus = all_cpus;
CPU_NAND(&tallcpus, &tcpuset);
PCPU_SET(other_cpus, tallcpus);
if (bootverbose)
lapic_dump("AP");
@ -874,7 +872,7 @@ assign_cpu_ids(void)
if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
cpu_info[i].cpu_hyperthread = 1;
#if defined(SCHED_ULE)
/*
* Don't use HT CPU if it has been disabled by a
* tunable.
@ -883,7 +881,6 @@ assign_cpu_ids(void)
cpu_info[i].cpu_disabled = 1;
continue;
}
#endif
}
/* Don't use this CPU if it has been disabled by a tunable. */
@ -893,6 +890,11 @@ assign_cpu_ids(void)
}
}
if (hyperthreading_allowed == 0 && hyperthreading_cpus > 1) {
hyperthreading_cpus = 0;
cpu_logical = 1;
}
/*
* Assign CPU IDs to local APIC IDs and disable any CPUs
* beyond MAXCPU. CPU 0 is always assigned to the BSP.
@ -932,6 +934,7 @@ assign_cpu_ids(void)
static int
start_all_aps(void)
{
cpuset_t tallcpus;
#ifndef PC98
u_char mpbiosreason;
#endif
@ -991,11 +994,13 @@ start_all_aps(void)
}
CHECK_PRINT("trace"); /* show checkpoints */
all_cpus |= (1 << cpu); /* record AP in CPU map */
CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
}
/* build our map of 'other' CPUs */
PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
tallcpus = all_cpus;
CPU_NAND(&tallcpus, PCPU_PTR(cpumask));
PCPU_SET(other_cpus, tallcpus);
/* restore the warmstart vector */
*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
@ -1191,6 +1196,30 @@ SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
&ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */
/*
* Send an IPI to specified CPU handling the bitmap logic.
*/
static void
ipi_send_cpu(int cpu, u_int ipi)
{
u_int bitmap, old_pending, new_pending;
KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
if (IPI_IS_BITMAPED(ipi)) {
bitmap = 1 << ipi;
ipi = IPI_BITMAP_VECTOR;
do {
old_pending = cpu_ipi_pending[cpu];
new_pending = old_pending | bitmap;
} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
old_pending, new_pending));
if (old_pending)
return;
}
lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
}
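
ipi_send_cpu() coalesces the bitmap-class IPIs: it publishes the request bit in the target's pending word first and only raises the vector when the word was previously empty, so concurrent senders fold into a single interrupt. The kernel spells the read-modify-write as an atomic_cmpset loop; a portable C11 sketch can use atomic_fetch_or to the same effect (send_vector() is a made-up stand-in for lapic_ipi_vectored()):

#include <stdatomic.h>
#include <stdio.h>

/* Coalesce per-CPU pending bits: publish the bit first, and only
 * deliver the (more expensive) vector if no bit was already pending. */
static _Atomic unsigned cpu_ipi_pending[4];

static void
send_vector(int cpu)
{
    printf("vector raised on cpu %d\n", cpu);
}

static void
send_bitmapped_ipi(int cpu, unsigned bit)
{
    unsigned old;

    old = atomic_fetch_or(&cpu_ipi_pending[cpu], bit);
    if (old != 0)
        return;     /* a vector is already on its way */
    send_vector(cpu);
}

int
main(void)
{
    send_bitmapped_ipi(1, 1u << 0); /* raises the vector */
    send_bitmapped_ipi(1, 1u << 2); /* coalesced, no second vector */
    return (0);
}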
/*
* Flush the TLB on all other CPUs
*/
@ -1215,28 +1244,19 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
int ncpu, othercpus;
int cpu, ncpu, othercpus;
othercpus = mp_ncpus - 1;
if (mask == (u_int)-1) {
ncpu = othercpus;
if (ncpu < 1)
if (CPU_ISFULLSET(&mask)) {
if (othercpus < 1)
return;
} else {
mask &= ~PCPU_GET(cpumask);
if (mask == 0)
return;
ncpu = bitcount32(mask);
if (ncpu > othercpus) {
/* XXX this should be a panic offence */
printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
ncpu, othercpus);
ncpu = othercpus;
}
/* XXX should be a panic, implied by mask == 0 above */
if (ncpu < 1)
sched_pin();
CPU_NAND(&mask, PCPU_PTR(cpumask));
sched_unpin();
if (CPU_EMPTY(&mask))
return;
}
if (!(read_eflags() & PSL_I))
@ -1245,39 +1265,25 @@ smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_o
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
if (mask == (u_int)-1)
if (CPU_ISFULLSET(&mask)) {
ncpu = othercpus;
ipi_all_but_self(vector);
else
ipi_selected(mask, vector);
} else {
ncpu = 0;
while ((cpu = cpusetobj_ffs(&mask)) != 0) {
cpu--;
CPU_CLR(cpu, &mask);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
vector);
ipi_send_cpu(cpu, vector);
ncpu++;
}
}
while (smp_tlb_wait < ncpu)
ia32_pause();
mtx_unlock_spin(&smp_ipi_mtx);
}
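
The rewritten shootdown no longer popcounts a single word with bitcount32(); it drains the set lowest bit first, cpusetobj_ffs() plus CPU_CLR(), counting recipients as it goes. The same drain loop on a one-word stand-in, using the libc ffs() that cpusetobj_ffs() generalizes to multiple words:

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Drain a mask lowest-bit-first, as the shootdown loop above does. */
int
main(void)
{
    unsigned mask = 0x16;   /* CPUs 1, 2, 4 */
    int cpu, ncpu = 0;

    while ((cpu = ffs((int)mask)) != 0) {
        cpu--;              /* ffs() is 1-based, like cpusetobj_ffs() */
        mask &= ~(1u << cpu);
        printf("IPI to cpu %d\n", cpu);
        ncpu++;
    }
    printf("%d IPIs sent\n", ncpu);
    return (0);
}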
/*
* Send an IPI to specified CPU handling the bitmap logic.
*/
static void
ipi_send_cpu(int cpu, u_int ipi)
{
u_int bitmap, old_pending, new_pending;
KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
if (IPI_IS_BITMAPED(ipi)) {
bitmap = 1 << ipi;
ipi = IPI_BITMAP_VECTOR;
do {
old_pending = cpu_ipi_pending[cpu];
new_pending = old_pending | bitmap;
} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
old_pending, new_pending));
if (old_pending)
return;
}
lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
}
void
smp_cache_flush(void)
{
@ -1324,7 +1330,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
smp_masked_invltlb(cpumask_t mask)
smp_masked_invltlb(cpuset_t mask)
{
if (smp_started) {
@ -1336,7 +1342,7 @@ smp_masked_invltlb(cpumask_t mask)
}
void
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
{
if (smp_started) {
@ -1348,7 +1354,7 @@ smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
}
void
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@ -1401,7 +1407,7 @@ ipi_bitmap_handler(struct trapframe frame)
* send an IPI to a set of cpus.
*/
void
ipi_selected(cpumask_t cpus, u_int ipi)
ipi_selected(cpuset_t cpus, u_int ipi)
{
int cpu;
@ -1411,12 +1417,12 @@ ipi_selected(cpumask_t cpus, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, cpus);
CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);
CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
while ((cpu = ffs(cpus)) != 0) {
while ((cpu = cpusetobj_ffs(&cpus)) != 0) {
cpu--;
cpus &= ~(1 << cpu);
CPU_CLR(cpu, &cpus);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
}
}
@ -1434,7 +1440,7 @@ ipi_cpu(int cpu, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, 1 << cpu);
CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
@ -1447,8 +1453,10 @@ void
ipi_all_but_self(u_int ipi)
{
sched_pin();
if (IPI_IS_BITMAPED(ipi)) {
ipi_selected(PCPU_GET(other_cpus), ipi);
sched_unpin();
return;
}
@ -1458,7 +1466,9 @@ ipi_all_but_self(u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
CPU_OR_ATOMIC(&ipi_nmi_pending, PCPU_PTR(other_cpus));
sched_unpin();
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}
@ -1466,7 +1476,7 @@ ipi_all_but_self(u_int ipi)
int
ipi_nmi_handler()
{
cpumask_t cpumask;
cpuset_t cpumask;
/*
* As long as there is not a simple way to know about a NMI's
@ -1474,11 +1484,13 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
sched_pin();
cpumask = PCPU_GET(cpumask);
if ((ipi_nmi_pending & cpumask) == 0)
sched_unpin();
if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
return (1);
atomic_clear_int(&ipi_nmi_pending, cpumask);
CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
cpustop_handler();
return (0);
}
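
ipi_nmi_handler() tells "our" stop NMIs apart from unrelated ones through a pre-published pending set: the sender sets the target's bit before raising the NMI, and the handler returns 1 (unclaimed) when its own bit is clear. A C11 sketch of that handshake, with one word standing in for the kernel's cpuset_t bookkeeping and all names illustrative:

#include <stdatomic.h>
#include <stdio.h>

/* The IPI_STOP_HARD handshake in miniature: publish the target's bit
 * in a shared pending word before raising the NMI; the handler claims
 * the NMI only if its own bit is set. */
static _Atomic unsigned nmi_pending;

static int
nmi_handler(int cpu)
{
    unsigned mybit = 1u << cpu;

    if ((atomic_load(&nmi_pending) & mybit) == 0)
        return (1);     /* not ours; let someone else handle it */
    atomic_fetch_and(&nmi_pending, ~mybit);
    printf("cpu %d: stopping\n", cpu);
    return (0);
}

int
main(void)
{
    atomic_fetch_or(&nmi_pending, 1u << 3); /* sender targets CPU 3 */
    printf("cpu 2 -> %d\n", nmi_handler(2));
    printf("cpu 3 -> %d\n", nmi_handler(3));
    return (0);
}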
@ -1490,23 +1502,25 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
cpumask_t cpumask;
cpuset_t cpumask;
u_int cpu;
sched_pin();
cpu = PCPU_GET(cpuid);
cpumask = PCPU_GET(cpumask);
sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
atomic_set_int(&stopped_cpus, cpumask);
CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
/* Wait for restart */
while (!(started_cpus & cpumask))
while (!CPU_OVERLAP(&started_cpus, &cpumask))
ia32_pause();
atomic_clear_int(&started_cpus, cpumask);
atomic_clear_int(&stopped_cpus, cpumask);
CPU_NAND_ATOMIC(&started_cpus, &cpumask);
CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();
@ -1530,158 +1544,6 @@ release_aps(void *dummy __unused)
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
cpumask_t mask;
int error;
mask = hlt_cpus_mask;
error = sysctl_handle_int(oidp, &mask, 0, req);
if (error || !req->newptr)
return (error);
if (logical_cpus_mask != 0 &&
(mask & logical_cpus_mask) == logical_cpus_mask)
hlt_logical_cpus = 1;
else
hlt_logical_cpus = 0;
if (! hyperthreading_allowed)
mask |= hyperthreading_cpus_mask;
if ((mask & all_cpus) == all_cpus)
mask &= ~(1<<0);
hlt_cpus_mask = mask;
return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
0, 0, sysctl_hlt_cpus, "IU",
"Bitmap of CPUs to halt. 101 (binary) will halt CPUs 0 and 2.");
static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
int disable, error;
disable = hlt_logical_cpus;
error = sysctl_handle_int(oidp, &disable, 0, req);
if (error || !req->newptr)
return (error);
if (disable)
hlt_cpus_mask |= logical_cpus_mask;
else
hlt_cpus_mask &= ~logical_cpus_mask;
if (! hyperthreading_allowed)
hlt_cpus_mask |= hyperthreading_cpus_mask;
if ((hlt_cpus_mask & all_cpus) == all_cpus)
hlt_cpus_mask &= ~(1<<0);
hlt_logical_cpus = disable;
return (error);
}
static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
int allowed, error;
allowed = hyperthreading_allowed;
error = sysctl_handle_int(oidp, &allowed, 0, req);
if (error || !req->newptr)
return (error);
#ifdef SCHED_ULE
/*
* SCHED_ULE doesn't allow enabling/disabling HT cores at
* run-time.
*/
if (allowed != hyperthreading_allowed)
return (ENOTSUP);
return (error);
#endif
if (allowed)
hlt_cpus_mask &= ~hyperthreading_cpus_mask;
else
hlt_cpus_mask |= hyperthreading_cpus_mask;
if (logical_cpus_mask != 0 &&
(hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
hlt_logical_cpus = 1;
else
hlt_logical_cpus = 0;
if ((hlt_cpus_mask & all_cpus) == all_cpus)
hlt_cpus_mask &= ~(1<<0);
hyperthreading_allowed = allowed;
return (error);
}
static void
cpu_hlt_setup(void *dummy __unused)
{
if (logical_cpus_mask != 0) {
TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
&hlt_logical_cpus);
sysctl_ctx_init(&logical_cpu_clist);
SYSCTL_ADD_PROC(&logical_cpu_clist,
SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
"hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
sysctl_hlt_logical_cpus, "IU", "");
SYSCTL_ADD_UINT(&logical_cpu_clist,
SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
"logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
&logical_cpus_mask, 0, "");
if (hlt_logical_cpus)
hlt_cpus_mask |= logical_cpus_mask;
/*
* If necessary for security purposes, force
* hyperthreading off, regardless of the value
* of hlt_logical_cpus.
*/
if (hyperthreading_cpus_mask) {
SYSCTL_ADD_PROC(&logical_cpu_clist,
SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
"hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
0, 0, sysctl_hyperthreading_allowed, "IU", "");
if (! hyperthreading_allowed)
hlt_cpus_mask |= hyperthreading_cpus_mask;
}
}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
int
mp_grab_cpu_hlt(void)
{
cpumask_t mask;
#ifdef MP_WATCHDOG
u_int cpuid;
#endif
int retval;
mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
cpuid = PCPU_GET(cpuid);
ap_watchdog(cpuid);
#endif
retval = 0;
while (mask & hlt_cpus_mask) {
retval = 1;
__asm __volatile("sti; hlt" : : : "memory");
}
return (retval);
}
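
The retired handlers above all follow the standard SYSCTL_PROC shape: snapshot the live value, let sysctl_handle_int(9) do the user transfer, and commit only on a successful write. A kernel-side sketch of that shape, not standalone code; example_value and the oid name are made up:

/* A minimal CTLTYPE_INT handler in the shape of the retired
 * sysctl_hlt_cpus(): snapshot, transfer, validate, commit. */
static int example_value;

static int
sysctl_example(SYSCTL_HANDLER_ARGS)
{
    int error, val;

    val = example_value;        /* snapshot for the read side */
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr)  /* read-only access, or failure */
        return (error);
    example_value = val;        /* commit the written value */
    return (0);
}
SYSCTL_PROC(_machdep, OID_AUTO, example, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_example, "I", "Illustrative read-write integer knob");

Checking req->newptr is what distinguishes a read from a write: a plain read never reaches the commit step.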
#ifdef COUNT_IPIS
/*
* Setup interrupt counters for IPI handlers.

View File

@ -125,6 +125,8 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#ifdef SMP
#include <sys/smp.h>
#else
#include <sys/cpuset.h>
#endif
#include <vm/vm.h>
@ -386,7 +388,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
kernel_pmap->pm_root = NULL;
kernel_pmap->pm_active = -1; /* don't allow deactivation */
CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvchunk);
LIST_INIT(&allpmaps);
@ -930,19 +932,20 @@ pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invlpg(va);
smp_invlpg(va);
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
invlpg(va);
if (pmap->pm_active & other_cpus)
smp_masked_invlpg(pmap->pm_active & other_cpus, va);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invlpg(other_cpus, va);
}
sched_unpin();
}
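
Each rewritten pmap_invalidate_*() path now has the same shape: a pmap active on the full set invalidates locally and broadcasts, otherwise the local bit is tested with CPU_OVERLAP() and the remote targets are narrowed with CPU_AND() before the masked IPI. The control flow on single-word stand-ins (invalidate() and the printfs are illustrative only):

#include <stdio.h>

/* Act locally when our bit overlaps the active set, then intersect
 * the active set with the other CPUs and only send the masked
 * shootdown when the intersection is non-empty. */
static void
invalidate(unsigned active, unsigned self, unsigned others)
{
    if (active & self)
        printf("local invlpg\n");
    others &= active;           /* CPU_AND(&other_cpus, &pm_active) */
    if (others != 0)
        printf("masked shootdown to %#x\n", others);
}

int
main(void)
{
    /* we are CPU 0; pmap active on CPUs 0 and 2 of a 4-CPU box */
    invalidate(0x5, 0x1, 0xe);
    return (0);
}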
@ -950,23 +953,23 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
vm_offset_t addr;
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
smp_invlpg_range(sva, eva);
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
if (pmap->pm_active & other_cpus)
smp_masked_invlpg_range(pmap->pm_active & other_cpus,
sva, eva);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invlpg_range(other_cpus, sva, eva);
}
sched_unpin();
}
@ -974,19 +977,20 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
void
pmap_invalidate_all(pmap_t pmap)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invltlb();
smp_invltlb();
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
invltlb();
if (pmap->pm_active & other_cpus)
smp_masked_invltlb(pmap->pm_active & other_cpus);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invltlb(other_cpus);
}
sched_unpin();
}
@ -1002,8 +1006,8 @@ pmap_invalidate_cache(void)
}
struct pde_action {
cpumask_t store; /* processor that updates the PDE */
cpumask_t invalidate; /* processors that invalidate their TLB */
cpuset_t store; /* processor that updates the PDE */
cpuset_t invalidate; /* processors that invalidate their TLB */
vm_offset_t va;
pd_entry_t *pde;
pd_entry_t newpde;
@ -1016,7 +1020,10 @@ pmap_update_pde_kernel(void *arg)
pd_entry_t *pde;
pmap_t pmap;
if (act->store == PCPU_GET(cpumask))
sched_pin();
if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
sched_unpin();
/*
* Elsewhere, this operation requires allpmaps_lock for
* synchronization. Here, it does not because it is being
@ -1026,6 +1033,8 @@ pmap_update_pde_kernel(void *arg)
pde = pmap_pde(pmap, act->va);
pde_store(pde, act->newpde);
}
} else
sched_unpin();
}
static void
@ -1033,8 +1042,12 @@ pmap_update_pde_user(void *arg)
{
struct pde_action *act = arg;
if (act->store == PCPU_GET(cpumask))
sched_pin();
if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
sched_unpin();
pde_store(act->pde, act->newpde);
} else
sched_unpin();
}
static void
@ -1042,8 +1055,12 @@ pmap_update_pde_teardown(void *arg)
{
struct pde_action *act = arg;
if ((act->invalidate & PCPU_GET(cpumask)) != 0)
sched_pin();
if (CPU_OVERLAP(&act->invalidate, PCPU_PTR(cpumask))) {
sched_unpin();
pmap_update_pde_invalidate(act->va, act->newpde);
} else
sched_unpin();
}
/*
@ -1058,21 +1075,23 @@ static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
struct pde_action act;
cpumask_t active, cpumask;
cpuset_t active, cpumask, other_cpus;
sched_pin();
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap == kernel_pmap)
active = all_cpus;
else
active = pmap->pm_active;
if ((active & PCPU_GET(other_cpus)) != 0) {
if (CPU_OVERLAP(&active, &other_cpus)) {
act.store = cpumask;
act.invalidate = active;
act.va = va;
act.pde = pde;
act.newpde = newpde;
smp_rendezvous_cpus(cpumask | active,
CPU_OR(&cpumask, &active);
smp_rendezvous_cpus(cpumask,
smp_no_rendevous_barrier, pmap == kernel_pmap ?
pmap_update_pde_kernel : pmap_update_pde_user,
pmap_update_pde_teardown, &act);
@ -1081,7 +1100,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
pmap_kenter_pde(va, newpde);
else
pde_store(pde, newpde);
if ((active & cpumask) != 0)
if (CPU_OVERLAP(&active, &cpumask))
pmap_update_pde_invalidate(va, newpde);
}
sched_unpin();
@ -1095,7 +1114,7 @@ PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invlpg(va);
}
@ -1104,7 +1123,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
vm_offset_t addr;
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
}
@ -1113,7 +1132,7 @@ PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invltlb();
}
@ -1132,7 +1151,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
pmap_kenter_pde(va, newpde);
else
pde_store(pde, newpde);
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
pmap_update_pde_invalidate(va, newpde);
}
#endif /* !SMP */
@ -1689,7 +1708,7 @@ pmap_pinit0(pmap_t pmap)
pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
#endif
pmap->pm_root = NULL;
pmap->pm_active = 0;
CPU_ZERO(&pmap->pm_active);
PCPU_SET(curpmap, pmap);
TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@ -1770,7 +1789,7 @@ pmap_pinit(pmap_t pmap)
#endif
}
pmap->pm_active = 0;
CPU_ZERO(&pmap->pm_active);
TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@ -1886,7 +1905,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
* Deal with a SMP shootdown of other users of the pmap that we are
* trying to dispose of. This can be a bit hairy.
*/
static cpumask_t *lazymask;
static cpuset_t *lazymask;
static u_int lazyptd;
static volatile u_int lazywait;
@ -1895,36 +1914,42 @@ void pmap_lazyfix_action(void);
void
pmap_lazyfix_action(void)
{
cpumask_t mymask = PCPU_GET(cpumask);
#ifdef COUNT_IPIS
(*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
#endif
if (rcr3() == lazyptd)
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
atomic_clear_int(lazymask, mymask);
CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask);
atomic_store_rel_int(&lazywait, 1);
}
static void
pmap_lazyfix_self(cpumask_t mymask)
pmap_lazyfix_self(cpuset_t mymask)
{
if (rcr3() == lazyptd)
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
atomic_clear_int(lazymask, mymask);
CPU_NAND_ATOMIC(lazymask, &mymask);
}
static void
pmap_lazyfix(pmap_t pmap)
{
cpumask_t mymask, mask;
cpuset_t mymask, mask;
u_int spins;
int lsb;
while ((mask = pmap->pm_active) != 0) {
mask = pmap->pm_active;
while (!CPU_EMPTY(&mask)) {
spins = 50000000;
mask = mask & -mask; /* Find least significant set bit */
/* Find least significant set bit. */
lsb = cpusetobj_ffs(&mask);
MPASS(lsb != 0);
lsb--;
CPU_SETOF(lsb, &mask);
mtx_lock_spin(&smp_ipi_mtx);
#ifdef PAE
lazyptd = vtophys(pmap->pm_pdpt);
@ -1932,7 +1957,7 @@ pmap_lazyfix(pmap_t pmap)
lazyptd = vtophys(pmap->pm_pdir);
#endif
mymask = PCPU_GET(cpumask);
if (mask == mymask) {
if (!CPU_CMP(&mask, &mymask)) {
lazymask = &pmap->pm_active;
pmap_lazyfix_self(mymask);
} else {
@ -1949,6 +1974,7 @@ pmap_lazyfix(pmap_t pmap)
mtx_unlock_spin(&smp_ipi_mtx);
if (spins == 0)
printf("pmap_lazyfix: spun for 50000000\n");
mask = pmap->pm_active;
}
}
@ -1968,7 +1994,7 @@ pmap_lazyfix(pmap_t pmap)
cr3 = vtophys(pmap->pm_pdir);
if (cr3 == rcr3()) {
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
pmap->pm_active &= ~(PCPU_GET(cpumask));
CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active);
}
}
#endif /* SMP */
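
One detail worth calling out in pmap_lazyfix(): the old code isolated the least significant CPU with the single-word idiom mask & -mask, which has no multi-word equivalent, so the rewrite finds the bit index with cpusetobj_ffs() and rebuilds a one-bit set with CPU_SETOF(). A one-word sketch showing the two idioms agree:

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Two ways to isolate the least significant set bit: the old
 * single-word "mask & -mask" trick, and the index-based rebuild the
 * cpuset_t conversion switched to. */
int
main(void)
{
    unsigned mask = 0x68;       /* CPUs 3, 5, 6 */
    unsigned old_way = mask & -mask;

    int lsb = ffs((int)mask);   /* 1-based, 0 when empty (cf. MPASS) */
    unsigned new_way = (lsb != 0) ? 1u << (lsb - 1) : 0;

    printf("old %#x, new %#x\n", old_way, new_way); /* both 0x8 */
    return (0);
}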
@ -5078,11 +5104,11 @@ pmap_activate(struct thread *td)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
#if defined(SMP)
atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
#else
oldpmap->pm_active &= ~1;
pmap->pm_active |= 1;
CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
#endif
#ifdef PAE
cr3 = vtophys(pmap->pm_pdpt);

View File

@ -573,11 +573,13 @@ kvtop(void *addr)
static void
cpu_reset_proxy()
{
cpuset_t tcrp;
cpu_reset_proxy_active = 1;
while (cpu_reset_proxy_active == 1)
; /* Wait for other cpu to see that we've started */
stop_cpus((1<<cpu_reset_proxyid));
CPU_SETOF(cpu_reset_proxyid, &tcrp);
stop_cpus(tcrp);
printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
DELAY(1000000);
cpu_reset_real();
@ -596,25 +598,29 @@ cpu_reset()
#endif
#ifdef SMP
cpumask_t map;
cpuset_t map;
u_int cnt;
if (smp_active) {
map = PCPU_GET(other_cpus) & ~stopped_cpus;
if (map != 0) {
sched_pin();
map = PCPU_GET(other_cpus);
CPU_NAND(&map, &stopped_cpus);
if (!CPU_EMPTY(&map)) {
printf("cpu_reset: Stopping other CPUs\n");
stop_cpus(map);
}
if (PCPU_GET(cpuid) != 0) {
cpu_reset_proxyid = PCPU_GET(cpuid);
sched_unpin();
cpustop_restartfunc = cpu_reset_proxy;
cpu_reset_proxy_active = 0;
printf("cpu_reset: Restarting BSP\n");
/* Restart CPU #0. */
/* XXX: restart_cpus(1 << 0); */
atomic_store_rel_int(&started_cpus, (1 << 0));
CPU_SETOF(0, &started_cpus);
wmb();
cnt = 0;
while (cpu_reset_proxy_active == 0 && cnt < 10000000)
@ -626,7 +632,8 @@ cpu_reset()
while (1);
/* NOTREACHED */
}
} else
sched_unpin();
DELAY(1000000);
}
@ -795,7 +802,7 @@ sf_buf_alloc(struct vm_page *m, int flags)
struct sf_head *hash_list;
struct sf_buf *sf;
#ifdef SMP
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
#endif
int error;
@ -867,22 +874,23 @@ sf_buf_alloc(struct vm_page *m, int flags)
*/
#ifdef SMP
if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
sf->cpumask = 0;
CPU_ZERO(&sf->cpumask);
shootdown:
sched_pin();
cpumask = PCPU_GET(cpumask);
if ((sf->cpumask & cpumask) == 0) {
sf->cpumask |= cpumask;
if (!CPU_OVERLAP(&cpumask, &sf->cpumask)) {
CPU_OR(&sf->cpumask, &cpumask);
invlpg(sf->kva);
}
if ((flags & SFB_CPUPRIVATE) == 0) {
other_cpus = PCPU_GET(other_cpus) & ~sf->cpumask;
if (other_cpus != 0) {
sf->cpumask |= other_cpus;
other_cpus = PCPU_GET(other_cpus);
CPU_NAND(&other_cpus, &sf->cpumask);
if (!CPU_EMPTY(&other_cpus)) {
CPU_OR(&sf->cpumask, &other_cpus);
smp_masked_invlpg(other_cpus, sf->kva);
}
}
sched_unpin();
sched_unpin();
#else
if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
pmap_invalidate_page(kernel_pmap, sf->kva);

View File

@ -69,7 +69,6 @@ typedef unsigned long long __uint64_t;
* Standard type definitions.
*/
typedef unsigned long __clock_t; /* clock()... */
typedef unsigned int __cpumask_t;
typedef __int32_t __critical_t;
typedef long double __double_t;
typedef long double __float_t;

View File

@ -155,6 +155,7 @@
#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
@ -433,7 +434,7 @@ struct pmap {
struct mtx pm_mtx;
pd_entry_t *pm_pdir; /* KVA of page directory */
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
cpumask_t pm_active; /* active on cpus */
cpuset_t pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statistics */
LIST_ENTRY(pmap) pm_list; /* List of all pmaps */
#ifdef PAE

View File

@ -29,6 +29,7 @@
#ifndef _MACHINE_SF_BUF_H_
#define _MACHINE_SF_BUF_H_
#include <sys/_cpuset.h>
#include <sys/queue.h>
struct vm_page;
@ -40,7 +41,7 @@ struct sf_buf {
vm_offset_t kva; /* va of mapping */
int ref_count; /* usage of this mapping */
#ifdef SMP
cpumask_t cpumask; /* cpus on which mapping is valid */
cpuset_t cpumask; /* cpus on which mapping is valid */
#endif
};

View File

@ -66,17 +66,16 @@ void ipi_bitmap_handler(struct trapframe frame);
#endif
void ipi_cpu(int cpu, u_int ipi);
int ipi_nmi_handler(void);
void ipi_selected(cpumask_t cpus, u_int ipi);
void ipi_selected(cpuset_t cpus, u_int ipi);
u_int mp_bootaddress(u_int);
int mp_grab_cpu_hlt(void);
void smp_cache_flush(void);
void smp_invlpg(vm_offset_t addr);
void smp_masked_invlpg(cpumask_t mask, vm_offset_t addr);
void smp_masked_invlpg(cpuset_t mask, vm_offset_t addr);
void smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);
void smp_masked_invlpg_range(cpumask_t mask, vm_offset_t startva,
void smp_masked_invlpg_range(cpuset_t mask, vm_offset_t startva,
vm_offset_t endva);
void smp_invltlb(void);
void smp_masked_invltlb(cpumask_t mask);
void smp_masked_invltlb(cpuset_t mask);
#ifdef XEN
void ipi_to_irq_init(void);

View File

@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h> /* cngetc() */
#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
@ -116,7 +117,7 @@ volatile int smp_tlb_wait;
typedef void call_data_func_t(uintptr_t , uintptr_t);
static u_int logical_cpus;
static volatile cpumask_t ipi_nmi_pending;
static volatile cpuset_t ipi_nmi_pending;
/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;
@ -149,7 +150,7 @@ static int start_ap(int apic_id);
static void release_aps(void *dummy);
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static cpuset_t hyperthreading_cpus_mask;
extern void Xhypervisor_callback(void);
extern void failsafe_callback(void);
@ -239,7 +240,7 @@ cpu_mp_probe(void)
* Always record BSP in CPU map so that the mbuf init code works
* correctly.
*/
all_cpus = 1;
CPU_SETOF(0, &all_cpus);
if (mp_ncpus == 0) {
/*
* No CPUs were found, so this must be a UP system. Setup
@ -293,7 +294,8 @@ cpu_mp_start(void)
start_all_aps();
/* Setup the initial logical CPUs info. */
logical_cpus = logical_cpus_mask = 0;
logical_cpus = 0;
CPU_ZERO(&logical_cpus_mask);
if (cpu_feature & CPUID_HTT)
logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
@ -521,6 +523,7 @@ xen_smp_intr_init_cpus(void *unused)
void
init_secondary(void)
{
cpuset_t tcpuset, tallcpus;
vm_offset_t addr;
int gsel_tss;
@ -600,18 +603,21 @@ init_secondary(void)
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
tcpuset = PCPU_GET(cpumask);
/* Determine if we are a logical CPU. */
if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
logical_cpus_mask |= PCPU_GET(cpumask);
CPU_OR(&logical_cpus_mask, &tcpuset);
/* Determine if we are a hyperthread. */
if (hyperthreading_cpus > 1 &&
PCPU_GET(apic_id) % hyperthreading_cpus != 0)
hyperthreading_cpus_mask |= PCPU_GET(cpumask);
CPU_OR(&hyperthreading_cpus_mask, &tcpuset);
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
tallcpus = all_cpus;
CPU_NAND(&tallcpus, &tcpuset);
PCPU_SET(other_cpus, tallcpus);
#if 0
if (bootverbose)
lapic_dump("AP");
@ -725,6 +731,7 @@ assign_cpu_ids(void)
int
start_all_aps(void)
{
cpuset_t tallcpus;
int x,apic_id, cpu;
struct pcpu *pc;
@ -778,12 +785,14 @@ start_all_aps(void)
panic("bye-bye");
}
all_cpus |= (1 << cpu); /* record AP in CPU map */
CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
}
/* build our map of 'other' CPUs */
PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
tallcpus = all_cpus;
CPU_NAND(&tallcpus, PCPU_PTR(cpumask));
PCPU_SET(other_cpus, tallcpus);
pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
@ -1012,29 +1021,20 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
}
static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
int ncpu, othercpus;
int cpu, ncpu, othercpus;
struct _call_data data;
othercpus = mp_ncpus - 1;
if (mask == (u_int)-1) {
ncpu = othercpus;
if (ncpu < 1)
if (CPU_ISFULLSET(&mask)) {
if (othercpus < 1)
return;
} else {
mask &= ~PCPU_GET(cpumask);
if (mask == 0)
return;
ncpu = bitcount32(mask);
if (ncpu > othercpus) {
/* XXX this should be a panic offence */
printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
ncpu, othercpus);
ncpu = othercpus;
}
/* XXX should be a panic, implied by mask == 0 above */
if (ncpu < 1)
critical_enter();
CPU_NAND(&mask, PCPU_PTR(cpumask));
critical_exit();
if (CPU_EMPTY(&mask))
return;
}
if (!(read_eflags() & PSL_I))
@ -1046,10 +1046,20 @@ smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_o
call_data->arg1 = addr1;
call_data->arg2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
if (mask == (u_int)-1)
if (CPU_ISFULLSET(&mask)) {
ncpu = othercpus;
ipi_all_but_self(vector);
else
ipi_selected(mask, vector);
} else {
ncpu = 0;
while ((cpu = cpusetobj_ffs(&mask)) != 0) {
cpu--;
CPU_CLR(cpu, &mask);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
vector);
ipi_send_cpu(cpu, vector);
ncpu++;
}
}
while (smp_tlb_wait < ncpu)
ia32_pause();
call_data = NULL;
@ -1092,7 +1102,7 @@ smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
}
void
smp_masked_invltlb(cpumask_t mask)
smp_masked_invltlb(cpuset_t mask)
{
if (smp_started) {
@ -1101,7 +1111,7 @@ smp_masked_invltlb(cpumask_t mask)
}
void
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
{
if (smp_started) {
@ -1110,7 +1120,7 @@ smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
}
void
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
{
if (smp_started) {
@ -1122,7 +1132,7 @@ smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
* send an IPI to a set of cpus.
*/
void
ipi_selected(cpumask_t cpus, u_int ipi)
ipi_selected(cpuset_t cpus, u_int ipi)
{
int cpu;
@ -1132,11 +1142,11 @@ ipi_selected(cpumask_t cpus, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, cpus);
CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);
while ((cpu = ffs(cpus)) != 0) {
while ((cpu = cpusetobj_ffs(&cpus)) != 0) {
cpu--;
cpus &= ~(1 << cpu);
CPU_CLR(cpu, &cpus);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
}
@ -1155,7 +1165,7 @@ ipi_cpu(int cpu, u_int ipi)
* Set the mask of receiving CPUs for this purpose.
*/
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, 1 << cpu);
CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);
CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
ipi_send_cpu(cpu, ipi);
@ -1167,23 +1177,27 @@ ipi_cpu(int cpu, u_int ipi)
void
ipi_all_but_self(u_int ipi)
{
cpuset_t other_cpus;
/*
* IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
* of help in order to understand what is the source.
* Set the mask of receiving CPUs for this purpose.
*/
sched_pin();
other_cpus = PCPU_GET(other_cpus);
sched_unpin();
if (ipi == IPI_STOP_HARD)
atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));
CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
ipi_selected(PCPU_GET(other_cpus), ipi);
ipi_selected(other_cpus, ipi);
}
int
ipi_nmi_handler()
{
cpumask_t cpumask;
cpuset_t cpumask;
/*
* As long as there is not a simple way to know about a NMI's
@ -1191,11 +1205,13 @@ ipi_nmi_handler()
* the global pending bitword an IPI_STOP_HARD has been issued
* and should be handled.
*/
sched_pin();
cpumask = PCPU_GET(cpumask);
if ((ipi_nmi_pending & cpumask) == 0)
sched_unpin();
if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
return (1);
atomic_clear_int(&ipi_nmi_pending, cpumask);
CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
cpustop_handler();
return (0);
}
@ -1207,20 +1223,25 @@ ipi_nmi_handler()
void
cpustop_handler(void)
{
int cpu = PCPU_GET(cpuid);
int cpumask = PCPU_GET(cpumask);
cpuset_t cpumask;
int cpu;
sched_pin();
cpumask = PCPU_GET(cpumask);
cpu = PCPU_GET(cpuid);
sched_unpin();
savectx(&stoppcbs[cpu]);
/* Indicate that we are stopped */
atomic_set_int(&stopped_cpus, cpumask);
CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
/* Wait for restart */
while (!(started_cpus & cpumask))
while (!CPU_OVERLAP(&started_cpus, &cpumask))
ia32_pause();
atomic_clear_int(&started_cpus, cpumask);
atomic_clear_int(&stopped_cpus, cpumask);
CPU_NAND_ATOMIC(&started_cpus, &cpumask);
CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
if (cpu == 0 && cpustop_restartfunc != NULL) {
cpustop_restartfunc();

View File

@ -422,7 +422,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
#ifdef PAE
kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
kernel_pmap->pm_active = -1; /* don't allow deactivation */
CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvchunk);
LIST_INIT(&allpmaps);
mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
@ -802,22 +802,23 @@ pmap_cache_bits(int mode, boolean_t is_pde)
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
pmap, va);
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invlpg(va);
smp_invlpg(va);
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
invlpg(va);
if (pmap->pm_active & other_cpus)
smp_masked_invlpg(pmap->pm_active & other_cpus, va);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invlpg(other_cpus, va);
}
sched_unpin();
PT_UPDATES_FLUSH();
@ -826,26 +827,26 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
vm_offset_t addr;
CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
pmap, sva, eva);
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
smp_invlpg_range(sva, eva);
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
if (pmap->pm_active & other_cpus)
smp_masked_invlpg_range(pmap->pm_active & other_cpus,
sva, eva);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invlpg_range(other_cpus, sva, eva);
}
sched_unpin();
PT_UPDATES_FLUSH();
@ -854,21 +855,22 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
void
pmap_invalidate_all(pmap_t pmap)
{
cpumask_t cpumask, other_cpus;
cpuset_t cpumask, other_cpus;
CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);
sched_pin();
if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
invltlb();
smp_invltlb();
} else {
cpumask = PCPU_GET(cpumask);
other_cpus = PCPU_GET(other_cpus);
if (pmap->pm_active & cpumask)
if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
invltlb();
if (pmap->pm_active & other_cpus)
smp_masked_invltlb(pmap->pm_active & other_cpus);
CPU_AND(&other_cpus, &pmap->pm_active);
if (!CPU_EMPTY(&other_cpus))
smp_masked_invltlb(other_cpus);
}
sched_unpin();
}
@ -893,7 +895,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
pmap, va);
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invlpg(va);
PT_UPDATES_FLUSH();
}
@ -907,7 +909,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
pmap, sva, eva);
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
PT_UPDATES_FLUSH();
@ -919,7 +921,7 @@ pmap_invalidate_all(pmap_t pmap)
CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);
if (pmap == kernel_pmap || pmap->pm_active)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invltlb();
}
@ -1449,7 +1451,7 @@ pmap_pinit0(pmap_t pmap)
#ifdef PAE
pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
#endif
pmap->pm_active = 0;
CPU_ZERO(&pmap->pm_active);
PCPU_SET(curpmap, pmap);
TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@ -1556,7 +1558,7 @@ pmap_pinit(pmap_t pmap)
}
xen_flush_queue();
vm_page_unlock_queues();
pmap->pm_active = 0;
CPU_ZERO(&pmap->pm_active);
TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@ -1686,7 +1688,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
* Deal with a SMP shootdown of other users of the pmap that we are
* trying to dispose of. This can be a bit hairy.
*/
static cpumask_t *lazymask;
static cpuset_t *lazymask;
static u_int lazyptd;
static volatile u_int lazywait;
@ -1695,36 +1697,42 @@ void pmap_lazyfix_action(void);
void
pmap_lazyfix_action(void)
{
cpumask_t mymask = PCPU_GET(cpumask);
#ifdef COUNT_IPIS
(*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
#endif
if (rcr3() == lazyptd)
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
atomic_clear_int(lazymask, mymask);
CPU_CLR_ATOMIC(PCPU_GET(cpuid), lazymask);
atomic_store_rel_int(&lazywait, 1);
}
static void
pmap_lazyfix_self(cpumask_t mymask)
pmap_lazyfix_self(cpuset_t mymask)
{
if (rcr3() == lazyptd)
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
atomic_clear_int(lazymask, mymask);
CPU_NAND_ATOMIC(lazymask, &mymask);
}
static void
pmap_lazyfix(pmap_t pmap)
{
cpumask_t mymask, mask;
cpuset_t mymask, mask;
u_int spins;
int lsb;
while ((mask = pmap->pm_active) != 0) {
mask = pmap->pm_active;
while (!CPU_EMPTY(&mask)) {
spins = 50000000;
mask = mask & -mask; /* Find least significant set bit */
/* Find least significant set bit. */
lsb = cpusetobj_ffs(&mask);
MPASS(lsb != 0);
lsb--;
CPU_SETOF(lsb, &mask);
mtx_lock_spin(&smp_ipi_mtx);
#ifdef PAE
lazyptd = vtophys(pmap->pm_pdpt);
@ -1732,7 +1740,7 @@ pmap_lazyfix(pmap_t pmap)
lazyptd = vtophys(pmap->pm_pdir);
#endif
mymask = PCPU_GET(cpumask);
if (mask == mymask) {
if (!CPU_CMP(&mask, &mymask)) {
lazymask = &pmap->pm_active;
pmap_lazyfix_self(mymask);
} else {
@ -1749,6 +1757,7 @@ pmap_lazyfix(pmap_t pmap)
mtx_unlock_spin(&smp_ipi_mtx);
if (spins == 0)
printf("pmap_lazyfix: spun for 50000000\n");
mask = pmap->pm_active;
}
}
@ -1768,7 +1777,7 @@ pmap_lazyfix(pmap_t pmap)
cr3 = vtophys(pmap->pm_pdir);
if (cr3 == rcr3()) {
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
pmap->pm_active &= ~(PCPU_GET(cpumask));
CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active);
}
}
#endif /* SMP */
@ -4123,11 +4132,11 @@ pmap_activate(struct thread *td)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
oldpmap = PCPU_GET(curpmap);
#if defined(SMP)
atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
#else
oldpmap->pm_active &= ~1;
pmap->pm_active |= 1;
CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
#endif
#ifdef PAE
cr3 = vtophys(pmap->pm_pdpt);

View File

@ -56,7 +56,14 @@ acpi_machdep_quirks(int *quirks)
void
acpi_cpu_c1()
{
#ifdef INVARIANTS
register_t ie;
ie = intr_disable();
KASSERT(ie == 0, ("%s called with interrupts enabled\n", __func__));
#endif
ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
ia64_enable_intr();
}
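
The new INVARIANTS check leans on intr_disable() returning the previous interrupt state, so a zero return proves the caller already had interrupts off without needing a separate query primitive. A small sketch of the pattern under that assumption; fake_intr_disable() merely models the return-previous-state contract:

#include <assert.h>
#include <stdio.h>

/* Stand-in for the intr_disable()/KASSERT pattern: the disable
 * primitive returns the *previous* enable state, so asserting on the
 * return value checks the caller's context as a side effect. */
static int intr_enabled = 0;    /* entered with interrupts already off */

static int
fake_intr_disable(void)
{
    int prev = intr_enabled;

    intr_enabled = 0;
    return (prev);
}

int
main(void)
{
    int ie = fake_intr_disable();

    assert(ie == 0 && "called with interrupts enabled");
    printf("ok: interrupts were already disabled\n");
    return (0);
}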
void *

View File

@ -411,12 +411,34 @@ cpu_halt()
void
cpu_idle(int busy)
{
struct ia64_pal_result res;
register_t ie;
if (cpu_idle_hook != NULL)
#if 0
if (!busy) {
critical_enter();
cpu_idleclock();
}
#endif
ie = intr_disable();
KASSERT(ie != 0, ("%s called with interrupts disabled\n", __func__));
if (sched_runnable())
ia64_enable_intr();
else if (cpu_idle_hook != NULL) {
(*cpu_idle_hook)();
else
res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
/* The hook must enable interrupts! */
} else {
ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
ia64_enable_intr();
}
#if 0
if (!busy) {
cpu_activeclock();
critical_exit();
}
#endif
}
int
@ -644,9 +666,12 @@ calculate_frequencies(void)
{
struct ia64_sal_result sal;
struct ia64_pal_result pal;
register_t ie;
ie = intr_disable();
sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
intr_restore(ie);
if (sal.sal_status == 0 && pal.pal_status == 0) {
if (bootverbose) {
@ -761,6 +786,8 @@ ia64_init(void)
ia64_sal_init();
calculate_frequencies();
set_cputicker(ia64_get_itc, (u_long)itc_freq * 1000000, 0);
/*
* Setup the PCPU data for the bootstrap processor. It is needed
* by printf(). Also, since printf() has critical sections, we

View File

@ -139,18 +139,18 @@ ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
static u_int
ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
{
cpumask_t mybit;
cpuset_t mybit;
PCPU_INC(md.stats.pcs_nstops);
mybit = PCPU_GET(cpumask);
savectx(PCPU_PTR(md.pcb));
atomic_set_int(&stopped_cpus, mybit);
while ((started_cpus & mybit) == 0)
CPU_OR_ATOMIC(&stopped_cpus, &mybit);
while (!CPU_OVERLAP(&started_cpus, &mybit))
cpu_spinwait();
atomic_clear_int(&started_cpus, mybit);
atomic_clear_int(&stopped_cpus, mybit);
CPU_NAND_ATOMIC(&started_cpus, &mybit);
CPU_NAND_ATOMIC(&stopped_cpus, &mybit);
return (0);
}
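
ia64_ih_stop() is the stop-IPI rendezvous: publish your bit in stopped_cpus, spin until the restarting CPU sets your bit in started_cpus, then clear both words. A runnable C11 sketch of the same handshake with one pthread standing in for the stopped CPU (build with -pthread; all names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned stopped_cpus, started_cpus;

static void *
stopped_cpu(void *arg)
{
    unsigned mybit = 1u << 2;   /* pretend we are CPU 2 */

    atomic_fetch_or(&stopped_cpus, mybit);
    while ((atomic_load(&started_cpus) & mybit) == 0)
        ;                       /* cpu_spinwait() */
    atomic_fetch_and(&started_cpus, ~mybit);
    atomic_fetch_and(&stopped_cpus, ~mybit);
    return (NULL);
}

int
main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, stopped_cpu, NULL);
    while ((atomic_load(&stopped_cpus) & (1u << 2)) == 0)
        ;                       /* wait until CPU 2 reports stopped */
    printf("cpu 2 stopped; restarting\n");
    atomic_fetch_or(&started_cpus, 1u << 2);
    pthread_join(&t, NULL);
    printf("cpu 2 resumed\n");
    return (0);
}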
@ -286,7 +286,7 @@ cpu_mp_add(u_int acpi_id, u_int id, u_int eid)
cpuid = (IA64_LID_GET_SAPIC_ID(ia64_get_lid()) == sapic_id)
? 0 : smp_cpus++;
KASSERT((all_cpus & (1UL << cpuid)) == 0,
KASSERT(!CPU_ISSET(cpuid, &all_cpus),
("%s: cpu%d already in CPU map", __func__, acpi_id));
if (cpuid != 0) {
@ -300,7 +300,7 @@ cpu_mp_add(u_int acpi_id, u_int id, u_int eid)
pc->pc_acpi_id = acpi_id;
pc->pc_md.lid = IA64_LID_SET_SAPIC_ID(sapic_id);
all_cpus |= (1UL << pc->pc_cpuid);
CPU_SET(pc->pc_cpuid, &all_cpus);
}
void
@ -359,7 +359,8 @@ cpu_mp_start()
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
pc->pc_md.current_pmap = kernel_pmap;
pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
pc->pc_other_cpus = all_cpus;
CPU_NAND(&pc->pc_other_cpus, &pc->pc_cpumask);
/* The BSP is obviously running already. */
if (pc->pc_cpuid == 0) {
pc->pc_md.awake = 1;
@ -458,12 +459,12 @@ cpu_mp_unleash(void *dummy)
* send an IPI to a set of cpus.
*/
void
ipi_selected(cpumask_t cpus, int ipi)
ipi_selected(cpuset_t cpus, int ipi)
{
struct pcpu *pc;
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
if (cpus & pc->pc_cpumask)
if (CPU_OVERLAP(&cpus, &pc->pc_cpumask))
ipi_send(pc, ipi);
}
}

Some files were not shown because too many files have changed in this diff.