Merge ^/head r287527 through r287679.

This commit is contained in:
Dimitry Andric 2015-09-11 17:20:03 +00:00
commit 0e1e5c22c2
171 changed files with 4250 additions and 3944 deletions

View File

@ -12,17 +12,24 @@
name="background_fsck"
rcvar="background_fsck"
start_cmd="bgfsck_start"
start_precmd="bgfsck_start_precmd"
stop_cmd=":"
bgfsck_start_precmd()
{
if [ $($ID -u) != 0 ]; then
err 1 "Must be root."
fi
}
bgfsck_start()
{
if [ -z "${rc_force}" ]; then
background_fsck_delay=${background_fsck_delay:-0}
else
: ${background_fsck_delay=0}
if [ -n "${rc_force}" ]; then
background_fsck_delay=0
fi
if [ ${background_fsck_delay} -lt 0 ]; then
echo "Background file system checks delayed indefinitely"
warn "Background file system checks delayed indefinitely"
return 0
fi

View File

@ -419,7 +419,7 @@ jail_status()
jail_start()
{
local _j _jid _jl
local _j _jid _jl _id _name
if [ $# = 0 ]; then
return
@ -432,10 +432,9 @@ jail_start()
command_args="-f $jail_conf -c"
_tmp=`mktemp -t jail` || exit 3
if $command $rc_flags $command_args >> $_tmp 2>&1; then
$jail_jls jid name | while read IN; do
set -- $IN
echo -n " $2"
echo $1 > /var/run/jail_$2.id
$jail_jls jid name | while read _id _name; do
echo -n " $_name"
echo $_id > /var/run/jail_${_name}.id
done
else
tail -1 $_tmp

View File

@ -88,7 +88,7 @@ netif_start()
fi
if [ -f /etc/rc.d/routing -a -n "$cmdifn" ] ; then
for _if in $cmdifn; do
/etc/rc.d/routing start any $_if
/etc/rc.d/routing static any $_if
done
fi
}

View File

@ -234,7 +234,7 @@ To erase a line you've written at the command prompt, use "Ctrl-U".
To find the hostname associated with an IP address, use
drill -x IP_address
-- Allan Jude <allanjude@freebsd.org>
-- Allan Jude <allanjude@FreeBSD.org>
%
To obtain a neat PostScript rendering of a manual page, use ``-t'' switch
of the man(1) utility: ``man -t <topic>''. For example:

View File

@ -28,7 +28,7 @@
.\" @(#)sysctl.3 8.4 (Berkeley) 5/9/95
.\" $FreeBSD$
.\"
.Dd May 17, 2013
.Dd September 10, 2015
.Dt SYSCTL 3
.Os
.Sh NAME
@ -736,8 +736,6 @@ privilege may change the value.
.It "VM_LOADAVG struct loadavg no"
.It "VM_TOTAL struct vmtotal no"
.It "VM_SWAPPING_ENABLED integer maybe"
.It "VM_V_CACHE_MAX integer yes"
.It "VM_V_CACHE_MIN integer yes"
.It "VM_V_FREE_MIN integer yes"
.It "VM_V_FREE_RESERVED integer yes"
.It "VM_V_FREE_TARGET integer yes"
@ -757,12 +755,6 @@ The returned data consists of a
1 if process swapping is enabled or 0 if disabled.
This variable is
permanently set to 0 if the kernel was built with swapping disabled.
.It Li VM_V_CACHE_MAX
Maximum desired size of the cache queue.
.It Li VM_V_CACHE_MIN
Minimum desired size of the cache queue.
If the cache queue size
falls very far below this value, the pageout daemon is awakened.
.It Li VM_V_FREE_MIN
Minimum amount of memory (cache memory plus free memory)
required to be available before a process waiting on memory will be

View File

@ -78,6 +78,8 @@ getnameinfo(const struct sockaddr *sa, socklen_t salen,
char *host, size_t hostlen, char *serv, size_t servlen,
int flags)
{
if (sa == NULL)
return (EAI_FAIL);
switch (sa->sa_family) {
case AF_INET:
@ -124,25 +126,19 @@ getnameinfo_inet(const struct sockaddr *sa, socklen_t salen,
struct servent *sp;
struct hostent *hp;
u_short port;
int family, i;
const char *addr;
u_int32_t v4a;
int h_error;
char numserv[512];
char numaddr[512];
if (sa == NULL)
return EAI_FAIL;
for (afd = &afdl[0]; afd->a_af > 0; afd++) {
if (afd->a_af == sa->sa_family)
break;
}
if (afd->a_af == 0)
return (EAI_FAMILY);
family = sa->sa_family;
for (i = 0; afdl[i].a_af; i++)
if (afdl[i].a_af == family) {
afd = &afdl[i];
goto found;
}
return EAI_FAMILY;
found:
if (salen != afd->a_socklen)
return EAI_FAIL;

View File

@ -70,9 +70,7 @@ if_nametoindex(const char *ifname)
s = _socket(AF_INET, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (s != -1) {
#ifdef PURIFY
memset(&ifr, 0, sizeof(ifr));
#endif
strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
if (_ioctl(s, SIOCGIFINDEX, &ifr) != -1) {
_close(s);

View File

@ -91,7 +91,7 @@ acl_create_entry_np(acl_t *acl_p, acl_entry_t *entry_p, int offset)
return (-1);
}
if (offset < 0 || offset >= acl_int->acl_cnt) {
if (offset < 0 || offset > acl_int->acl_cnt) {
errno = EINVAL;
return (-1);
}

View File

@ -25,10 +25,7 @@ NETBSD_ATF_TESTS_C+= kevent_test
NETBSD_ATF_TESTS_C+= kill_test
NETBSD_ATF_TESTS_C+= link_test
NETBSD_ATF_TESTS_C+= listen_test
# On arm64 triggers panic ARM64TODO: pmap_mincore (PR202307).
.if ${MACHINE_CPUARCH} != "aarch64"
NETBSD_ATF_TESTS_C+= mincore_test
.endif
NETBSD_ATF_TESTS_C+= mkdir_test
NETBSD_ATF_TESTS_C+= mkfifo_test
NETBSD_ATF_TESTS_C+= mknod_test

View File

@ -50,9 +50,11 @@ __weak_reference(_pthread_once, pthread_once);
static void
once_cancel_handler(void *arg)
{
pthread_once_t *once_control = arg;
pthread_once_t *once_control;
if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS, ONCE_NEVER_DONE))
once_control = arg;
if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS,
ONCE_NEVER_DONE))
return;
atomic_store_rel_int(&once_control->state, ONCE_NEVER_DONE);
_thr_umtx_wake(&once_control->state, INT_MAX, 0);
@ -68,16 +70,22 @@ _pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
for (;;) {
state = once_control->state;
if (state == ONCE_DONE)
if (state == ONCE_DONE) {
atomic_thread_fence_acq();
return (0);
}
if (state == ONCE_NEVER_DONE) {
if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_IN_PROGRESS))
if (atomic_cmpset_int(&once_control->state, state,
ONCE_IN_PROGRESS))
break;
} else if (state == ONCE_IN_PROGRESS) {
if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_WAIT))
_thr_umtx_wait_uint(&once_control->state, ONCE_WAIT, NULL, 0);
if (atomic_cmpset_int(&once_control->state, state,
ONCE_WAIT))
_thr_umtx_wait_uint(&once_control->state,
ONCE_WAIT, NULL, 0);
} else if (state == ONCE_WAIT) {
_thr_umtx_wait_uint(&once_control->state, state, NULL, 0);
_thr_umtx_wait_uint(&once_control->state, state,
NULL, 0);
} else
return (EINVAL);
}
@ -86,7 +94,8 @@ _pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
THR_CLEANUP_PUSH(curthread, once_cancel_handler, once_control);
init_routine();
THR_CLEANUP_POP(curthread, 0);
if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS, ONCE_DONE))
if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS,
ONCE_DONE))
return (0);
atomic_store_rel_int(&once_control->state, ONCE_DONE);
_thr_umtx_wake(&once_control->state, INT_MAX, 0);
@ -94,6 +103,6 @@ _pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
}
void
_thr_once_init()
_thr_once_init(void)
{
}

View File

@ -34,7 +34,7 @@ if [ "x$1" = "x-b" ]; then
mkdir efi
mount -t msdosfs /dev/$device efi
mkdir -p efi/efi/boot
cp ${4}/boot/loader.efi efi/efi/boot/bootx64.efi
cp "$4/boot/loader.efi" efi/efi/boot/bootx64.efi
umount efi
rmdir efi
mdconfig -d -u $device
@ -46,15 +46,15 @@ else
fi
if [ $# -lt 3 ]; then
echo Usage: $0 '[-b] image-label image-name base-bits-dir [extra-bits-dir]'
echo "Usage: $0 [-b] image-label image-name base-bits-dir [extra-bits-dir]"
exit 1
fi
LABEL=`echo $1 | tr '[:lower:]' '[:upper:]'`; shift
NAME=$1; shift
LABEL=`echo "$1" | tr '[:lower:]' '[:upper:]'`; shift
NAME="$1"; shift
publisher="The FreeBSD Project. http://www.FreeBSD.org/"
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > $1/etc/fstab
makefs -t cd9660 $bootable -o rockridge -o label=$LABEL -o publisher="$publisher" $NAME $*
rm $1/etc/fstab
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > "$1/etc/fstab"
makefs -t cd9660 $bootable -o rockridge -o label="$LABEL" -o publisher="$publisher" "$NAME" "$@"
rm "$1/etc/fstab"
rm -f efiboot.img

View File

@ -32,14 +32,14 @@ else
fi
if [ $# -lt 3 ]; then
echo Usage: $0 '[-b] image-label image-name base-bits-dir [extra-bits-dir]'
echo "Usage: $0 [-b] image-label image-name base-bits-dir [extra-bits-dir]"
exit 1
fi
LABEL=`echo $1 | tr '[:lower:]' '[:upper:]'`; shift
NAME=$1; shift
LABEL=`echo "$1" | tr '[:lower:]' '[:upper:]'`; shift
NAME="$1"; shift
publisher="The FreeBSD Project. http://www.FreeBSD.org/"
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > $1/etc/fstab
makefs -t cd9660 $bootable -o rockridge -o label=$LABEL -o publisher="$publisher" $NAME $*
rm $1/etc/fstab
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > "$1/etc/fstab"
makefs -t cd9660 $bootable -o rockridge -o label="$LABEL" -o publisher="$publisher" "$NAME" "$@"
rm "$1/etc/fstab"

View File

@ -32,14 +32,14 @@ else
fi
if [ $# -lt 3 ]; then
echo Usage: $0 '[-b] image-label image-name base-bits-dir [extra-bits-dir]'
echo "Usage: $0 [-b] image-label image-name base-bits-dir [extra-bits-dir]"
exit 1
fi
LABEL=`echo $1 | tr '[:lower:]' '[:upper:]'`; shift
NAME=$1; shift
LABEL=`echo "$1" | tr '[:lower:]' '[:upper:]'`; shift
NAME="$1"; shift
publisher="The FreeBSD Project. http://www.FreeBSD.org/"
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > $1/etc/fstab
makefs -t cd9660 $bootable -o rockridge -o label=$LABEL -o publisher="$publisher" $NAME $*
rm $1/etc/fstab
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > "$1/etc/fstab"
makefs -t cd9660 $bootable -o rockridge -o label="$LABEL" -o publisher="$publisher" "$NAME" "$@"
rm "$1/etc/fstab"

View File

@ -25,18 +25,18 @@
if [ "x$1" = "x-b" ]; then
# Apple boot code
uudecode -o /tmp/hfs-boot-block.bz2 `dirname $0`/hfs-boot.bz2.uu
uudecode -o /tmp/hfs-boot-block.bz2 "`dirname "$0"`/hfs-boot.bz2.uu"
bzip2 -d /tmp/hfs-boot-block.bz2
OFFSET=$(hd /tmp/hfs-boot-block | grep 'Loader START' | cut -f 1 -d ' ')
OFFSET=0x$(echo 0x$OFFSET | awk '{printf("%x\n",$1/512);}')
dd if=$4/boot/loader of=/tmp/hfs-boot-block seek=$OFFSET conv=notrunc
dd if="$4/boot/loader" of=/tmp/hfs-boot-block seek=$OFFSET conv=notrunc
bootable="-o bootimage=macppc;/tmp/hfs-boot-block -o no-emul-boot"
# pSeries/PAPR boot code
mkdir -p $4/ppc/chrp
cp $4/boot/loader $4/ppc/chrp
cat > $4/ppc/bootinfo.txt << EOF
mkdir -p "$4/ppc/chrp"
cp "$4/boot/loader" "$4/ppc/chrp"
cat > "$4/ppc/bootinfo.txt" << EOF
<chrp-boot>
<description>FreeBSD Install</description>
<os-name>FreeBSD</os-name>
@ -46,7 +46,7 @@ EOF
bootable="$bootable -o chrp-boot"
# Playstation 3 boot code
echo "FreeBSD Install='/boot/loader.ps3'" > $4/etc/kboot.conf
echo "FreeBSD Install='/boot/loader.ps3'" > "$4/etc/kboot.conf"
shift
else
@ -54,16 +54,16 @@ else
fi
if [ $# -lt 3 ]; then
echo Usage: $0 '[-b] image-label image-name base-bits-dir [extra-bits-dir]'
echo "Usage: $0 [-b] image-label image-name base-bits-dir [extra-bits-dir]"
exit 1
fi
LABEL=`echo $1 | tr '[:lower:]' '[:upper:]'`; shift
NAME=$1; shift
LABEL=`echo "$1" | tr '[:lower:]' '[:upper:]'`; shift
NAME="$1"; shift
publisher="The FreeBSD Project. http://www.FreeBSD.org/"
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > $1/etc/fstab
makefs -t cd9660 $bootable -o rockridge -o label=$LABEL -o publisher="$publisher" $NAME $*
rm $1/etc/fstab
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > "$1/etc/fstab"
makefs -t cd9660 $bootable -o rockridge -o label="$LABEL" -o publisher="$publisher" "$NAME" "$@"
rm "$1/etc/fstab"
rm /tmp/hfs-boot-block
rm -rf $1/ppc
rm -rf "$1/ppc"

View File

@ -23,62 +23,62 @@
# extra-bits-dir, if provided, contains additional files to be merged
# into base-bits-dir as part of making the image.
if [ $# -lt 3 ]; then
echo Usage: $0 '[-b] image-label image-name base-bits-dir [extra-bits-dir]' > /dev/stderr
echo "Usage: $0 [-b] image-label image-name base-bits-dir [extra-bits-dir]" > /dev/stderr
exit 1
fi
case $1 in
-b) BOPT=$1; shift ;;
case "$1" in
-b) BOPT="$1"; shift ;;
esac
LABEL=`echo $1 | tr '[:lower:]' '[:upper:]'`; shift
NAME=$1; shift
BASEBITSDIR=$1
LABEL=`echo "$1" | tr '[:lower:]' '[:upper:]'`; shift
NAME="$1"; shift
BASEBITSDIR="$1"
# Create an ISO image
publisher="The FreeBSD Project. http://www.FreeBSD.org/"
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > "${BASEBITSDIR}/etc/fstab"
makefs -t cd9660 -o rockridge -o label="$LABEL" -o publisher="$publisher" ${NAME}.tmp $*
rm "${BASEBITSDIR}/etc/fstab"
echo "/dev/iso9660/$LABEL / cd9660 ro 0 0" > "$BASEBITSDIR/etc/fstab"
makefs -t cd9660 -o rockridge -o label="$LABEL" -o publisher="$publisher" "$NAME.tmp" "$@"
rm "$BASEBITSDIR/etc/fstab"
if [ "x$BOPT" != "x-b" ]; then
mv ${NAME}.tmp ${NAME}
mv "$NAME.tmp" "$NAME"
exit 0
fi
TMPIMGDIR=`mktemp -d /tmp/bootfs.XXXXXXXX` || exit 1
BOOTFSDIR="${TMPIMGDIR}/bootfs"
BOOTFSIMG="${TMPIMGDIR}/bootfs.img"
BOOTFSDIR="$TMPIMGDIR/bootfs"
BOOTFSIMG="$TMPIMGDIR/bootfs.img"
# Create a boot filesystem
mkdir -p "${BOOTFSDIR}/boot"
cp -p "${BASEBITSDIR}/boot/loader" "${BOOTFSDIR}/boot"
makefs -t ffs -B be -M 512k "${BOOTFSIMG}" "${BOOTFSDIR}"
dd if="${BASEBITSDIR}/boot/boot1" of="${BOOTFSIMG}" bs=512 conv=notrunc,sync
mkdir -p "$BOOTFSDIR/boot"
cp -p "$BASEBITSDIR/boot/loader" "$BOOTFSDIR/boot"
makefs -t ffs -B be -M 512k "$BOOTFSIMG" "$BOOTFSDIR"
dd if="$BASEBITSDIR/boot/boot1" of="$BOOTFSIMG" bs=512 conv=notrunc,sync
# Create a boot ISO image
: ${CYLSIZE:=640}
ISOSIZE=$(stat -f %z ${NAME}.tmp)
ISOBLKS=$(((${ISOSIZE} + 511) / 512))
ISOCYLS=$(((${ISOBLKS} + (${CYLSIZE} - 1)) / ${CYLSIZE}))
ISOSIZE=$(stat -f %z "$NAME.tmp")
ISOBLKS=$((($ISOSIZE + 511) / 512))
ISOCYLS=$((($ISOBLKS + ($CYLSIZE - 1)) / $CYLSIZE))
BOOTFSSIZE=$(stat -f %z "${BOOTFSIMG}")
BOOTFSBLKS=$(((${BOOTFSSIZE} + 511) / 512))
BOOTFSCYLS=$(((${BOOTFSBLKS} + (${CYLSIZE} - 1)) / ${CYLSIZE}))
BOOTFSSIZE=$(stat -f %z "$BOOTFSIMG")
BOOTFSBLKS=$((($BOOTFSSIZE + 511) / 512))
BOOTFSCYLS=$((($BOOTFSBLKS + ($CYLSIZE - 1)) / $CYLSIZE))
ENDCYL=$((${ISOCYLS} + ${BOOTFSCYLS}))
NSECTS=$((${ENDCYL} * 1 * ${CYLSIZE}))
ENDCYL=$(($ISOCYLS + $BOOTFSCYLS))
NSECTS=$(($ENDCYL * 1 * $CYLSIZE))
dd if=${NAME}.tmp of=${NAME} bs=${CYLSIZE}b conv=notrunc,sync
dd if=${BOOTFSIMG} of=${NAME} bs=${CYLSIZE}b seek=${ISOCYLS} conv=notrunc,sync
dd if="$NAME.tmp" of="$NAME" bs="${CYLSIZE}b" conv=notrunc,sync
dd if="$BOOTFSIMG" of="$NAME" bs="${CYLSIZE}b" seek=$ISOCYLS conv=notrunc,sync
# The number of alternative cylinders is always 2.
dd if=/dev/zero of=${NAME} bs=${CYLSIZE}b seek=${ENDCYL} count=2 conv=notrunc,sync
rm -rf ${NAME}.tmp ${TMPIMGDIR}
dd if=/dev/zero of="$NAME" bs="${CYLSIZE}b" seek=$ENDCYL count=2 conv=notrunc,sync
rm -rf "$NAME.tmp" "$TMPIMGDIR"
# Write VTOC8 label to boot ISO image
MD=`mdconfig -a -t vnode -S 512 -y 1 -x ${CYLSIZE} -f ${NAME}`
gpart create -s VTOC8 ${MD}
MD=`mdconfig -a -t vnode -S 512 -y 1 -x "$CYLSIZE" -f "$NAME"`
gpart create -s VTOC8 $MD
# !4: usr, for ISO image part
gpart add -i 1 -s $((${ISOCYLS} * ${CYLSIZE} * 512))b -t \!4 ${MD}
gpart add -i 1 -s "$(($ISOCYLS * $CYLSIZE * 512))b" -t \!4 $MD
# !2: root, for bootfs part.
gpart add -i 6 -s $((${BOOTFSCYLS} * ${CYLSIZE} * 512))b -t \!2 ${MD}
gpart add -i 6 -s "$(($BOOTFSCYLS * $CYLSIZE * 512))b" -t \!2 $MD
mdconfig -d -u ${MD#md}

View File

@ -51,7 +51,7 @@ static const char rcsid[] =
#include "ifconfig.h"
#define GIFBITS "\020\1ACCEPT_REV_ETHIP_VER\2IGNORE_SOURCE\5SEND_REV_ETHIP_VER"
#define GIFBITS "\020\2IGNORE_SOURCE"
static void gif_status(int);
@ -70,8 +70,7 @@ gif_status(int s)
}
static void
setgifopts(const char *val,
int d, int s, const struct afswtch *afp)
setgifopts(const char *val, int d, int s, const struct afswtch *afp)
{
int opts;
@ -93,12 +92,8 @@ setgifopts(const char *val,
}
static struct cmd gif_cmds[] = {
DEF_CMD("accept_rev_ethip_ver", GIF_ACCEPT_REVETHIP, setgifopts),
DEF_CMD("-accept_rev_ethip_ver",-GIF_ACCEPT_REVETHIP, setgifopts),
DEF_CMD("ignore_source", GIF_IGNORE_SOURCE, setgifopts),
DEF_CMD("-ignore_source", -GIF_IGNORE_SOURCE, setgifopts),
DEF_CMD("send_rev_ethip_ver", GIF_SEND_REVETHIP, setgifopts),
DEF_CMD("-send_rev_ethip_ver", -GIF_SEND_REVETHIP, setgifopts),
};
static struct afswtch af_gif = {
@ -110,11 +105,9 @@ static struct afswtch af_gif = {
static __constructor void
gif_ctor(void)
{
#define N(a) (sizeof(a) / sizeof(a[0]))
size_t i;
for (i = 0; i < N(gif_cmds); i++)
for (i = 0; i < nitems(gif_cmds); i++)
cmd_register(&gif_cmds[i]);
af_register(&af_gif);
#undef N
}

View File

@ -14,15 +14,13 @@
#
# $FreeBSD$
SRCDIR= ${.CURDIR}/../../usr.sbin/rtsold
.PATH: ${SRCDIR}
.PATH: ${.CURDIR}/../../usr.sbin/rtsold
PROG= rtsol
SRCS= rtsold.c rtsol.c if.c probe.c dump.c rtsock.c
MAN=
WARNS?= 3
CFLAGS+= -DHAVE_ARC4RANDOM -DHAVE_POLL_H -DSMALL
CFLAGS+= -DHAVE_ARC4RANDOM -DSMALL
.include <bsd.prog.mk>

View File

@ -56,7 +56,7 @@ syscall:::return
self->syscallname = "";
}
nfsclient:::
nfscl:::
/self->syscallname != 0 && self->syscallname != ""/
{
@ -64,7 +64,7 @@ nfsclient:::
self->syscallname);
}
nfsclient:::
nfscl:::
/self->syscallname == 0 || self->syscallname == ""/
{

View File

@ -53,13 +53,13 @@ syscall:::entry
self->count = 0;
}
nfsclient:nfs3::start
nfscl:nfs3::start
{
self->timestamp = timestamp;
}
nfsclient:nfs3::done
nfscl:nfs3::done
{
self->count += (timestamp - self->timestamp);

View File

@ -12,25 +12,35 @@
.\"
.\"
.\" $FreeBSD$
.Dd January 1, 2007
.Dd September 6, 2015
.Dt BLACKHOLE 4
.Os
.Sh NAME
.Nm blackhole
.Nd a
.Xr sysctl 8
MIB for manipulating behaviour in respect of refused TCP or UDP connection
MIB for manipulating behaviour in respect of refused SCTP, TCP, or UDP connection
attempts
.Sh SYNOPSIS
.Cd sysctl net.inet.tcp.blackhole[=[0 | 1 | 2]]
.Cd sysctl net.inet.udp.blackhole[=[0 | 1]]
.Cd sysctl net.inet.sctp.blackhole Ns Op = Ns Brq "0 | 1 | 2"
.Cd sysctl net.inet.tcp.blackhole Ns Op = Ns Brq "0 | 1 | 2"
.Cd sysctl net.inet.udp.blackhole Ns Op = Ns Brq "0 | 1"
.Sh DESCRIPTION
The
.Nm
.Xr sysctl 8
MIB is used to control system behaviour when connection requests
are received on TCP or UDP ports where there is no socket listening.
are received on SCTP, TCP, or UDP ports where there is no socket listening.
.Pp
The blackhole behaviour is useful to slow down an attacker who is port-scanning
a system in an attempt to detect vulnerable services.
It might also slow down an attempted denial of service attack.
.Ss SCTP
Setting the SCTP blackhole MIB to a numeric value of one
will prevent sending an ABORT packet in response to an incoming INIT.
A MIB value of two will do the same, but will also prevent sending an ABORT packet
when unexpected packets are received.
.Ss TCP
Normal behaviour, when a TCP SYN segment is received on a port where
there is no socket accepting connections, is for the system to return
a RST segment, and drop the connection.
@ -44,20 +54,15 @@ as a blackhole.
By setting the MIB value to two, any segment arriving
on a closed port is dropped without returning a RST.
This provides some degree of protection against stealth port scans.
.Pp
In the UDP instance, enabling blackhole behaviour turns off the sending
.Ss UDP
Enabling blackhole behaviour turns off the sending
of an ICMP port unreachable message in response to a UDP datagram which
arrives on a port where there is no socket listening.
It must be noted that this behaviour will prevent remote systems from running
.Xr traceroute 8
to a system.
.Pp
The blackhole behaviour is useful to slow down anyone who is port scanning
a system, attempting to detect vulnerable services on a system.
It could potentially also slow down someone who is attempting a denial
of service attack.
.Sh WARNING
The TCP and UDP blackhole features should not be regarded as a replacement
The SCTP, TCP, and UDP blackhole features should not be regarded as a replacement
for firewall solutions.
Better security would consist of the
.Nm
@ -68,6 +73,7 @@ This mechanism is not a substitute for securing a system.
It should be used together with other security mechanisms.
.Sh SEE ALSO
.Xr ip 4 ,
.Xr sctp 4 ,
.Xr tcp 4 ,
.Xr udp 4 ,
.Xr ipf 8 ,
@ -80,5 +86,10 @@ The TCP and UDP
MIBs
first appeared in
.Fx 4.0 .
.Pp
The SCTP
.Nm
MIB first appeared in
.Fx 9.1 .
.Sh AUTHORS
.An Geoffrey M. Rehmet

View File

@ -29,7 +29,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd October 14, 2014
.Dd September 10, 2015
.Dt GIF 4
.Os
.Sh NAME
@ -246,32 +246,3 @@ had a multi-destination behavior, configurable via
.Dv IFF_LINK0
flag.
The behavior is obsolete and is no longer supported.
.Pp
On
.Fx
6.1, 6.2, 6.3, 7.0, 7.1, and 7.2
the
.Nm
sends and receives incorrect EtherIP packets with reversed version
field when
.Xr if_bridge 4
is used together. As a workaround on this interoperability issue, the
following two
.Xr ifconfig 8
flags can be used:
.Bl -tag -width "accept_rev_ethip_ver" -offset indent
.It accept_rev_ethip_ver
accepts both correct EtherIP packets and ones with reversed version
field, if enabled. If disabled, the
.Nm
accepts the correct packets only. This flag is enabled by default.
.It send_rev_ethip_ver
sends EtherIP packets with reversed version field intentionally, if
enabled. If disabled, the
.Nm
sends the correct packets only. This flag is disabled by default.
.El
.Pp
If interoperability with the older
.Fx
machines is needed, both of these two flags must be enabled.

View File

@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd November 16, 2011
.Dd September 10, 2015
.Dt STACK 9
.Os
.Sh NAME
@ -36,9 +36,11 @@
.Sh SYNOPSIS
.In sys/param.h
.In sys/stack.h
.Pp
In the kernel configuration file:
.Cd "options DDB"
.Cd "options STACK"
.Pp
.Ft struct stack *
.Fn stack_create "void"
.Ft void
@ -63,6 +65,10 @@ In the kernel configuration file:
.Fn stack_sbuf_print_ddb "struct sbuf sb*" "const struct stack *st"
.Ft void
.Fn stack_save "struct stack *st"
.Ft void
.Fn stack_save_td "struct stack *st" "struct thread *td"
.Ft int
.Fn stack_save_td_running "struct stack *st" "struct thread *td"
.Sh DESCRIPTION
The
.Nm
@ -86,6 +92,16 @@ Memory associated with a trace is freed by calling
.Pp
A trace of the current kernel thread's call stack may be captured using
.Fn stack_save .
.Fn stack_save_td
and
.Fn stack_save_td_running
can also be used to capture the stack of a caller-specified thread.
Callers of these functions must own the thread lock of the specified thread.
.Fn stack_save_td
can capture the stack of a kernel thread that is not running or
swapped out at the time of the call.
.Fn stack_save_td_running
can capture the stack of a running kernel thread.
.Pp
.Fn stack_print
and
@ -130,6 +146,23 @@ The utility functions
and
.Nm stack_put
may be used to manipulate stack data structures directly.
.Sh RETURN VALUES
.Fn stack_put
returns 0 on success.
Otherwise the
.Dv struct stack
does not contain space to record additional frames, and a non-zero value is
returned.
.Pp
.Fn stack_save_td_running
returns 0 when the stack capture was successful and a non-zero error number
otherwise.
In particular,
.Er EAGAIN
is returned if the thread was running in user mode at the time that the
capture was attempted, and
.Er EOPNOTSUPP
is returned if the operation is not implemented.
.Sh SEE ALSO
.Xr ddb 4 ,
.Xr printf 9 ,

View File

@ -13,11 +13,14 @@ MK_INSTALL_AS_USER= yes
.warning MAKEOBJDIRPREFIX not supported; setting MAKEOBJDIR...
# put things approximately where they want
OBJROOT:=${MAKEOBJDIRPREFIX}${SRCTOP:S,/src,,}/
MAKEOBJDIRPREFIX=
.export MAKEOBJDIRPREFIX
.endif
.if empty(MAKEOBJDIR) || ${MAKEOBJDIR:M*/*} == ""
# OBJTOP set below
MAKEOBJDIR=$${.CURDIR:S,$${SRCTOP},$${OBJTOP},}
MAKEOBJDIRPREFIX=
# export but do not track
.export-env MAKEOBJDIRPREFIX MAKEOBJDIR
.export-env MAKEOBJDIR
# now for our own use
MAKEOBJDIR= ${.CURDIR:S,${SRCTOP},${OBJTOP},}
.endif

View File

@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include "opt_isa.h"
#include "opt_kdb.h"
#include "opt_stack.h"
#include <sys/param.h>
#include <sys/bus.h>
@ -91,6 +92,7 @@ PMC_SOFT_DEFINE( , , page_fault, write);
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/stack.h>
#include <machine/tss.h>
#ifdef KDTRACE_HOOKS
@ -202,18 +204,25 @@ trap(struct trapframe *frame)
goto out;
}
#ifdef HWPMC_HOOKS
/*
* CPU PMCs interrupt using an NMI. If the PMC module is
* active, pass the 'rip' value to the PMC module's interrupt
* handler. A return value of '1' from the handler means that
* the NMI was handled by it and we can return immediately.
*/
if (type == T_NMI && pmc_intr &&
(*pmc_intr)(PCPU_GET(cpuid), frame))
goto out;
if (type == T_NMI) {
#ifdef HWPMC_HOOKS
/*
* CPU PMCs interrupt using an NMI. If the PMC module is
* active, pass the 'rip' value to the PMC module's interrupt
* handler. A non-zero return value from the handler means that
* the NMI was consumed by it and we can return immediately.
*/
if (pmc_intr != NULL &&
(*pmc_intr)(PCPU_GET(cpuid), frame) != 0)
goto out;
#endif
#ifdef STACK
if (stack_nmi_handler(frame) != 0)
goto out;
#endif
}
if (type == T_MCHK) {
mca_intr();
goto out;
@ -625,7 +634,6 @@ trap_pfault(frame, usermode)
int usermode;
{
vm_offset_t va;
struct vmspace *vm;
vm_map_t map;
int rv = 0;
vm_prot_t ftype;
@ -687,14 +695,7 @@ trap_pfault(frame, usermode)
map = kernel_map;
} else {
/*
* This is a fault on non-kernel virtual memory. If either
* p or p->p_vmspace is NULL, then the fault is fatal.
*/
if (p == NULL || (vm = p->p_vmspace) == NULL)
goto nogo;
map = &vm->vm_map;
map = &p->p_vmspace->vm_map;
/*
* When accessing a usermode address, kernel must be
@ -729,28 +730,8 @@ trap_pfault(frame, usermode)
else
ftype = VM_PROT_READ;
if (map != kernel_map) {
/*
* Keep swapout from messing with us during this
* critical time.
*/
PROC_LOCK(p);
++p->p_lock;
PROC_UNLOCK(p);
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
PROC_LOCK(p);
--p->p_lock;
PROC_UNLOCK(p);
} else {
/*
* Don't have to worry about process locking or stacks in the
* kernel.
*/
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
}
/* Fault in the page. */
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
if (rv == KERN_SUCCESS) {
#ifdef HWPMC_HOOKS
if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {

View File

@ -1,42 +1,6 @@
/*-
* Mach Operating System
* Copyright (c) 1991,1990 Carnegie Mellon University
* All Rights Reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $FreeBSD$
*/
#ifndef _MACHINE_STACK_H_
#define _MACHINE_STACK_H_
/*
* Stack trace.
* This file is in the public domain.
*/
/* $FreeBSD$ */
struct amd64_frame {
struct amd64_frame *f_frame;
long f_retaddr;
long f_arg0;
};
#endif /* !_MACHINE_STACK_H_ */
#include <x86/stack.h>

View File

@ -71,6 +71,13 @@ stack_save_td(struct stack *st, struct thread *td)
stack_capture(st, frame);
}
int
stack_save_td_running(struct stack *st, struct thread *td)
{
return (EOPNOTSUPP);
}
void
stack_save(struct stack *st)
{

View File

@ -500,28 +500,9 @@ abort_handler(struct trapframe *tf, int prefetch)
onfault = pcb->pcb_onfault;
pcb->pcb_onfault = NULL;
#endif
if (map != kernel_map) {
/*
* Keep swapout from messing with us during this
* critical time.
*/
PROC_LOCK(p);
++p->p_lock;
PROC_UNLOCK(p);
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
PROC_LOCK(p);
--p->p_lock;
PROC_UNLOCK(p);
} else {
/*
* Don't have to worry about process locking or stacks in the
* kernel.
*/
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
}
/* Fault in the page. */
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
#ifdef INVARIANTS
pcb->pcb_onfault = onfault;

View File

@ -365,19 +365,8 @@ abort_handler(struct trapframe *tf, int type)
onfault = pcb->pcb_onfault;
pcb->pcb_onfault = NULL;
if (map != kernel_map) {
PROC_LOCK(p);
p->p_lock++;
PROC_UNLOCK(p);
}
error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
pcb->pcb_onfault = onfault;
if (map != kernel_map) {
PROC_LOCK(p);
p->p_lock--;
PROC_UNLOCK(p);
}
if (__predict_true(error == 0))
goto out;
fatal_pagefault:
@ -682,20 +671,8 @@ prefetch_abort_handler(struct trapframe *tf)
if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
goto out;
if (map != kernel_map) {
PROC_LOCK(p);
p->p_lock++;
PROC_UNLOCK(p);
}
error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE,
VM_FAULT_NORMAL);
if (map != kernel_map) {
PROC_LOCK(p);
p->p_lock--;
PROC_UNLOCK(p);
}
if (__predict_true(error == 0))
goto out;

View File

@ -95,6 +95,7 @@ END(copyin)
*/
ENTRY(copyinstr)
mov x5, xzr /* count = 0 */
mov w4, #1 /* If zero return faulure */
cbz x2, 3f /* If len == 0 then skip loop */
adr x6, copyio_fault /* Get the handler address */
@ -102,17 +103,18 @@ ENTRY(copyinstr)
1: ldrb w4, [x0], #1 /* Load from uaddr */
strb w4, [x1], #1 /* Store in kaddr */
cbz w4, 2f /* If == 0 then break */
sub x2, x2, #1 /* len-- */
add x5, x5, #1 /* count++ */
cbz w4, 2f /* Break when NUL-terminated */
sub x2, x2, #1 /* len-- */
cbnz x2, 1b
2: SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
3: cbz x3, 4f /* Check if done != NULL */
add x5, x5, #1 /* count++ */
str x5, [x3] /* done = count */
4: mov x0, xzr /* return 0 */
4: mov w1, #ENAMETOOLONG /* Load ENAMETOOLONG to return if failed */
cmp w4, #0 /* Check if we saved the NUL-terminator */
csel w0, wzr, w1, eq /* If so return success, else failure */
ret
END(copyinstr)

View File

@ -246,11 +246,11 @@ drop_to_el1:
mrs x2, icc_sre_el2
orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
msr icc_sre_el2, x2
isb
2:
/* Set the address to return to our return address */
msr elr_el2, x30
isb
eret

View File

@ -3032,8 +3032,74 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pd_entry_t *l1p, l1;
pd_entry_t *l2p, l2;
pt_entry_t *l3p, l3;
vm_paddr_t pa;
bool managed;
int val;
panic("ARM64TODO: pmap_mincore");
PMAP_LOCK(pmap);
retry:
pa = 0;
val = 0;
managed = false;
l1p = pmap_l1(pmap, addr);
if (l1p == NULL) /* No l1 */
goto done;
l1 = pmap_load(l1p);
if ((l1 & ATTR_DESCR_MASK) == L1_BLOCK) {
pa = (l1 & ~ATTR_MASK) | (addr & L1_OFFSET);
managed = (l1 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
val = MINCORE_SUPER | MINCORE_INCORE;
if (pmap_page_dirty(l1))
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if ((l1 & ATTR_AF) == ATTR_AF)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
goto done;
}
l2p = pmap_l1_to_l2(l1p, addr);
if (l2p == NULL) /* No l2 */
goto done;
l2 = pmap_load(l2p);
if ((l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
pa = (l2 & ~ATTR_MASK) | (addr & L2_OFFSET);
managed = (l2 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
val = MINCORE_SUPER | MINCORE_INCORE;
if (pmap_page_dirty(l2))
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if ((l2 & ATTR_AF) == ATTR_AF)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
goto done;
}
l3p = pmap_l2_to_l3(l2p, addr);
if (l3p == NULL) /* No l3 */
goto done;
l3 = pmap_load(l2p);
if ((l3 & ATTR_DESCR_MASK) == L3_PAGE) {
pa = (l3 & ~ATTR_MASK) | (addr & L3_OFFSET);
managed = (l3 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
val = MINCORE_INCORE;
if (pmap_page_dirty(l3))
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if ((l3 & ATTR_AF) == ATTR_AF)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
}
done:
if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
(MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
goto retry;
} else
PA_UNLOCK_COND(*locked_pa);
PMAP_UNLOCK(pmap);
return (val);
}
void

View File

@ -72,6 +72,13 @@ stack_save_td(struct stack *st, struct thread *td)
stack_capture(st, &frame);
}
int
stack_save_td_running(struct stack *st, struct thread *td)
{
return (EOPNOTSUPP);
}
void
stack_save(struct stack *st)
{

View File

@ -160,17 +160,18 @@ ENTRY(cpu_switch)
dsb sy
isb
/* Release the old thread */
/*
* Release the old thread. This doesn't need to be a store-release
* as the above dsb instruction will provide release semantics.
*/
str x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
/* Read the value in blocked_lock */
ldr x0, =_C_LABEL(blocked_lock)
ldr x1, [x0]
/* Load curthread */
ldr x2, [x18, #PC_CURTHREAD]
ldr x2, [x0]
1:
ldr x3, [x2, #TD_LOCK]
cmp x3, x1
ldar x3, [x1, #TD_LOCK]
cmp x3, x2
b.eq 1b
#endif

View File

@ -190,29 +190,8 @@ data_abort(struct trapframe *frame, uint64_t esr, int lower)
va = trunc_page(far);
ftype = ((esr >> 6) & 1) ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
if (map != kernel_map) {
/*
* Keep swapout from messing with us during this
* critical time.
*/
PROC_LOCK(p);
++p->p_lock;
PROC_UNLOCK(p);
/* Fault in the user page: */
error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
PROC_LOCK(p);
--p->p_lock;
PROC_UNLOCK(p);
} else {
/*
* Don't have to worry about process locking or stacks in the
* kernel.
*/
error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
}
/* Fault in the page. */
error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
if (error != KERN_SUCCESS) {
if (lower) {
sig = SIGSEGV;

View File

@ -269,8 +269,10 @@ efifb_from_uga(struct efi_fb *efifb, EFI_UGA_DRAW_PROTOCOL *uga)
EFI_PCI_IO_PROTOCOL *pciio;
char *ev, *p;
EFI_STATUS status;
ssize_t ofs;
uint32_t np, horiz, vert, depth, refresh;
ssize_t offset;
uint64_t fbaddr, fbsize;
uint32_t horiz, vert, stride;
uint32_t np, depth, refresh;
status = uga->GetMode(uga, &horiz, &vert, &depth, &refresh);
if (EFI_ERROR(status))
@ -285,6 +287,63 @@ efifb_from_uga(struct efi_fb *efifb, EFI_UGA_DRAW_PROTOCOL *uga)
efifb_mask_from_pixfmt(efifb, PixelBlueGreenRedReserved8BitPerColor,
NULL);
/* pciio can be NULL on return! */
pciio = efifb_uga_get_pciio();
/* Try to find the frame buffer. */
status = efifb_uga_locate_framebuffer(pciio, &efifb->fb_addr,
&efifb->fb_size);
if (EFI_ERROR(status)) {
efifb->fb_addr = 0;
efifb->fb_size = 0;
}
/*
* There's no reliable way to detect the frame buffer or the
* offset within the frame buffer of the visible region, nor
* the stride. Our only option is to look at the system and
* fill in the blanks based on that. Luckily, UGA was mostly
* only used on Apple hardware.
*/
offset = -1;
ev = getenv("smbios.system.maker");
if (ev != NULL && !strcmp(ev, "Apple Inc.")) {
ev = getenv("smbios.system.product");
if (ev != NULL && !strcmp(ev, "iMac7,1")) {
/* These are the expected values we should have. */
horiz = 1680;
vert = 1050;
fbaddr = 0xc0000000;
/* These are the missing bits. */
offset = 0x10000;
stride = 1728;
} else if (ev != NULL && !strcmp(ev, "MacBook3,1")) {
/* These are the expected values we should have. */
horiz = 1280;
vert = 800;
fbaddr = 0xc0000000;
/* These are the missing bits. */
offset = 0x0;
stride = 2048;
}
}
/*
* If this is hardware we know, make sure that it looks familiar
* before we accept our hardcoded values.
*/
if (offset >= 0 && efifb->fb_width == horiz &&
efifb->fb_height == vert && efifb->fb_addr == fbaddr) {
efifb->fb_addr += offset;
efifb->fb_size -= offset;
efifb->fb_stride = stride;
return (0);
} else if (offset >= 0) {
printf("Hardware make/model known, but graphics not "
"as expected.\n");
printf("Console may not work!\n");
}
/*
* The stride is equal or larger to the width. Often it's the
* next larger power of two. We'll start with that...
@ -298,16 +357,11 @@ efifb_from_uga(struct efi_fb *efifb, EFI_UGA_DRAW_PROTOCOL *uga)
}
} while (np);
/* pciio can be NULL on return! */
pciio = efifb_uga_get_pciio();
ev = getenv("uga_framebuffer");
ev = getenv("hw.efifb.address");
if (ev == NULL) {
/* Try to find the frame buffer. */
status = efifb_uga_locate_framebuffer(pciio, &efifb->fb_addr,
&efifb->fb_size);
if (EFI_ERROR(status)) {
printf("Please set uga_framebuffer!\n");
if (efifb->fb_addr == 0) {
printf("Please set hw.efifb.address and "
"hw.efifb.stride.\n");
return (1);
}
@ -328,30 +382,30 @@ efifb_from_uga(struct efi_fb *efifb, EFI_UGA_DRAW_PROTOCOL *uga)
* to not appear to hang when we can't read from the
* frame buffer.
*/
ofs = efifb_uga_find_pixel(uga, 0, pciio, efifb->fb_addr,
offset = efifb_uga_find_pixel(uga, 0, pciio, efifb->fb_addr,
efifb->fb_size >> 8);
if (ofs == -1) {
if (offset == -1) {
printf("Unable to reliably detect frame buffer.\n");
} else if (ofs > 0) {
efifb->fb_addr += ofs;
efifb->fb_size -= ofs;
} else if (offset > 0) {
efifb->fb_addr += offset;
efifb->fb_size -= offset;
}
} else {
ofs = 0;
offset = 0;
efifb->fb_size = efifb->fb_height * efifb->fb_stride * 4;
efifb->fb_addr = strtoul(ev, &p, 0);
if (*p != '\0')
return (1);
}
ev = getenv("uga_stride");
ev = getenv("hw.efifb.stride");
if (ev == NULL) {
if (pciio != NULL && ofs != -1) {
if (pciio != NULL && offset != -1) {
/* Determine the stride. */
ofs = efifb_uga_find_pixel(uga, 1, pciio,
offset = efifb_uga_find_pixel(uga, 1, pciio,
efifb->fb_addr, horiz * 8);
if (ofs != -1)
efifb->fb_stride = ofs >> 2;
if (offset != -1)
efifb->fb_stride = offset >> 2;
} else {
printf("Unable to reliably detect the stride.\n");
}

View File

@ -43,12 +43,9 @@ Features:
- Persistent reservation support
- Mode sense/select support
- Error injection support
- High Availability support (1)
- High Availability support
- All I/O handled in-kernel, no userland context switch overhead.
(1) HA Support is just an API stub, and needs much more to be fully
functional. See the to-do list below.
Configuring and Running CTL:
===========================
@ -245,27 +242,6 @@ To Do List:
another data structure in the stack, more memory allocations, etc. This
will also require changes to the CAM CCB structure to support CTL.
- Full-featured High Availability support. The HA API that is in ctl_ha.h
is essentially a renamed version of Copan's HA API. There is no
substance to it, but it remains in CTL to show what needs to be done to
implement active/active HA from a CTL standpoint. The things that would
need to be done include:
- A kernel level software API for message passing as well as DMA
between at least two nodes.
- Hardware support and drivers for inter-node communication. This
could be as simple as Ethernet hardware and drivers.
- A "supervisor", or startup framework to control and coordinate
HA startup, failover (going from active/active to single mode),
and failback (going from single mode to active/active).
- HA support in other components of the stack. The goal behind HA
is that one node can fail and another node can seamlessly take
over handling I/O requests. This requires support from pretty
much every component in the storage stack, from top to bottom.
CTL is one piece of it, but you also need support in the RAID
stack/filesystem/backing store. You also need full configuration
mirroring, and all peer nodes need to be able to talk to the
underlying storage hardware.
Code Roadmap:
============
@ -365,11 +341,11 @@ This is a CTL frontend port that is also a CAM SIM. The idea is that this
frontend allows for using CTL without any target-capable hardware. So any
LUNs you create in CTL are visible via this port.
ctl_ha.c:
ctl_ha.h:
--------
This is a stubbed-out High Availability API. See the comments in the
header and the description of what is needed as far as HA support above.
This is a High Availability API and TCP-based interlink implementation.
ctl_io.h:
--------

File diff suppressed because it is too large Load Diff

View File

@ -138,25 +138,13 @@ struct ctl_page_index;
SYSCTL_DECL(_kern_cam_ctl);
#endif
/*
* Call these routines to enable or disable front end ports.
*/
int ctl_port_enable(ctl_port_type port_type);
int ctl_port_disable(ctl_port_type port_type);
/*
* This routine grabs a list of frontend ports.
*/
int ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
int *num_entries_filled, int *num_entries_dropped,
ctl_port_type port_type, int no_virtual);
/*
* Put a string into an sbuf, escaping characters that are illegal or not
* recommended in XML. Note this doesn't escape everything, just > < and &.
*/
int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size);
int ctl_ffz(uint32_t *mask, uint32_t size);
int ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last);
int ctl_set_mask(uint32_t *mask, uint32_t bit);
int ctl_clear_mask(uint32_t *mask, uint32_t bit);
int ctl_is_set(uint32_t *mask, uint32_t bit);
@ -165,11 +153,6 @@ int ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
int ctl_control_page_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
uint8_t *page_ptr);
/**
int ctl_failover_sp_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
uint8_t *page_ptr);
**/
int ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc);
@ -189,11 +172,12 @@ void ctl_data_submit_done(union ctl_io *io);
void ctl_config_read_done(union ctl_io *io);
void ctl_config_write_done(union ctl_io *io);
void ctl_portDB_changed(int portnum);
#ifdef notyet
void ctl_init_isc_msg(void);
#endif
int ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td);
struct ctl_lun;
void ctl_isc_announce_lun(struct ctl_lun *lun);
struct ctl_port;
void ctl_isc_announce_port(struct ctl_port *port);
/*
* KPI to manipulate LUN/port options

View File

@ -307,6 +307,12 @@ int ctl_lun_operable(struct ctl_be_lun *be_lun);
int ctl_lun_offline(struct ctl_be_lun *be_lun);
int ctl_lun_online(struct ctl_be_lun *be_lun);
/*
* Called on LUN HA role change.
*/
int ctl_lun_primary(struct ctl_be_lun *be_lun);
int ctl_lun_secondary(struct ctl_be_lun *be_lun);
/*
* Let the backend notify the initiator about changed capacity.
*/

View File

@ -85,7 +85,9 @@ __FBSDID("$FreeBSD$");
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>
/*
@ -124,18 +126,11 @@ typedef enum {
CTL_BE_BLOCK_FILE
} ctl_be_block_type;
struct ctl_be_block_devdata {
struct cdev *cdev;
struct cdevsw *csw;
int dev_ref;
};
struct ctl_be_block_filedata {
struct ucred *cred;
};
union ctl_be_block_bedata {
struct ctl_be_block_devdata dev;
struct ctl_be_block_filedata file;
};
@ -217,6 +212,8 @@ struct ctl_be_block_io {
void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */
};
extern struct ctl_softc *control_softc;
static int cbb_num_threads = 14;
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
"CAM Target Layer Block Backend");
@ -819,16 +816,15 @@ static void
ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio)
{
struct ctl_be_block_devdata *dev_data;
union ctl_io *io;
struct cdevsw *csw;
struct cdev *dev;
struct uio xuio;
struct iovec *xiovec;
int flags;
int error, i;
int error, flags, i, ref;
DPRINTF("entered\n");
dev_data = &be_lun->backend.dev;
io = beio->io;
flags = 0;
if (ARGS(io)->flags & CTL_LLF_DPO)
@ -861,13 +857,20 @@ ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
mtx_unlock(&be_lun->io_lock);
if (beio->bio_cmd == BIO_READ) {
error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, flags);
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw) {
if (beio->bio_cmd == BIO_READ)
error = csw->d_read(dev, &xuio, flags);
else
error = csw->d_write(dev, &xuio, flags);
dev_relthread(dev, ref);
} else
error = ENXIO;
if (beio->bio_cmd == BIO_READ)
SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
} else {
error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, flags);
else
SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
}
mtx_lock(&be_lun->io_lock);
devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
@ -911,23 +914,30 @@ static void
ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio)
{
struct ctl_be_block_devdata *dev_data = &be_lun->backend.dev;
union ctl_io *io = beio->io;
struct cdevsw *csw;
struct cdev *dev;
struct ctl_lba_len_flags *lbalen = ARGS(io);
struct scsi_get_lba_status_data *data;
off_t roff, off;
int error, status;
int error, ref, status;
DPRINTF("entered\n");
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw == NULL) {
status = 0; /* unknown up to the end */
off = be_lun->size_bytes;
goto done;
}
off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize;
error = (*dev_data->csw->d_ioctl)(dev_data->cdev, FIOSEEKHOLE,
(caddr_t)&off, FREAD, curthread);
error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD,
curthread);
if (error == 0 && off > roff)
status = 0; /* mapped up to off */
else {
error = (*dev_data->csw->d_ioctl)(dev_data->cdev, FIOSEEKDATA,
(caddr_t)&off, FREAD, curthread);
error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD,
curthread);
if (error == 0 && off > roff)
status = 1; /* deallocated up to off */
else {
@ -935,7 +945,9 @@ ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun,
off = be_lun->size_bytes;
}
}
dev_relthread(dev, ref);
done:
data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
scsi_u64to8b(lbalen->lba, data->descr[0].addr);
scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize -
@ -951,9 +963,10 @@ ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
{
struct bio *bio;
union ctl_io *io;
struct ctl_be_block_devdata *dev_data;
struct cdevsw *csw;
struct cdev *dev;
int ref;
dev_data = &be_lun->backend.dev;
io = beio->io;
DPRINTF("entered\n");
@ -962,7 +975,6 @@ ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
bio = g_alloc_bio();
bio->bio_cmd = BIO_FLUSH;
bio->bio_dev = dev_data->cdev;
bio->bio_offset = 0;
bio->bio_data = 0;
bio->bio_done = ctl_be_block_biodone;
@ -982,7 +994,15 @@ ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
mtx_unlock(&be_lun->io_lock);
(*dev_data->csw->d_strategy)(bio);
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw) {
bio->bio_dev = dev;
csw->d_strategy(bio);
dev_relthread(dev, ref);
} else {
bio->bio_error = ENXIO;
ctl_be_block_biodone(bio);
}
}
static void
@ -991,15 +1011,17 @@ ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun,
uint64_t off, uint64_t len, int last)
{
struct bio *bio;
struct ctl_be_block_devdata *dev_data;
uint64_t maxlen;
struct cdevsw *csw;
struct cdev *dev;
int ref;
dev_data = &be_lun->backend.dev;
csw = devvn_refthread(be_lun->vn, &dev, &ref);
maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize);
while (len > 0) {
bio = g_alloc_bio();
bio->bio_cmd = BIO_DELETE;
bio->bio_dev = dev_data->cdev;
bio->bio_dev = dev;
bio->bio_offset = off;
bio->bio_length = MIN(len, maxlen);
bio->bio_data = 0;
@ -1016,8 +1038,15 @@ ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun,
beio->send_complete = 1;
mtx_unlock(&be_lun->io_lock);
(*dev_data->csw->d_strategy)(bio);
if (csw) {
csw->d_strategy(bio);
} else {
bio->bio_error = ENXIO;
ctl_be_block_biodone(bio);
}
}
if (csw)
dev_relthread(dev, ref);
}
static void
@ -1025,12 +1054,10 @@ ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio)
{
union ctl_io *io;
struct ctl_be_block_devdata *dev_data;
struct ctl_ptr_len_flags *ptrlen;
struct scsi_unmap_desc *buf, *end;
uint64_t len;
dev_data = &be_lun->backend.dev;
io = beio->io;
DPRINTF("entered\n");
@ -1063,23 +1090,25 @@ ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio)
{
TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
int i;
struct bio *bio;
struct ctl_be_block_devdata *dev_data;
struct cdevsw *csw;
struct cdev *dev;
off_t cur_offset;
int max_iosize;
int i, max_iosize, ref;
DPRINTF("entered\n");
dev_data = &be_lun->backend.dev;
csw = devvn_refthread(be_lun->vn, &dev, &ref);
/*
* We have to limit our I/O size to the maximum supported by the
* backend device. Hopefully it is MAXPHYS. If the driver doesn't
* set it properly, use DFLTPHYS.
*/
max_iosize = dev_data->cdev->si_iosize_max;
if (max_iosize < PAGE_SIZE)
if (csw) {
max_iosize = dev->si_iosize_max;
if (max_iosize < PAGE_SIZE)
max_iosize = DFLTPHYS;
} else
max_iosize = DFLTPHYS;
cur_offset = beio->io_offset;
@ -1097,7 +1126,7 @@ ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));
bio->bio_cmd = beio->bio_cmd;
bio->bio_dev = dev_data->cdev;
bio->bio_dev = dev;
bio->bio_caller1 = beio;
bio->bio_length = min(cur_size, max_iosize);
bio->bio_offset = cur_offset;
@ -1124,23 +1153,36 @@ ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
*/
while ((bio = TAILQ_FIRST(&queue)) != NULL) {
TAILQ_REMOVE(&queue, bio, bio_queue);
(*dev_data->csw->d_strategy)(bio);
if (csw)
csw->d_strategy(bio);
else {
bio->bio_error = ENXIO;
ctl_be_block_biodone(bio);
}
}
if (csw)
dev_relthread(dev, ref);
}
static uint64_t
ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname)
{
struct ctl_be_block_devdata *dev_data = &be_lun->backend.dev;
struct diocgattr_arg arg;
int error;
struct cdevsw *csw;
struct cdev *dev;
int error, ref;
if (dev_data->csw == NULL || dev_data->csw->d_ioctl == NULL)
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw == NULL)
return (UINT64_MAX);
strlcpy(arg.name, attrname, sizeof(arg.name));
arg.len = sizeof(arg.value.off);
error = dev_data->csw->d_ioctl(dev_data->cdev,
DIOCGATTR, (caddr_t)&arg, FREAD, curthread);
if (csw->d_ioctl) {
error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD,
curthread);
} else
error = ENODEV;
dev_relthread(dev, ref);
if (error != 0)
return (UINT64_MAX);
return (arg.value.off);
@ -1573,33 +1615,32 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
static void
ctl_be_block_worker(void *context, int pending)
{
struct ctl_be_block_lun *be_lun;
struct ctl_be_block_softc *softc;
struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context;
struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
union ctl_io *io;
be_lun = (struct ctl_be_block_lun *)context;
softc = be_lun->softc;
struct ctl_be_block_io *beio;
DPRINTF("entered\n");
mtx_lock(&be_lun->queue_lock);
/*
* Fetch and process I/Os from all queues. If we detect LUN
* CTL_LUN_FLAG_OFFLINE status here -- it is result of a race,
* so make response maximally opaque to not confuse initiator.
*/
for (;;) {
mtx_lock(&be_lun->queue_lock);
io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
if (io != NULL) {
struct ctl_be_block_io *beio;
DPRINTF("datamove queue\n");
STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
ctl_io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
if (cbe_lun->flags & CTL_LUN_FLAG_OFFLINE) {
ctl_set_busy(&io->scsiio);
ctl_complete_beio(beio);
return;
}
be_lun->dispatch(be_lun, beio);
mtx_lock(&be_lun->queue_lock);
continue;
}
io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
@ -1608,8 +1649,12 @@ ctl_be_block_worker(void *context, int pending)
STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
ctl_io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
if (cbe_lun->flags & CTL_LUN_FLAG_OFFLINE) {
ctl_set_busy(&io->scsiio);
ctl_config_write_done(io);
return;
}
ctl_be_block_cw_dispatch(be_lun, io);
mtx_lock(&be_lun->queue_lock);
continue;
}
io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue);
@ -1618,25 +1663,26 @@ ctl_be_block_worker(void *context, int pending)
STAILQ_REMOVE(&be_lun->config_read_queue, &io->io_hdr,
ctl_io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
if (cbe_lun->flags & CTL_LUN_FLAG_OFFLINE) {
ctl_set_busy(&io->scsiio);
ctl_config_read_done(io);
return;
}
ctl_be_block_cr_dispatch(be_lun, io);
mtx_lock(&be_lun->queue_lock);
continue;
}
io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
if (io != NULL) {
DPRINTF("input queue\n");
STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
ctl_io_hdr, links);
mtx_unlock(&be_lun->queue_lock);
/*
* We must drop the lock, since this routine and
* its children may sleep.
*/
if (cbe_lun->flags & CTL_LUN_FLAG_OFFLINE) {
ctl_set_busy(&io->scsiio);
ctl_data_submit_done(io);
return;
}
ctl_be_block_dispatch(be_lun, io);
mtx_lock(&be_lun->queue_lock);
continue;
}
@ -1644,9 +1690,9 @@ ctl_be_block_worker(void *context, int pending)
* If we get here, there is no work left in the queues, so
* just break out and let the task queue go to sleep.
*/
mtx_unlock(&be_lun->queue_lock);
break;
}
mtx_unlock(&be_lun->queue_lock);
}
/*
@ -1849,22 +1895,19 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
struct ctl_lun_create_params *params;
struct vattr vattr;
struct cdevsw *csw;
struct cdev *dev;
struct cdevsw *devsw;
char *value;
int error, atomic, maxio, unmap, tmp;
int error, atomic, maxio, ref, unmap, tmp;
off_t ps, pss, po, pos, us, uss, uo, uos, otmp;
params = &be_lun->params;
be_lun->dev_type = CTL_BE_BLOCK_DEV;
be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
&be_lun->backend.dev.dev_ref);
if (be_lun->backend.dev.csw == NULL)
panic("Unable to retrieve device switch");
if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0) {
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw == NULL)
return (ENXIO);
if (strcmp(csw->d_name, "zvol") == 0) {
be_lun->dispatch = ctl_be_block_dispatch_zvol;
be_lun->get_lba_status = ctl_be_block_gls_zvol;
atomic = maxio = CTLBLK_MAX_IO_SIZE;
@ -1872,7 +1915,7 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
be_lun->dispatch = ctl_be_block_dispatch_dev;
be_lun->get_lba_status = NULL;
atomic = 0;
maxio = be_lun->backend.dev.cdev->si_iosize_max;
maxio = dev->si_iosize_max;
if (maxio <= 0)
maxio = DFLTPHYS;
if (maxio > CTLBLK_MAX_IO_SIZE)
@ -1882,26 +1925,17 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
be_lun->getattr = ctl_be_block_getattr_dev;
be_lun->unmap = ctl_be_block_unmap_dev;
error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
if (error) {
if (!csw->d_ioctl) {
dev_relthread(dev, ref);
snprintf(req->error_str, sizeof(req->error_str),
"error getting vnode attributes for device %s",
be_lun->dev_path);
return (error);
}
dev = be_lun->vn->v_rdev;
devsw = dev->si_devsw;
if (!devsw->d_ioctl) {
snprintf(req->error_str, sizeof(req->error_str),
"no d_ioctl for device %s!",
be_lun->dev_path);
"no d_ioctl for device %s!", be_lun->dev_path);
return (ENODEV);
}
error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD,
error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD,
curthread);
if (error) {
dev_relthread(dev, ref);
snprintf(req->error_str, sizeof(req->error_str),
"error %d returned for DIOCGSECTORSIZE ioctl "
"on %s!", error, be_lun->dev_path);
@ -1919,14 +1953,15 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
if (params->blocksize_bytes % tmp == 0) {
cbe_lun->blocksize = params->blocksize_bytes;
} else {
dev_relthread(dev, ref);
snprintf(req->error_str, sizeof(req->error_str),
"requested blocksize %u is not an even "
"multiple of backing device blocksize %u",
params->blocksize_bytes, tmp);
return (EINVAL);
}
} else if (params->blocksize_bytes != 0) {
dev_relthread(dev, ref);
snprintf(req->error_str, sizeof(req->error_str),
"requested blocksize %u < backing device "
"blocksize %u", params->blocksize_bytes, tmp);
@ -1934,9 +1969,10 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
} else
cbe_lun->blocksize = tmp;
error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD,
curthread);
error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD,
curthread);
if (error) {
dev_relthread(dev, ref);
snprintf(req->error_str, sizeof(req->error_str),
"error %d returned for DIOCGMEDIASIZE "
" ioctl on %s!", error,
@ -1946,6 +1982,7 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
if (params->lun_size_bytes != 0) {
if (params->lun_size_bytes > otmp) {
dev_relthread(dev, ref);
snprintf(req->error_str, sizeof(req->error_str),
"requested LUN size %ju > backing device "
"size %ju",
@ -1961,13 +1998,13 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
0 : (be_lun->size_blocks - 1);
error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE,
(caddr_t)&ps, FREAD, curthread);
error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD,
curthread);
if (error)
ps = po = 0;
else {
error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET,
(caddr_t)&po, FREAD, curthread);
error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po,
FREAD, curthread);
if (error)
po = 0;
}
@ -2012,8 +2049,8 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
arg.len = sizeof(arg.value.i);
error = devsw->d_ioctl(dev, DIOCGATTR,
(caddr_t)&arg, FREAD, curthread);
error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD,
curthread);
unmap = (error == 0) ? arg.value.i : 0;
}
value = ctl_get_opt(&cbe_lun->options, "unmap");
@ -2024,6 +2061,7 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
else
cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP;
dev_relthread(dev, ref);
return (0);
}
@ -2034,24 +2072,6 @@ ctl_be_block_close(struct ctl_be_block_lun *be_lun)
int flags;
if (be_lun->vn) {
switch (be_lun->dev_type) {
case CTL_BE_BLOCK_DEV:
if (be_lun->backend.dev.csw) {
dev_relthread(be_lun->backend.dev.cdev,
be_lun->backend.dev.dev_ref);
be_lun->backend.dev.csw = NULL;
be_lun->backend.dev.cdev = NULL;
}
break;
case CTL_BE_BLOCK_FILE:
break;
case CTL_BE_BLOCK_NONE:
break;
default:
panic("Unexpected backend type.");
break;
}
flags = FREAD;
if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0)
flags |= FWRITE;
@ -2214,7 +2234,13 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
else
cbe_lun->lun_type = T_DIRECT;
be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
cbe_lun->flags = CTL_LUN_FLAG_PRIMARY;
cbe_lun->flags = 0;
value = ctl_get_opt(&cbe_lun->options, "ha_role");
if (value != NULL) {
if (strcmp(value, "primary") == 0)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
if (cbe_lun->lun_type == T_DIRECT) {
be_lun->size_bytes = params->lun_size_bytes;
@ -2226,10 +2252,13 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
0 : (be_lun->size_blocks - 1);
retval = ctl_be_block_open(softc, be_lun, req);
if (retval != 0) {
retval = 0;
req->status = CTL_LUN_WARNING;
if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
retval = ctl_be_block_open(softc, be_lun, req);
if (retval != 0) {
retval = 0;
req->status = CTL_LUN_WARNING;
}
}
num_threads = cbb_num_threads;
} else {
@ -2418,6 +2447,7 @@ ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
struct ctl_lun_rm_params *params;
struct ctl_be_block_lun *be_lun;
struct ctl_be_lun *cbe_lun;
int retval;
params = &req->reqdata.rm;
@ -2435,18 +2465,24 @@ ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
params->lun_id);
goto bailout_error;
}
cbe_lun = &be_lun->cbe_lun;
retval = ctl_disable_lun(&be_lun->cbe_lun);
retval = ctl_disable_lun(cbe_lun);
if (retval != 0) {
snprintf(req->error_str, sizeof(req->error_str),
"error %d returned from ctl_disable_lun() for "
"LUN %d", retval, params->lun_id);
goto bailout_error;
}
retval = ctl_invalidate_lun(&be_lun->cbe_lun);
if (be_lun->vn != NULL) {
cbe_lun->flags |= CTL_LUN_FLAG_OFFLINE;
ctl_lun_offline(cbe_lun);
taskqueue_drain_all(be_lun->io_taskqueue);
ctl_be_block_close(be_lun);
}
retval = ctl_invalidate_lun(cbe_lun);
if (retval != 0) {
snprintf(req->error_str, sizeof(req->error_str),
"error %d returned from ctl_invalidate_lun() for "
@ -2455,15 +2491,12 @@ ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
}
mtx_lock(&softc->lock);
be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
if (retval == EINTR)
break;
}
be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
@ -2478,18 +2511,15 @@ ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
softc->num_luns--;
mtx_unlock(&softc->lock);
taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
taskqueue_drain_all(be_lun->io_taskqueue);
taskqueue_free(be_lun->io_taskqueue);
ctl_be_block_close(be_lun);
if (be_lun->disk_stats != NULL)
devstat_remove_entry(be_lun->disk_stats);
uma_zdestroy(be_lun->lun_zone);
ctl_free_opts(&be_lun->cbe_lun.options);
ctl_free_opts(&cbe_lun->options);
free(be_lun->dev_path, M_CTLBLK);
mtx_destroy(&be_lun->queue_lock);
mtx_destroy(&be_lun->io_lock);
@ -2540,21 +2570,25 @@ ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
struct ctl_lun_req *req)
{
struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
struct ctl_be_block_devdata *dev_data;
int error;
struct ctl_lun_create_params *params = &be_lun->params;
struct cdevsw *csw;
struct cdev *dev;
uint64_t size_bytes;
int error, ref;
dev_data = &be_lun->backend.dev;
if (!dev_data->csw->d_ioctl) {
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw == NULL)
return (ENXIO);
if (csw->d_ioctl == NULL) {
dev_relthread(dev, ref);
snprintf(req->error_str, sizeof(req->error_str),
"no d_ioctl for device %s!", be_lun->dev_path);
return (ENODEV);
}
error = dev_data->csw->d_ioctl(dev_data->cdev, DIOCGMEDIASIZE,
(caddr_t)&size_bytes, FREAD,
curthread);
error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&size_bytes, FREAD,
curthread);
dev_relthread(dev, ref);
if (error) {
snprintf(req->error_str, sizeof(req->error_str),
"error %d returned for DIOCGMEDIASIZE ioctl "
@ -2587,8 +2621,9 @@ ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
struct ctl_lun_modify_params *params;
struct ctl_be_block_lun *be_lun;
struct ctl_be_lun *cbe_lun;
char *value;
uint64_t oldsize;
int error;
int error, wasprim;
params = &req->reqdata.modify;
@ -2611,23 +2646,51 @@ ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
be_lun->params.lun_size_bytes = params->lun_size_bytes;
ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
oldsize = be_lun->size_blocks;
if (be_lun->vn == NULL)
error = ctl_be_block_open(softc, be_lun, req);
else if (vn_isdisk(be_lun->vn, &error))
error = ctl_be_block_modify_dev(be_lun, req);
else if (be_lun->vn->v_type == VREG)
error = ctl_be_block_modify_file(be_lun, req);
wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
value = ctl_get_opt(&cbe_lun->options, "ha_role");
if (value != NULL) {
if (strcmp(value, "primary") == 0)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
else
cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
else
error = EINVAL;
cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
ctl_lun_primary(cbe_lun);
else
ctl_lun_secondary(cbe_lun);
}
oldsize = be_lun->size_blocks;
if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
if (be_lun->vn == NULL)
error = ctl_be_block_open(softc, be_lun, req);
else if (vn_isdisk(be_lun->vn, &error))
error = ctl_be_block_modify_dev(be_lun, req);
else if (be_lun->vn->v_type == VREG)
error = ctl_be_block_modify_file(be_lun, req);
else
error = EINVAL;
if ((cbe_lun->flags & CTL_LUN_FLAG_OFFLINE) &&
be_lun->vn != NULL) {
cbe_lun->flags &= ~CTL_LUN_FLAG_OFFLINE;
ctl_lun_online(cbe_lun);
}
} else {
if (be_lun->vn != NULL) {
cbe_lun->flags |= CTL_LUN_FLAG_OFFLINE;
ctl_lun_offline(cbe_lun);
taskqueue_drain_all(be_lun->io_taskqueue);
error = ctl_be_block_close(be_lun);
} else
error = 0;
}
if (be_lun->size_blocks != oldsize)
ctl_lun_capacity_changed(cbe_lun);
if ((cbe_lun->flags & CTL_LUN_FLAG_OFFLINE) &&
be_lun->vn != NULL) {
cbe_lun->flags &= ~CTL_LUN_FLAG_OFFLINE;
ctl_lun_online(cbe_lun);
}
/* Tell the user the exact size we ended up using */
params->lun_size_bytes = be_lun->size_bytes;

View File

@ -56,14 +56,18 @@ __FBSDID("$FreeBSD$");
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>
typedef enum {
@ -101,6 +105,7 @@ struct ctl_be_ramdisk_softc {
};
static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;
int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
@ -502,7 +507,7 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
mtx_unlock(&softc->lock);
if (retval == 0) {
taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
taskqueue_drain_all(be_lun->io_taskqueue);
taskqueue_free(be_lun->io_taskqueue);
ctl_free_opts(&be_lun->cbe_lun.options);
mtx_destroy(&be_lun->queue_lock);
@ -546,7 +551,13 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
else
cbe_lun->lun_type = T_DIRECT;
be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
cbe_lun->flags = CTL_LUN_FLAG_PRIMARY;
cbe_lun->flags = 0;
value = ctl_get_opt(&cbe_lun->options, "ha_role");
if (value != NULL) {
if (strcmp(value, "primary") == 0)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
if (cbe_lun->lun_type == T_DIRECT) {
if (params->blocksize_bytes != 0)
@ -717,7 +728,9 @@ ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
struct ctl_be_ramdisk_lun *be_lun;
struct ctl_be_lun *cbe_lun;
struct ctl_lun_modify_params *params;
char *value;
uint32_t blocksize;
int wasprim;
params = &req->reqdata.modify;
@ -739,15 +752,32 @@ ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
if (params->lun_size_bytes != 0)
be_lun->params.lun_size_bytes = params->lun_size_bytes;
ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
blocksize = be_lun->cbe_lun.blocksize;
wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
value = ctl_get_opt(&cbe_lun->options, "ha_role");
if (value != NULL) {
if (strcmp(value, "primary") == 0)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
else
cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
else
cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
ctl_lun_primary(cbe_lun);
else
ctl_lun_secondary(cbe_lun);
}
blocksize = be_lun->cbe_lun.blocksize;
if (be_lun->params.lun_size_bytes < blocksize) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN size %ju < blocksize %u", __func__,
be_lun->params.lun_size_bytes, blocksize);
goto bailout_error;
}
be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
be_lun->size_bytes = be_lun->size_blocks * blocksize;
be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;

View File

@ -69,8 +69,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -81,8 +80,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -93,8 +91,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -105,8 +102,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5e[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -123,8 +119,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -135,8 +130,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -147,8 +141,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -159,8 +152,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -171,8 +163,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -183,8 +174,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -195,8 +185,7 @@ const struct ctl_cmd_entry ctl_cmd_table_5f[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -339,7 +328,7 @@ const struct ctl_cmd_entry ctl_cmd_table_84[32] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -436,7 +425,6 @@ const struct ctl_cmd_entry ctl_cmd_table_9e[32] =
{ctl_read_capacity_16, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_READCAP,
@ -493,8 +481,8 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
{ctl_report_tagret_port_groups, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -507,8 +495,8 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
{ctl_report_supported_opcodes, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -518,8 +506,8 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
{ctl_report_supported_tmf, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -532,8 +520,8 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
{ctl_report_timestamp, CTL_SERIDX_INQ, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -563,8 +551,8 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_NO_SENSE |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0xff, 0x07}},
@ -624,8 +612,8 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_NO_SENSE |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE, 6, {0xe1, 0xff, 0xff, 0xff, 0x07}},
@ -640,8 +628,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 6, {0x11, 0, 0, 0xff, 0x07}},
@ -650,8 +637,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}},
@ -660,8 +646,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_NONE,
CTL_LUN_PAT_NONE, 6, {0, 0, 0, 0, 0x07}},
@ -675,8 +660,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
CTL_LUN_PAT_NONE, 6, {0x08, 0xff, 0xff, 0xff, 0x07}},
@ -685,7 +669,6 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_start_stop, CTL_SERIDX_START, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_FLAG_DATA_NONE |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE, 6, {0x01, 0, 0, 0x03, 0x07}},
@ -721,7 +704,6 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_read_capacity, CTL_SERIDX_RD_CAP, CTL_CMD_FLAG_OK_ON_SLUN|
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_READCAP, 10, {0, 0, 0, 0, 0, 0, 0, 0, 0x07}},
@ -812,7 +794,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_write_buffer, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE,
10, {0x1f, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}},
@ -821,7 +803,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_read_buffer, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
CTL_LUN_PAT_NONE,
@ -911,8 +893,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_mode_select, CTL_SERIDX_MD_SEL, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 10, {0x11, 0, 0, 0, 0, 0, 0xff, 0xff, 0x07} },
@ -921,8 +902,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 10, {0x02, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
@ -931,8 +911,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE, 10, {0x02, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
@ -946,8 +925,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
{ctl_mode_sense, CTL_SERIDX_MD_SNS, CTL_CMD_FLAG_OK_ON_BOTH |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_WRESV,
CTL_LUN_PAT_NONE, 10, {0x18, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0x07} },
@ -1199,8 +1177,8 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
CTL_CMD_FLAG_NO_SENSE |
CTL_CMD_FLAG_OK_ON_STOPPED |
CTL_CMD_FLAG_OK_ON_INOPERABLE |
CTL_CMD_FLAG_OK_ON_OFFLINE |
CTL_CMD_FLAG_OK_ON_SECONDARY |
CTL_CMD_FLAG_OK_ON_STANDBY |
CTL_CMD_FLAG_OK_ON_UNAVAIL |
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
@ -1315,33 +1293,17 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
/* BF VOLUME SET OUT */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* C0 - ISC_SEND_MSG_SHORT */
//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_NONE,
CTL_LUN_PAT_NONE,
16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
/* C0 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* C1 - ISC_SEND_MSG */
//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE,
16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
/* C1 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* C2 - ISC_WRITE */
//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_OUT,
CTL_LUN_PAT_NONE,
16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
/* C2 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* C3 - ISC_READ */
//{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE},
{ctl_isc, CTL_SERIDX_READ, CTL_CMD_FLAG_OK_ON_PROC | CTL_FLAG_DATA_IN,
CTL_LUN_PAT_NONE,
16, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
/* C3 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
/* C4 */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},

View File

@ -722,6 +722,18 @@ ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio)
SSD_ELEM_NONE);
}
/*
 * Build NOT READY sense data with ASC/ASCQ 0x04/0x0a into the given
 * ctl_scsiio: "Logical unit not ready, asymmetric access state
 * transition".
 */
void
ctl_set_lun_transit(struct ctl_scsiio *ctsio)
{

        /* "Logical unit not ready, asymmetric access state transition" */
        ctl_set_sense(ctsio,
                      /*current_error*/ 1,
                      /*sense_key*/ SSD_KEY_NOT_READY,
                      /*asc*/ 0x04,
                      /*ascq*/ 0x0a,
                      SSD_ELEM_NONE);
}
void
ctl_set_lun_standby(struct ctl_scsiio *ctsio)
{
@ -734,6 +746,18 @@ ctl_set_lun_standby(struct ctl_scsiio *ctsio)
SSD_ELEM_NONE);
}
/*
 * Build NOT READY sense data with ASC/ASCQ 0x04/0x0c into the given
 * ctl_scsiio: "Logical unit not ready, target port in unavailable
 * state".
 */
void
ctl_set_lun_unavail(struct ctl_scsiio *ctsio)
{

        /* "Logical unit not ready, target port in unavailable state" */
        ctl_set_sense(ctsio,
                      /*current_error*/ 1,
                      /*sense_key*/ SSD_KEY_NOT_READY,
                      /*asc*/ 0x04,
                      /*ascq*/ 0x0c,
                      SSD_ELEM_NONE);
}
void
ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio)
{

View File

@ -67,7 +67,9 @@ void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio);
void ctl_set_param_len_error(struct ctl_scsiio *ctsio);
void ctl_set_already_locked(struct ctl_scsiio *ctsio);
void ctl_set_unsupported_lun(struct ctl_scsiio *ctsio);
void ctl_set_lun_transit(struct ctl_scsiio *ctsio);
void ctl_set_lun_standby(struct ctl_scsiio *ctsio);
void ctl_set_lun_unavail(struct ctl_scsiio *ctsio);
void ctl_set_internal_failure(struct ctl_scsiio *ctsio, int sks_valid,
uint16_t retry_count);
void ctl_set_medium_error(struct ctl_scsiio *ctsio);

View File

@ -140,6 +140,7 @@ int
ctl_port_register(struct ctl_port *port)
{
struct ctl_softc *softc = control_softc;
struct ctl_port *tport, *nport;
void *pool;
int port_num;
int retval;
@ -149,10 +150,13 @@ ctl_port_register(struct ctl_port *port)
KASSERT(softc != NULL, ("CTL is not initialized"));
mtx_lock(&softc->ctl_lock);
port_num = ctl_ffz(softc->ctl_port_mask, CTL_MAX_PORTS);
if ((port_num == -1)
|| (ctl_set_mask(softc->ctl_port_mask, port_num) == -1)) {
port->targ_port = -1;
if (port->targ_port >= 0)
port_num = port->targ_port;
else
port_num = ctl_ffz(softc->ctl_port_mask,
softc->port_min, softc->port_max);
if ((port_num < 0) ||
(ctl_set_mask(softc->ctl_port_mask, port_num) < 0)) {
mtx_unlock(&softc->ctl_lock);
return (1);
}
@ -195,10 +199,17 @@ error:
STAILQ_INIT(&port->options);
mtx_lock(&softc->ctl_lock);
port->targ_port = port_num + softc->port_offset;
port->targ_port = port_num;
STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links);
STAILQ_INSERT_TAIL(&softc->port_list, port, links);
softc->ctl_ports[port_num] = port;
for (tport = NULL, nport = STAILQ_FIRST(&softc->port_list);
nport != NULL && nport->targ_port < port_num;
tport = nport, nport = STAILQ_NEXT(tport, links)) {
}
if (tport)
STAILQ_INSERT_AFTER(&softc->port_list, tport, port, links);
else
STAILQ_INSERT_HEAD(&softc->port_list, port, links);
softc->ctl_ports[port->targ_port] = port;
mtx_unlock(&softc->ctl_lock);
return (retval);
@ -209,7 +220,7 @@ ctl_port_deregister(struct ctl_port *port)
{
struct ctl_softc *softc = control_softc;
struct ctl_io_pool *pool;
int port_num, retval, i;
int retval, i;
retval = 0;
@ -224,10 +235,8 @@ ctl_port_deregister(struct ctl_port *port)
STAILQ_REMOVE(&softc->port_list, port, ctl_port, links);
STAILQ_REMOVE(&port->frontend->port_list, port, ctl_port, fe_links);
softc->num_ports--;
port_num = (port->targ_port < CTL_MAX_PORTS) ? port->targ_port :
port->targ_port - CTL_MAX_PORTS;
ctl_clear_mask(softc->ctl_port_mask, port_num);
softc->ctl_ports[port_num] = NULL;
ctl_clear_mask(softc->ctl_port_mask, port->targ_port);
softc->ctl_ports[port->targ_port] = NULL;
mtx_unlock(&softc->ctl_lock);
ctl_pool_free(pool);
@ -321,6 +330,7 @@ ctl_port_online(struct ctl_port *port)
port->port_online(port->onoff_arg);
/* XXX KDM need a lock here? */
port->status |= CTL_PORT_STATUS_ONLINE;
ctl_isc_announce_port(port);
}
void
@ -347,6 +357,7 @@ ctl_port_offline(struct ctl_port *port)
}
/* XXX KDM need a lock here? */
port->status &= ~CTL_PORT_STATUS_ONLINE;
ctl_isc_announce_port(port);
}
/*

View File

@ -125,12 +125,12 @@ struct ctl_wwpn_iid {
* port_online(): This function is called, with onoff_arg as its
* argument, by the CTL layer when it wants the FETD
* to start responding to selections on the specified
* target ID. (targ_target)
* target ID.
*
* port_offline(): This function is called, with onoff_arg as its
* argument, by the CTL layer when it wants the FETD
* to stop responding to selection on the specified
* target ID. (targ_target)
* target ID.
*
* onoff_arg: This is supplied as an argument to port_online()
* and port_offline(). This is specified by the

View File

@ -157,6 +157,7 @@ cfcs_init(void)
/* XXX These should probably be fetched from CTL. */
port->max_targets = 1;
port->max_target_id = 15;
port->targ_port = -1;
retval = ctl_port_register(port);
if (retval != 0) {
@ -546,12 +547,8 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
* down via the XPT_RESET_BUS/LUN CCBs below.
*/
io->io_hdr.io_type = CTL_IO_SCSI;
io->io_hdr.nexus.initid.id = 1;
io->io_hdr.nexus.initid = 1;
io->io_hdr.nexus.targ_port = softc->port.targ_port;
/*
* XXX KDM how do we handle target IDs?
*/
io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
/*
* This tag scheme isn't the best, since we could in theory
@ -639,9 +636,8 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.io_ptr = io;
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = 1;
io->io_hdr.nexus.initid = 1;
io->io_hdr.nexus.targ_port = softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
io->taskio.task_action = CTL_TASK_ABORT_TASK;
io->taskio.tag_num = abort_ccb->csio.tag_id;
@ -735,9 +731,8 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.io_ptr = io;
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = 0;
io->io_hdr.nexus.initid = 1;
io->io_hdr.nexus.targ_port = softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = ccb->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = ccb->ccb_h.target_lun;
if (ccb->ccb_h.func_code == XPT_RESET_BUS)
io->taskio.task_action = CTL_TASK_BUS_RESET;

View File

@ -93,6 +93,7 @@ cfi_init(void)
port->fe_done = cfi_done;
port->max_targets = 1;
port->max_target_id = 0;
port->targ_port = -1;
port->max_initiators = 1;
if (ctl_port_register(port) != 0) {

View File

@ -564,9 +564,8 @@ cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request)
ctl_zero_io(io);
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request;
io->io_hdr.io_type = CTL_IO_SCSI;
io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
io->io_hdr.nexus.initid = cs->cs_ctl_initid;
io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
io->io_hdr.nexus.targ_target.id = 0;
io->io_hdr.nexus.targ_lun = cfiscsi_decode_lun(bhssc->bhssc_lun);
io->scsiio.tag_num = bhssc->bhssc_initiator_task_tag;
switch ((bhssc->bhssc_flags & BHSSC_FLAGS_ATTR)) {
@ -621,9 +620,8 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
ctl_zero_io(io);
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request;
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
io->io_hdr.nexus.initid = cs->cs_ctl_initid;
io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
io->io_hdr.nexus.targ_target.id = 0;
io->io_hdr.nexus.targ_lun = cfiscsi_decode_lun(bhstmr->bhstmr_lun);
io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
@ -1120,9 +1118,8 @@ cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
ctl_zero_io(io);
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs;
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
io->io_hdr.nexus.initid = cs->cs_ctl_initid;
io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
io->io_hdr.nexus.targ_target.id = 0;
io->io_hdr.nexus.targ_lun = 0;
io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
@ -2105,6 +2102,7 @@ cfiscsi_ioctl_port_create(struct ctl_req *req)
/* XXX These should probably be fetched from CTL. */
port->max_targets = 1;
port->max_target_id = 15;
port->targ_port = -1;
port->options = opts;
STAILQ_INIT(&opts);

958
sys/cam/ctl/ctl_ha.c Normal file
View File

@ -0,0 +1,958 @@
/*-
* Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <vm/uma.h>
#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_error.h>
#if (__FreeBSD_version < 1100000)
/*
 * Minimal mbuf packet queue for pre-1100000 FreeBSD (guarded by the
 * surrounding __FreeBSD_version check), chained through m_nextpkt.
 */
struct mbufq {
        struct mbuf *head;      /* first packet, NULL when queue is empty */
        struct mbuf *tail;      /* last packet, NULL when queue is empty */
};
/*
 * Initialize a queue to the empty state.  The length limit argument is
 * accepted for API compatibility but not enforced by this shim.
 */
static void
mbufq_init(struct mbufq *q, int limit)
{

        q->head = NULL;
        q->tail = NULL;
}
/* Free every queued packet and leave the queue empty. */
static void
mbufq_drain(struct mbufq *q)
{
        struct mbuf *m, *next;

        for (m = q->head; m != NULL; m = next) {
                next = m->m_nextpkt;
                m_freem(m);
        }
        q->head = NULL;
        q->tail = NULL;
}
/*
 * Remove and return the packet at the head of the queue, or NULL when
 * the queue is empty.  The returned chain is unlinked (m_nextpkt
 * cleared).
 */
static struct mbuf *
mbufq_dequeue(struct mbufq *q)
{
        struct mbuf *m;

        m = q->head;
        if (m == NULL)
                return (NULL);
        if (q->tail == m)
                q->tail = NULL;
        q->head = m->m_nextpkt;
        m->m_nextpkt = NULL;
        return (m);
}
/* Append a packet at the tail of the queue. */
static void
mbufq_enqueue(struct mbufq *q, struct mbuf *m)
{

        m->m_nextpkt = NULL;
        if (q->tail == NULL)
                q->head = m;            /* queue was empty */
        else
                q->tail->m_nextpkt = m;
        q->tail = m;
}
/*
 * Compatibility wrapper for older FreeBSD: report the number of bytes
 * available in a socket buffer, which old headers expose directly as
 * sb_cc.
 */
static u_int
sbavail(struct sockbuf *sb)
{
        return (sb->sb_cc);
}
#if (__FreeBSD_version < 1000000)
#define mtodo(m, o) ((void *)(((m)->m_data) + (o)))
#endif
#endif
/*
 * Wire header preceding every HA message exchanged over the socket.
 */
struct ha_msg_wire {
        uint32_t channel;       /* destination ctl_ha_channel */
        uint32_t length;        /* payload bytes following this header */
};
/*
 * Wire format of a data-transfer request between HA peers.
 * NOTE(review): no users are visible in this chunk; field semantics
 * below are inferred from the names -- confirm against the DT users.
 */
struct ha_dt_msg_wire {
        ctl_ha_dt_cmd command;  /* data-transfer command code */
        uint32_t size;          /* presumably transfer size in bytes */
        uint8_t *local;         /* presumably local buffer address */
        uint8_t *remote;        /* presumably peer buffer address */
};
/*
 * Singleton state for the HA (high availability) TCP link to the peer
 * controller.
 */
struct ha_softc {
        struct ctl_softc *ha_ctl_softc; /* back pointer to the CTL softc */
        ctl_evt_handler ha_handler[CTL_HA_CHAN_MAX]; /* per-channel event callbacks */
        char ha_peer[128];              /* peer configuration string -- format not shown here, TODO confirm */
        struct sockaddr_in ha_peer_in;  /* peer IPv4 address used by soconnect() */
        struct socket *ha_lso;          /* listen socket (see ctl_ha_lclose()) */
        struct socket *ha_so;           /* connected data socket */
        struct mbufq ha_sendq;          /* queued outgoing messages */
        struct mbuf *ha_sending;        /* message currently being pushed to the socket */
        struct mtx ha_lock;             /* protects ha_sendq and ha_wakeup */
        int ha_connect;                 /* nonzero: this side initiates the connection */
        int ha_listen;                  /* nonzero: this side accepts connections */
        int ha_connected;               /* link is established */
        int ha_receiving;               /* receiver thread is running */
        int ha_wakeup;                  /* wakeup flag for the connection thread */
        int ha_disconnect;              /* disconnect has been requested */
        TAILQ_HEAD(, ctl_ha_dt_req) ha_dts; /* outstanding data-transfer requests */
} ha_softc;
extern struct ctl_softc *control_softc;
/*
 * Wake the HA connection thread: set the wakeup flag under ha_lock so
 * the flag write is ordered before the wakeup, then wake any sleeper on
 * the flag address.
 */
static void
ctl_ha_conn_wake(struct ha_softc *softc)
{

        mtx_lock(&softc->ha_lock);
        softc->ha_wakeup = 1;
        mtx_unlock(&softc->ha_lock);
        wakeup(&softc->ha_wakeup);
}
/*
 * Socket receive upcall used for the listen socket (cleared by
 * ctl_ha_lclose()): nudge the connection thread.
 */
static int
ctl_ha_lupcall(struct socket *so, void *arg, int waitflag)
{

        ctl_ha_conn_wake((struct ha_softc *)arg);
        return (SU_OK);
}
/*
 * Receive upcall for the data socket: wake the receiver thread, which
 * sleeps on ha_receiving (see ctl_ha_rx_thread()).
 */
static int
ctl_ha_rupcall(struct socket *so, void *arg, int waitflag)
{
        struct ha_softc *sc = arg;

        wakeup(&sc->ha_receiving);
        return (SU_OK);
}
/*
 * Send upcall for the data socket: wake the connection thread so it can
 * push more queued data.
 */
static int
ctl_ha_supcall(struct socket *so, void *arg, int waitflag)
{

        ctl_ha_conn_wake((struct ha_softc *)arg);
        return (SU_OK);
}
/*
 * Deliver an HA event.  A channel below CTL_HA_CHAN_MAX gets the event
 * on that channel only; any larger value broadcasts the event to every
 * channel that has a handler registered.
 */
static void
ctl_ha_evt(struct ha_softc *softc, ctl_ha_channel ch, ctl_ha_event evt,
    int param)
{
        int idx;

        if (ch < CTL_HA_CHAN_MAX) {
                if (softc->ha_handler[ch] != NULL)
                        softc->ha_handler[ch](ch, evt, param);
                return;
        }

        /* Broadcast to all registered channels. */
        for (idx = 0; idx < CTL_HA_CHAN_MAX; idx++) {
                if (softc->ha_handler[idx] != NULL)
                        softc->ha_handler[idx](idx, evt, param);
        }
}
/*
 * Tear down the HA data connection.  Drops queued and in-flight send
 * data, stops the receiver thread, clears both socket upcalls and
 * closes the socket.  If anything was actually torn down, a link state
 * change is broadcast to all registered channels.
 */
static void
ctl_ha_close(struct ha_softc *softc)
{
        struct socket *so = softc->ha_so;
        int report = 0;

        if (softc->ha_connected || softc->ha_disconnect) {
                softc->ha_connected = 0;
                mbufq_drain(&softc->ha_sendq);
                m_freem(softc->ha_sending);
                softc->ha_sending = NULL;
                report = 1;
        }
        if (so) {
                /*
                 * Wake the receiver thread and wait for it to exit
                 * before the socket goes away.
                 */
                SOCKBUF_LOCK(&so->so_rcv);
                soupcall_clear(so, SO_RCV);
                while (softc->ha_receiving) {
                        wakeup(&softc->ha_receiving);
                        msleep(&softc->ha_receiving, SOCKBUF_MTX(&so->so_rcv),
                            0, "ha_rx exit", 0);
                }
                SOCKBUF_UNLOCK(&so->so_rcv);
                SOCKBUF_LOCK(&so->so_snd);
                soupcall_clear(so, SO_SND);
                SOCKBUF_UNLOCK(&so->so_snd);
                softc->ha_so = NULL;
                /* On the connecting side, pause -- presumably to rate-limit reconnects. */
                if (softc->ha_connect)
                        pause("reconnect", hz / 2);
                soclose(so);
        }
        if (report) {
                ctl_ha_evt(softc, CTL_HA_CHAN_MAX, CTL_HA_EVT_LINK_CHANGE,
                    (softc->ha_connect || softc->ha_listen) ?
                    CTL_HA_LINK_UNKNOWN : CTL_HA_LINK_OFFLINE);
        }
}
static void
ctl_ha_lclose(struct ha_softc *softc)
{
if (softc->ha_lso) {
SOCKBUF_LOCK(&softc->ha_lso->so_rcv);
soupcall_clear(softc->ha_lso, SO_RCV);
SOCKBUF_UNLOCK(&softc->ha_lso->so_rcv);
soclose(softc->ha_lso);
softc->ha_lso = NULL;
}
}
/*
 * Receiver thread for the HA connection.  Alternates between reading a
 * struct ha_msg_wire header itself and handing payload delivery to the
 * addressed channel's event handler (via CTL_HA_EVT_MSG_RECV with the
 * pending length).  Sleeps on the socket receive buffer until a full
 * header or full payload is available, and exits when the link drops or
 * the socket errors out.
 */
static void
ctl_ha_rx_thread(void *arg)
{
        struct ha_softc *softc = arg;
        struct socket *so = softc->ha_so;
        struct ha_msg_wire wire_hdr;
        struct uio uio;
        struct iovec iov;
        int error, flags, next;

        bzero(&wire_hdr, sizeof(wire_hdr));
        while (1) {
                /*
                 * Wait until the next full unit -- a header, or the
                 * payload announced by the last header -- is buffered.
                 */
                if (wire_hdr.length > 0)
                        next = wire_hdr.length;
                else
                        next = sizeof(wire_hdr);
                SOCKBUF_LOCK(&so->so_rcv);
                while (sbavail(&so->so_rcv) < next) {
                        if (softc->ha_connected == 0 || so->so_error ||
                            (so->so_rcv.sb_state & SBS_CANTRCVMORE)) {
                                goto errout;
                        }
                        /* Have the upcall fire once 'next' bytes arrived. */
                        so->so_rcv.sb_lowat = next;
                        msleep(&softc->ha_receiving, SOCKBUF_MTX(&so->so_rcv),
                            0, "-", 0);
                }
                SOCKBUF_UNLOCK(&so->so_rcv);
                if (wire_hdr.length == 0) {
                        /* Read the next message header. */
                        iov.iov_base = &wire_hdr;
                        iov.iov_len = sizeof(wire_hdr);
                        uio.uio_iov = &iov;
                        uio.uio_iovcnt = 1;
                        uio.uio_rw = UIO_READ;
                        uio.uio_segflg = UIO_SYSSPACE;
                        uio.uio_td = curthread;
                        uio.uio_resid = sizeof(wire_hdr);
                        flags = MSG_DONTWAIT;
                        error = soreceive(softc->ha_so, NULL, &uio, NULL,
                            NULL, &flags);
                        if (error != 0) {
                                printf("%s: header receive error %d\n",
                                    __func__, error);
                                /* errout expects so_rcv locked. */
                                SOCKBUF_LOCK(&so->so_rcv);
                                goto errout;
                        }
                } else {
                        /* Let the addressed channel consume the payload. */
                        ctl_ha_evt(softc, wire_hdr.channel,
                            CTL_HA_EVT_MSG_RECV, wire_hdr.length);
                        wire_hdr.length = 0;
                }
        }

errout:
        /* Announce our exit to ctl_ha_close() and the connection thread. */
        softc->ha_receiving = 0;
        wakeup(&softc->ha_receiving);
        SOCKBUF_UNLOCK(&so->so_rcv);
        ctl_ha_conn_wake(softc);
        kthread_exit();
}
/*
 * Push queued messages from ha_sendq down the connected socket.  Stops
 * when the queue is empty (then raises sb_lowat above sb_hiwat so the
 * send upcall stays quiet) or when the socket buffer cannot hold the
 * next message (then sets sb_lowat to that message's length so the
 * upcall fires once enough space frees up).
 *
 * Fix: dropped the stray null statement (';' after the while block's
 * closing brace) that terminated the loop in the original.
 */
static void
ctl_ha_send(struct ha_softc *softc)
{
        struct socket *so = softc->ha_so;
        int error;

        while (1) {
                if (softc->ha_sending == NULL) {
                        mtx_lock(&softc->ha_lock);
                        softc->ha_sending = mbufq_dequeue(&softc->ha_sendq);
                        mtx_unlock(&softc->ha_lock);
                        if (softc->ha_sending == NULL) {
                                /* Queue drained; mute the send upcall. */
                                so->so_snd.sb_lowat = so->so_snd.sb_hiwat + 1;
                                break;
                        }
                }
                SOCKBUF_LOCK(&so->so_snd);
                if (sbspace(&so->so_snd) < softc->ha_sending->m_pkthdr.len) {
                        /* No room now; the upcall will retry when there is. */
                        so->so_snd.sb_lowat = softc->ha_sending->m_pkthdr.len;
                        SOCKBUF_UNLOCK(&so->so_snd);
                        break;
                }
                SOCKBUF_UNLOCK(&so->so_snd);
                /* Ownership of the chain passes to sosend(); clear it unconditionally. */
                error = sosend(softc->ha_so, NULL, NULL, softc->ha_sending,
                    NULL, MSG_DONTWAIT, curthread);
                softc->ha_sending = NULL;
                if (error != 0) {
                        printf("%s: sosend() error %d\n", __func__, error);
                        return;
                }
        }
}
/*
 * Apply common settings to a new HA socket: 1 MB send/receive buffers,
 * receive/send upcalls with low-water marks sized to the wire header,
 * SO_KEEPALIVE with aggressive TCP keep-alive timing, and TCP_NODELAY.
 * Option failures are logged but not treated as fatal.
 */
static void
ctl_ha_sock_setup(struct ha_softc *softc)
{
        struct sockopt opt;
        struct socket *so = softc->ha_so;
        int error, val;

        val = 1024 * 1024;
        error = soreserve(so, val, val);
        if (error)
                printf("%s: soreserve failed %d\n", __func__, error);

        SOCKBUF_LOCK(&so->so_rcv);
        so->so_rcv.sb_lowat = sizeof(struct ha_msg_wire);
        soupcall_set(so, SO_RCV, ctl_ha_rupcall, softc);
        SOCKBUF_UNLOCK(&so->so_rcv);
        SOCKBUF_LOCK(&so->so_snd);
        so->so_snd.sb_lowat = sizeof(struct ha_msg_wire);
        soupcall_set(so, SO_SND, ctl_ha_supcall, softc);
        SOCKBUF_UNLOCK(&so->so_snd);

        /* opt.sopt_val points at val; each sosetopt() below re-reads it. */
        bzero(&opt, sizeof(struct sockopt));
        opt.sopt_dir = SOPT_SET;
        opt.sopt_level = SOL_SOCKET;
        opt.sopt_name = SO_KEEPALIVE;
        opt.sopt_val = &val;
        opt.sopt_valsize = sizeof(val);
        val = 1;
        error = sosetopt(so, &opt);
        if (error)
                printf("%s: KEEPALIVE setting failed %d\n", __func__, error);

        /* Remaining options are all at the TCP level. */
        opt.sopt_level = IPPROTO_TCP;
        opt.sopt_name = TCP_NODELAY;
        val = 1;
        error = sosetopt(so, &opt);
        if (error)
                printf("%s: NODELAY setting failed %d\n", __func__, error);

        opt.sopt_name = TCP_KEEPINIT;
        val = 3;
        error = sosetopt(so, &opt);
        if (error)
                printf("%s: KEEPINIT setting failed %d\n", __func__, error);

        opt.sopt_name = TCP_KEEPIDLE;
        val = 1;
        error = sosetopt(so, &opt);
        if (error)
                printf("%s: KEEPIDLE setting failed %d\n", __func__, error);

        opt.sopt_name = TCP_KEEPINTVL;
        val = 1;
        error = sosetopt(so, &opt);
        if (error)
                printf("%s: KEEPINTVL setting failed %d\n", __func__, error);

        opt.sopt_name = TCP_KEEPCNT;
        val = 5;
        error = sosetopt(so, &opt);
        if (error)
                printf("%s: KEEPCNT setting failed %d\n", __func__, error);
}
static int
ctl_ha_connect(struct ha_softc *softc)
{
struct thread *td = curthread;
struct socket *so;
int error;
/* Create the socket */
error = socreate(PF_INET, &so, SOCK_STREAM,
IPPROTO_TCP, td->td_ucred, td);
if (error != 0) {
printf("%s: socreate() error %d\n", __func__, error);
return (error);
}
softc->ha_so = so;
ctl_ha_sock_setup(softc);
error = soconnect(so, (struct sockaddr *)&softc->ha_peer_in, td);
if (error != 0) {
printf("%s: soconnect() error %d\n", __func__, error);
goto out;
}
return (0);
out:
ctl_ha_close(softc);
return (error);
}
/*
 * Take one completed connection off the listening socket's queue and
 * install it as the active HA socket.  Returns 0 on success, EWOULDBLOCK
 * if no connection is pending, or an error code after closing the
 * listening socket.
 */
static int
ctl_ha_accept(struct ha_softc *softc)
{
	struct socket *so;
	struct sockaddr *sap;
	int error;
	ACCEPT_LOCK();
	/* Treat a shut-down listening socket as an aborted connection. */
	if (softc->ha_lso->so_rcv.sb_state & SBS_CANTRCVMORE)
		softc->ha_lso->so_error = ECONNABORTED;
	if (softc->ha_lso->so_error) {
		error = softc->ha_lso->so_error;
		softc->ha_lso->so_error = 0;
		ACCEPT_UNLOCK();
		printf("%s: socket error %d\n", __func__, error);
		goto out;
	}
	/* Grab the first fully established connection, if any. */
	so = TAILQ_FIRST(&softc->ha_lso->so_comp);
	if (so == NULL) {
		ACCEPT_UNLOCK();
		return (EWOULDBLOCK);
	}
	KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
	KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
	/*
	 * Before changing the flags on the socket, we have to bump the
	 * reference count.  Otherwise, if the protocol calls sofree(),
	 * the socket will be released due to a zero refcount.
	 */
	SOCK_LOCK(so);			/* soref() and so_state update */
	soref(so);			/* file descriptor reference */
	/* Detach it from the listen queue and mark it non-blocking. */
	TAILQ_REMOVE(&softc->ha_lso->so_comp, so, so_list);
	softc->ha_lso->so_qlen--;
	so->so_state |= SS_NBIO;
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	/* soaccept() may allocate the peer name; free it in all cases. */
	sap = NULL;
	error = soaccept(so, &sap);
	if (error != 0) {
		printf("%s: soaccept() error %d\n", __func__, error);
		if (sap != NULL)
			free(sap, M_SONAME);
		goto out;
	}
	if (sap != NULL)
		free(sap, M_SONAME);
	softc->ha_so = so;
	ctl_ha_sock_setup(softc);
	return (0);
out:
	ctl_ha_lclose(softc);
	return (error);
}
static int
ctl_ha_listen(struct ha_softc *softc)
{
struct thread *td = curthread;
struct sockopt opt;
int error, val;
/* Create the socket */
if (softc->ha_lso == NULL) {
error = socreate(PF_INET, &softc->ha_lso, SOCK_STREAM,
IPPROTO_TCP, td->td_ucred, td);
if (error != 0) {
printf("%s: socreate() error %d\n", __func__, error);
return (error);
}
bzero(&opt, sizeof(struct sockopt));
opt.sopt_dir = SOPT_SET;
opt.sopt_level = SOL_SOCKET;
opt.sopt_name = SO_REUSEADDR;
opt.sopt_val = &val;
opt.sopt_valsize = sizeof(val);
val = 1;
error = sosetopt(softc->ha_lso, &opt);
if (error) {
printf("%s: REUSEADDR setting failed %d\n",
__func__, error);
}
SOCKBUF_LOCK(&softc->ha_lso->so_rcv);
soupcall_set(softc->ha_lso, SO_RCV, ctl_ha_lupcall, softc);
SOCKBUF_UNLOCK(&softc->ha_lso->so_rcv);
}
error = sobind(softc->ha_lso, (struct sockaddr *)&softc->ha_peer_in, td);
if (error != 0) {
printf("%s: sobind() error %d\n", __func__, error);
goto out;
}
error = solisten(softc->ha_lso, 1, td);
if (error != 0) {
printf("%s: solisten() error %d\n", __func__, error);
goto out;
}
return (0);
out:
ctl_ha_lclose(softc);
return (error);
}
/*
 * HA connection state machine thread ("ha_tx").  Maintains the
 * connect/listen/accept life cycle of the peer socket, spawns the
 * receive thread once a connection comes up, and drives transmission of
 * queued messages.  Loops forever.
 */
static void
ctl_ha_conn_thread(void *arg)
{
	struct ha_softc *softc = arg;
	int error;
	while (1) {
		/* Tear everything down on request or on socket failure. */
		if (softc->ha_disconnect) {
			ctl_ha_close(softc);
			ctl_ha_lclose(softc);
			softc->ha_disconnect = 0;
		} else if (softc->ha_so != NULL &&
		    (softc->ha_so->so_error ||
		    softc->ha_so->so_rcv.sb_state & SBS_CANTRCVMORE))
			ctl_ha_close(softc);
		/* No active socket: try to make progress toward one. */
		if (softc->ha_so == NULL) {
			if (softc->ha_lso != NULL)
				ctl_ha_accept(softc);
			else if (softc->ha_listen)
				ctl_ha_listen(softc);
			else if (softc->ha_connect)
				ctl_ha_connect(softc);
		}
		if (softc->ha_so != NULL) {
			/*
			 * Connection just completed: announce the link as
			 * online and start the receive thread.
			 */
			if (softc->ha_connected == 0 &&
			    softc->ha_so->so_error == 0 &&
			    (softc->ha_so->so_state & SS_ISCONNECTING) == 0) {
				softc->ha_connected = 1;
				ctl_ha_evt(softc, CTL_HA_CHAN_MAX,
				    CTL_HA_EVT_LINK_CHANGE,
				    CTL_HA_LINK_ONLINE);
				softc->ha_receiving = 1;
				error = kproc_kthread_add(ctl_ha_rx_thread,
				    softc, &softc->ha_ctl_softc->ctl_proc,
				    NULL, 0, 0, "ctl", "ha_rx");
				if (error != 0) {
					printf("Error creating CTL HA rx thread!\n");
					softc->ha_receiving = 0;
					softc->ha_disconnect = 1;
				}
			}
			ctl_ha_send(softc);
		}
		mtx_lock(&softc->ha_lock);
		/*
		 * Iterate again immediately if the socket has failed;
		 * otherwise sleep up to one second unless a wakeup is
		 * already pending.
		 */
		if (softc->ha_so != NULL &&
		    (softc->ha_so->so_error ||
		    softc->ha_so->so_rcv.sb_state & SBS_CANTRCVMORE))
			;
		else if (!softc->ha_wakeup)
			msleep(&softc->ha_wakeup, &softc->ha_lock, 0, "-", hz);
		softc->ha_wakeup = 0;
		mtx_unlock(&softc->ha_lock);
	}
}
static int
ctl_ha_peer_sysctl(SYSCTL_HANDLER_ARGS)
{
struct ha_softc *softc = (struct ha_softc *)arg1;
struct sockaddr_in *sa;
int error, b1, b2, b3, b4, p, num;
error = sysctl_handle_string(oidp, softc->ha_peer,
sizeof(softc->ha_peer), req);
if ((error != 0) || (req->newptr == NULL))
return (error);
sa = &softc->ha_peer_in;
mtx_lock(&softc->ha_lock);
if ((num = sscanf(softc->ha_peer, "connect %d.%d.%d.%d:%d",
&b1, &b2, &b3, &b4, &p)) >= 4) {
softc->ha_connect = 1;
softc->ha_listen = 0;
} else if ((num = sscanf(softc->ha_peer, "listen %d.%d.%d.%d:%d",
&b1, &b2, &b3, &b4, &p)) >= 4) {
softc->ha_connect = 0;
softc->ha_listen = 1;
} else {
softc->ha_connect = 0;
softc->ha_listen = 0;
if (softc->ha_peer[0] != 0)
error = EINVAL;
}
if (softc->ha_connect || softc->ha_listen) {
memset(sa, 0, sizeof(*sa));
sa->sin_len = sizeof(struct sockaddr_in);
sa->sin_family = AF_INET;
sa->sin_port = htons((num >= 5) ? p : 999);
sa->sin_addr.s_addr =
htonl((b1 << 24) + (b2 << 16) + (b3 << 8) + b4);
}
softc->ha_disconnect = 1;
softc->ha_wakeup = 1;
mtx_unlock(&softc->ha_lock);
wakeup(&softc->ha_wakeup);
return (error);
}
/*
 * Register the event handler invoked for message arrival and link state
 * changes on the given channel.
 */
ctl_ha_status
ctl_ha_msg_register(ctl_ha_channel channel, ctl_evt_handler handler)
{

	KASSERT(channel < CTL_HA_CHAN_MAX,
	    ("Wrong CTL HA channel %d", channel));
	ha_softc.ha_handler[channel] = handler;
	return (CTL_HA_STATUS_SUCCESS);
}
/*
 * Remove the event handler previously registered for the given channel.
 */
ctl_ha_status
ctl_ha_msg_deregister(ctl_ha_channel channel)
{

	KASSERT(channel < CTL_HA_CHAN_MAX,
	    ("Wrong CTL HA channel %d", channel));
	ha_softc.ha_handler[channel] = NULL;
	return (CTL_HA_STATUS_SUCCESS);
}
/*
 * Receive a message of the specified size directly into the caller's
 * buffer.  With wait set, blocks until the full length has arrived; any
 * receive error forces a disconnect of the HA link.
 */
ctl_ha_status
ctl_ha_msg_recv(ctl_ha_channel channel, void *addr, size_t len,
		int wait)
{
	struct ha_softc *softc = &ha_softc;
	struct uio io;
	struct iovec vec;
	int error, rflags;

	if (!softc->ha_connected)
		return (CTL_HA_STATUS_DISCONNECT);

	vec.iov_base = addr;
	vec.iov_len = len;
	io.uio_iov = &vec;
	io.uio_iovcnt = 1;
	io.uio_rw = UIO_READ;
	io.uio_segflg = UIO_SYSSPACE;
	io.uio_td = curthread;
	io.uio_resid = len;
	rflags = wait ? 0 : MSG_DONTWAIT;
	error = soreceive(softc->ha_so, NULL, &io, NULL, NULL, &rflags);
	if (error == 0)
		return (CTL_HA_STATUS_SUCCESS);

	/* Consider all errors fatal for HA sanity. */
	mtx_lock(&softc->ha_lock);
	if (softc->ha_connected) {
		softc->ha_disconnect = 1;
		softc->ha_wakeup = 1;
		wakeup(&softc->ha_wakeup);
	}
	mtx_unlock(&softc->ha_lock);
	return (CTL_HA_STATUS_ERROR);
}
/*
 * Send a message of the specified size.
 *
 * The payload may be supplied in two pieces (addr/len and addr2/len2);
 * both are copied behind a struct ha_msg_wire header into one mbuf
 * packet, which is queued for the connection thread to transmit.
 * Success means "queued", not "transmitted".
 */
ctl_ha_status
ctl_ha_msg_send2(ctl_ha_channel channel, const void *addr, size_t len,
    const void *addr2, size_t len2, int wait)
{
	struct ha_softc *softc = &ha_softc;
	struct mbuf *mb, *newmb;
	struct ha_msg_wire hdr;
	size_t copylen, off;
	if (!softc->ha_connected)
		return (CTL_HA_STATUS_DISCONNECT);
	newmb = m_getm2(NULL, sizeof(hdr) + len + len2, wait, MT_DATA,
	    M_PKTHDR);
	if (newmb == NULL) {
		/* Consider all errors fatal for HA sanity. */
		mtx_lock(&softc->ha_lock);
		if (softc->ha_connected) {
			softc->ha_disconnect = 1;
			softc->ha_wakeup = 1;
			wakeup(&softc->ha_wakeup);
		}
		mtx_unlock(&softc->ha_lock);
		printf("%s: Can't allocate mbuf chain\n", __func__);
		return (CTL_HA_STATUS_ERROR);
	}
	/* The wire header goes first, in the first mbuf. */
	hdr.channel = channel;
	hdr.length = len + len2;
	mb = newmb;
	memcpy(mtodo(mb, 0), &hdr, sizeof(hdr));
	mb->m_len += sizeof(hdr);
	/* Copy the first payload piece across the chain. */
	off = 0;
	for (; mb != NULL && off < len; mb = mb->m_next) {
		copylen = min(M_TRAILINGSPACE(mb), len - off);
		memcpy(mtodo(mb, mb->m_len), (const char *)addr + off, copylen);
		mb->m_len += copylen;
		off += copylen;
		/*
		 * Break without advancing mb so the second piece continues
		 * filling this mbuf's remaining trailing space.
		 */
		if (off == len)
			break;
	}
	KASSERT(off == len, ("%s: off (%zu) != len (%zu)", __func__,
	    off, len));
	/* Copy the second payload piece, continuing where the first ended. */
	off = 0;
	for (; mb != NULL && off < len2; mb = mb->m_next) {
		copylen = min(M_TRAILINGSPACE(mb), len2 - off);
		memcpy(mtodo(mb, mb->m_len), (const char *)addr2 + off, copylen);
		mb->m_len += copylen;
		off += copylen;
	}
	KASSERT(off == len2, ("%s: off (%zu) != len2 (%zu)", __func__,
	    off, len2));
	newmb->m_pkthdr.len = sizeof(hdr) + len + len2;
	/* Queue the packet and kick the connection thread, unless the
	 * link went down while we were building it. */
	mtx_lock(&softc->ha_lock);
	if (!softc->ha_connected) {
		mtx_unlock(&softc->ha_lock);
		m_freem(newmb);
		return (CTL_HA_STATUS_DISCONNECT);
	}
	mbufq_enqueue(&softc->ha_sendq, newmb);
	softc->ha_wakeup = 1;
	mtx_unlock(&softc->ha_lock);
	wakeup(&softc->ha_wakeup);
	return (CTL_HA_STATUS_SUCCESS);
}
/*
 * Send a single-buffer message: a two-part send with an empty second
 * piece.
 */
ctl_ha_status
ctl_ha_msg_send(ctl_ha_channel channel, const void *addr, size_t len,
		int wait)
{

	return (ctl_ha_msg_send2(channel, addr, len, NULL, 0, wait));
}
/*
 * Allocate a zeroed data transfer request structure; sleeps until memory
 * is available.
 */
struct ctl_ha_dt_req *
ctl_dt_req_alloc(void)
{
	struct ctl_ha_dt_req *req;

	req = malloc(sizeof(*req), M_CTL, M_WAITOK | M_ZERO);
	return (req);
}
/*
 * Free a data transfer request structure previously obtained from
 * ctl_dt_req_alloc().
 */
void
ctl_dt_req_free(struct ctl_ha_dt_req *req)
{
	free(req, M_CTL);
}
/*
 * Issue a DMA request for a single buffer.  READ requests with a
 * callback are queued and completed asynchronously by the data-channel
 * event handler; all other requests return the send status directly.
 */
ctl_ha_status
ctl_dt_single(struct ctl_ha_dt_req *req)
{
	struct ha_softc *softc = &ha_softc;
	struct ha_dt_msg_wire wire_dt;

	/* Build the on-wire descriptor from the request. */
	wire_dt.command = req->command;
	wire_dt.size = req->size;
	wire_dt.local = req->local;
	wire_dt.remote = req->remote;

	if (req->command == CTL_HA_DT_CMD_READ) {
		if (req->callback != NULL) {
			/*
			 * Asynchronous read: queue the request so the
			 * completion handler can find it, then ask the
			 * peer to push the data back.
			 */
			mtx_lock(&softc->ha_lock);
			TAILQ_INSERT_TAIL(&softc->ha_dts, req, links);
			mtx_unlock(&softc->ha_lock);
			ctl_ha_msg_send(CTL_HA_CHAN_DATA, &wire_dt,
			    sizeof(wire_dt), M_WAITOK);
			return (CTL_HA_STATUS_WAIT);
		}
		/* Synchronous read: just send the descriptor. */
		return (ctl_ha_msg_send(CTL_HA_CHAN_DATA, &wire_dt,
		    sizeof(wire_dt), M_WAITOK));
	}
	/* Write: send the descriptor plus the payload in one message. */
	return (ctl_ha_msg_send2(CTL_HA_CHAN_DATA, &wire_dt,
	    sizeof(wire_dt), req->local, req->size, M_WAITOK));
}
/*
 * Event handler for the CTL_HA_CHAN_DATA channel.  Services peer DMA
 * requests (answering READs with WRITEs carrying the data), completes
 * local asynchronous read requests, and fails all outstanding requests
 * when the link goes down.
 */
static void
ctl_dt_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ha_softc *softc = &ha_softc;
	struct ctl_ha_dt_req *req;
	ctl_ha_status isc_status;
	if (event == CTL_HA_EVT_MSG_RECV) {
		struct ha_dt_msg_wire wire_dt;
		uint8_t *tmp;
		int size;
		/* Pull the fixed-size descriptor off the socket. */
		size = min(sizeof(wire_dt), param);
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_DATA, &wire_dt,
		    size, M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			return;
		}
		if (wire_dt.command == CTL_HA_DT_CMD_READ) {
			/*
			 * Peer wants to read our memory: answer with a
			 * WRITE carrying the requested data, swapping the
			 * local/remote pointers to the peer's perspective.
			 */
			wire_dt.command = CTL_HA_DT_CMD_WRITE;
			tmp = wire_dt.local;
			wire_dt.local = wire_dt.remote;
			wire_dt.remote = tmp;
			ctl_ha_msg_send2(CTL_HA_CHAN_DATA, &wire_dt,
			    sizeof(wire_dt), wire_dt.local, wire_dt.size,
			    M_WAITOK);
		} else if (wire_dt.command == CTL_HA_DT_CMD_WRITE) {
			/*
			 * Incoming data for one of our queued reads:
			 * receive it into the buffer the peer addressed,
			 * then complete the matching request.
			 */
			isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_DATA,
			    wire_dt.remote, wire_dt.size, M_WAITOK);
			mtx_lock(&softc->ha_lock);
			TAILQ_FOREACH(req, &softc->ha_dts, links) {
				if (req->local == wire_dt.remote) {
					TAILQ_REMOVE(&softc->ha_dts, req, links);
					break;
				}
			}
			mtx_unlock(&softc->ha_lock);
			/* req is NULL here if no queued request matched. */
			if (req) {
				req->ret = isc_status;
				req->callback(req);
			}
		}
	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
		CTL_DEBUG_PRINT(("%s: Link state change to %d\n", __func__,
		    param));
		if (param != CTL_HA_LINK_ONLINE) {
			/*
			 * Link lost: fail every outstanding request.  The
			 * lock is dropped around each callback.
			 */
			mtx_lock(&softc->ha_lock);
			while ((req = TAILQ_FIRST(&softc->ha_dts)) != NULL) {
				TAILQ_REMOVE(&softc->ha_dts, req, links);
				mtx_unlock(&softc->ha_lock);
				req->ret = CTL_HA_STATUS_DISCONNECT;
				req->callback(req);
				mtx_lock(&softc->ha_lock);
			}
			mtx_unlock(&softc->ha_lock);
		}
	} else {
		printf("%s: Unknown event %d\n", __func__, event);
	}
}
/*
 * Initialize the HA messaging subsystem: set up the send queue and the
 * outstanding DT request list, start the connection ("ha_tx") thread,
 * register the "ha_peer" sysctl used to configure the peer address, and
 * hook up the data-channel event handler.
 *
 * Fix: removed the stray ';' that followed the function's closing brace
 * (an extraneous file-scope empty declaration, invalid in strict ISO C).
 */
ctl_ha_status
ctl_ha_msg_init(struct ctl_softc *ctl_softc)
{
	struct ha_softc *softc = &ha_softc;
	int error;

	softc->ha_ctl_softc = ctl_softc;
	mtx_init(&softc->ha_lock, "CTL HA mutex", NULL, MTX_DEF);
	mbufq_init(&softc->ha_sendq, INT_MAX);
	TAILQ_INIT(&softc->ha_dts);
	error = kproc_kthread_add(ctl_ha_conn_thread, softc,
	    &ctl_softc->ctl_proc, NULL, 0, 0, "ctl", "ha_tx");
	if (error != 0) {
		printf("error creating CTL HA connection thread!\n");
		mtx_destroy(&softc->ha_lock);
		return (CTL_HA_STATUS_ERROR);
	}
	SYSCTL_ADD_PROC(&ctl_softc->sysctl_ctx,
	    SYSCTL_CHILDREN(ctl_softc->sysctl_tree),
	    OID_AUTO, "ha_peer", CTLTYPE_STRING | CTLFLAG_RWTUN,
	    softc, 0, ctl_ha_peer_sysctl, "A", "HA peer connection method");
	if (ctl_ha_msg_register(CTL_HA_CHAN_DATA, ctl_dt_event_handler)
	    != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ctl_ha_msg_register failed.\n", __func__);
	}
	return (CTL_HA_STATUS_SUCCESS);
}
/*
 * Shut down the HA messaging subsystem: deregister the data-channel
 * event handler and destroy the lock created by ctl_ha_msg_init().
 *
 * Fix: removed the stray ';' that followed the function's closing brace
 * (an extraneous file-scope empty declaration, invalid in strict ISO C).
 */
ctl_ha_status
ctl_ha_msg_shutdown(struct ctl_softc *ctl_softc)
{
	struct ha_softc *softc = &ha_softc;

	if (ctl_ha_msg_deregister(CTL_HA_CHAN_DATA) != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
	}
	mtx_destroy(&softc->ha_lock);
	return (CTL_HA_STATUS_SUCCESS);
}

View File

@ -1,6 +1,7 @@
/*-
* Copyright (c) 2003-2009 Silicon Graphics International Corp.
* Copyright (c) 2011 Spectra Logic Corporation
* Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -38,80 +39,27 @@
/*
* CTL High Availability Modes:
*
* CTL_HA_MODE_ACT_STBY: One side is in Active state and processing commands,
* the other side is in Standby state, returning errors.
* CTL_HA_MODE_SER_ONLY: Commands are serialized to the other side. Write
* mirroring and read re-direction are assumed to
* happen in the back end.
* CTL_HA_MODE_XFER: Commands are serialized and data is transferred
* for write mirroring and read re-direction.
* CTL_HA_MODE_ACT_STBY: Commands are serialized to the master side.
* No media access commands on slave side (Standby).
* CTL_HA_MODE_SER_ONLY: Commands are serialized to the master side.
* Media can be accessed on both sides.
* CTL_HA_MODE_XFER: Commands and data are forwarded to the
* master side for execution.
*/
typedef enum {
CTL_HA_MODE_ACT_STBY,
CTL_HA_MODE_SER_ONLY,
CTL_HA_MODE_XFER
} ctl_ha_mode;
/*
* This is a stubbed out High Availability interface. It assumes two nodes
* staying in sync.
*
* The reason this interface is here, and stubbed out, is that CTL was
* originally written with support for Copan's (now SGI) high availability
* framework. That framework was not released by SGI, and would not have
* been generally applicable to FreeBSD anyway.
*
* The idea here is to show the kind of API that would need to be in place
* in a HA framework to work with CTL's HA hooks. This API is very close
* to the Copan/SGI API, so that the code using it could stay in place
* as-is.
*
* So, in summary, this is a shell without real substance, and much more
* work would be needed to actually make HA work. The implementation
* inside CTL will also need to change to fit the eventual implementation.
* The additional pieces we would need are:
*
* - HA "Supervisor" framework that can startup the components of the
* system, and initiate failover (i.e. active/active to single mode)
* and failback (single to active/active mode) state transitions.
* This framework would be able to recognize when an event happens
* that requires it to initiate state transitions in the components it
* manages.
*
* - HA communication framework. This framework should have the following
* features:
* - Separate channels for separate system components. The CTL
* instance on one node should communicate with the CTL instance
* on another node.
* - Short message passing. These messages would be fixed length, so
* they could be preallocated and easily passed between the nodes.
* i.e. conceptually like an ethernet packet.
* - DMA/large buffer capability. This would require some negotiation
* with the other node to define the destination. It could
* allow for "push" (i.e. initiated by the requesting node) DMA or
* "pull" (i.e. initiated by the target controller) DMA or both.
* - Communication channel status change notification.
* - HA capability in other portions of the storage stack. Having two CTL
* instances communicate is just one part of an overall HA solution.
* State needs to be synchronized at multiple levels of the system in
* order for failover to actually work. For instance, if CTL is using a
* file on a ZFS filesystem as its backing store, the ZFS array state
* should be synchronized with the other node, so that the other node
* can immediately take over if the node that is primary for a particular
* array fails.
*/
/*
* Communication channel IDs for various system components. This is to
* make sure one CTL instance talks with another, one ZFS instance talks
* with another, etc.
*/
typedef enum {
CTL_HA_CHAN_NONE,
CTL_HA_CHAN_CTL,
CTL_HA_CHAN_ZFS,
CTL_HA_CHAN_DATA,
CTL_HA_CHAN_MAX
} ctl_ha_channel;
@ -120,18 +68,12 @@ typedef enum {
* HA communication subsystem.
*
* CTL_HA_EVT_MSG_RECV: Message received by the other node.
* CTL_HA_EVT_MSG_SENT: Message sent to the other node.
* CTL_HA_EVT_DISCONNECT: Communication channel disconnected.
* CTL_HA_EVT_DMA_SENT: DMA successfully sent to other node (push).
* CTL_HA_EVT_DMA_RECEIVED: DMA successfully received by other node (pull).
* CTL_HA_EVT_LINK_CHANGE: Communication channel status changed.
*/
typedef enum {
CTL_HA_EVT_NONE,
CTL_HA_EVT_MSG_RECV,
CTL_HA_EVT_MSG_SENT,
CTL_HA_EVT_DISCONNECT,
CTL_HA_EVT_DMA_SENT,
CTL_HA_EVT_DMA_RECEIVED,
CTL_HA_EVT_LINK_CHANGE,
CTL_HA_EVT_MAX
} ctl_ha_event;
@ -145,12 +87,6 @@ typedef enum {
CTL_HA_STATUS_MAX
} ctl_ha_status;
typedef enum {
CTL_HA_DATA_CTL,
CTL_HA_DATA_ZFS,
CTL_HA_DATA_MAX
} ctl_ha_dtid;
typedef enum {
CTL_HA_DT_CMD_READ,
CTL_HA_DT_CMD_WRITE,
@ -164,110 +100,40 @@ struct ctl_ha_dt_req {
ctl_ha_dt_cmd command;
void *context;
ctl_ha_dt_cb callback;
ctl_ha_dtid id;
int ret;
uint32_t size;
uint8_t *local;
uint8_t *remote;
TAILQ_ENTRY(ctl_ha_dt_req) links;
};
struct ctl_softc;
ctl_ha_status ctl_ha_msg_init(struct ctl_softc *softc);
ctl_ha_status ctl_ha_msg_shutdown(struct ctl_softc *softc);
typedef void (*ctl_evt_handler)(ctl_ha_channel channel, ctl_ha_event event,
int param);
void ctl_ha_register_evthandler(ctl_ha_channel channel,
ctl_evt_handler handler);
static inline ctl_ha_status
ctl_ha_msg_create(ctl_ha_channel channel, ctl_evt_handler handler)
{
return (CTL_HA_STATUS_SUCCESS);
}
ctl_ha_status ctl_ha_msg_register(ctl_ha_channel channel,
ctl_evt_handler handler);
ctl_ha_status ctl_ha_msg_recv(ctl_ha_channel channel, void *addr,
size_t len, int wait);
ctl_ha_status ctl_ha_msg_send(ctl_ha_channel channel, const void *addr,
size_t len, int wait);
ctl_ha_status ctl_ha_msg_send2(ctl_ha_channel channel, const void *addr,
size_t len, const void *addr2, size_t len2, int wait);
ctl_ha_status ctl_ha_msg_deregister(ctl_ha_channel channel);
/*
* Receive a message of the specified size.
*/
static inline ctl_ha_status
ctl_ha_msg_recv(ctl_ha_channel channel, void *buffer, unsigned int size,
int wait)
{
return (CTL_HA_STATUS_SUCCESS);
}
/*
* Send a message of the specified size.
*/
static inline ctl_ha_status
ctl_ha_msg_send(ctl_ha_channel channel, void *buffer, unsigned int size,
int wait)
{
return (CTL_HA_STATUS_SUCCESS);
}
/*
* Allocate a data transfer request structure.
*/
static inline struct ctl_ha_dt_req *
ctl_dt_req_alloc(void)
{
return (NULL);
}
/*
* Free a data transfer request structure.
*/
static inline void
ctl_dt_req_free(struct ctl_ha_dt_req *req)
{
return;
}
/*
* Issue a DMA request for a single buffer.
*/
static inline ctl_ha_status
ctl_dt_single(struct ctl_ha_dt_req *req)
{
return (CTL_HA_STATUS_WAIT);
}
/*
* SINGLE: One node
* HA: Two nodes (Active/Active implied)
* SLAVE/MASTER: The component can set these flags to indicate which side
* is in control. It has no effect on the HA framework.
*/
typedef enum {
CTL_HA_STATE_UNKNOWN = 0x00,
CTL_HA_STATE_SINGLE = 0x01,
CTL_HA_STATE_HA = 0x02,
CTL_HA_STATE_MASK = 0x0F,
CTL_HA_STATE_SLAVE = 0x10,
CTL_HA_STATE_MASTER = 0x20
} ctl_ha_state;
struct ctl_ha_dt_req * ctl_dt_req_alloc(void);
void ctl_dt_req_free(struct ctl_ha_dt_req *req);
ctl_ha_status ctl_dt_single(struct ctl_ha_dt_req *req);
typedef enum {
CTL_HA_COMP_STATUS_OK,
CTL_HA_COMP_STATUS_FAILED,
CTL_HA_COMP_STATUS_ERROR
} ctl_ha_comp_status;
struct ctl_ha_component;
typedef ctl_ha_comp_status (*ctl_hacmp_init_t)(struct ctl_ha_component *);
typedef ctl_ha_comp_status (*ctl_hacmp_start_t)(struct ctl_ha_component *,
ctl_ha_state);
struct ctl_ha_component {
char *name;
ctl_ha_state state;
ctl_ha_comp_status status;
ctl_hacmp_init_t init;
ctl_hacmp_start_t start;
ctl_hacmp_init_t quiesce;
};
#define CTL_HA_STATE_IS_SINGLE(state) ((state & CTL_HA_STATE_MASK) == \
CTL_HA_STATE_SINGLE)
#define CTL_HA_STATE_IS_HA(state) ((state & CTL_HA_STATE_MASK) == \
CTL_HA_STATE_HA)
CTL_HA_LINK_OFFLINE = 0x00,
CTL_HA_LINK_UNKNOWN = 0x01,
CTL_HA_LINK_ONLINE = 0x02
} ctl_ha_link_state;
#endif /* _CTL_HA_H_ */

View File

@ -58,13 +58,12 @@ EXTERN(int ctl_time_io_secs, CTL_TIME_IO_DEFAULT_SECS);
#endif
/*
* Uncomment these next two lines to enable the CTL I/O delay feature. You
* Uncomment this next line to enable the CTL I/O delay feature. You
* can delay I/O at two different points -- datamove and done. This is
* useful for diagnosing abort conditions (for hosts that send an abort on a
* timeout), and for determining how long a host's timeout is.
*/
#define CTL_IO_DELAY
#define CTL_TIMER_BYTES sizeof(struct callout)
//#define CTL_IO_DELAY
typedef enum {
CTL_STATUS_NONE, /* No status */
@ -93,13 +92,11 @@ typedef enum {
CTL_FLAG_EDPTR_SGLIST = 0x00000010, /* ext_data_ptr is S/G list */
CTL_FLAG_DO_AUTOSENSE = 0x00000020, /* grab sense info */
CTL_FLAG_USER_REQ = 0x00000040, /* request came from userland */
CTL_FLAG_CONTROL_DEV = 0x00000080, /* processor device */
CTL_FLAG_ALLOCATED = 0x00000100, /* data space allocated */
CTL_FLAG_BLOCKED = 0x00000200, /* on the blocked queue */
CTL_FLAG_ABORT_STATUS = 0x00000400, /* return TASK ABORTED status */
CTL_FLAG_ABORT = 0x00000800, /* this I/O should be aborted */
CTL_FLAG_DMA_INPROG = 0x00001000, /* DMA in progress */
CTL_FLAG_NO_DATASYNC = 0x00002000, /* don't cache flush data */
CTL_FLAG_DELAY_DONE = 0x00004000, /* delay injection done */
CTL_FLAG_INT_COPY = 0x00008000, /* internal copy, no done call*/
CTL_FLAG_SENT_2OTHER_SC = 0x00010000,
@ -109,9 +106,6 @@ typedef enum {
addresses, not virtual ones*/
CTL_FLAG_IO_CONT = 0x00100000, /* Continue I/O instead of
completing */
CTL_FLAG_AUTO_MIRROR = 0x00200000, /* Automatically use memory
from the RC cache mirrored
address area. */
#if 0
CTL_FLAG_ALREADY_DONE = 0x00200000 /* I/O already completed */
#endif
@ -119,14 +113,8 @@ typedef enum {
CTL_FLAG_DMA_QUEUED = 0x00800000, /* DMA queued but not started*/
CTL_FLAG_STATUS_QUEUED = 0x01000000, /* Status queued but not sent*/
CTL_FLAG_REDIR_DONE = 0x02000000, /* Redirection has already
been done. */
CTL_FLAG_FAILOVER = 0x04000000, /* Killed by a failover */
CTL_FLAG_IO_ACTIVE = 0x08000000, /* I/O active on this SC */
CTL_FLAG_RDMA_MASK = CTL_FLAG_NO_DATASYNC | CTL_FLAG_BUS_ADDR |
CTL_FLAG_AUTO_MIRROR | CTL_FLAG_REDIR_DONE,
/* Flags we care about for
remote DMA */
CTL_FLAG_STATUS_SENT = 0x10000000 /* Status sent by datamove */
} ctl_io_flags;
@ -184,11 +172,6 @@ struct ctl_sg_entry {
size_t len;
};
struct ctl_id {
uint32_t id;
uint64_t wwid[2];
};
typedef enum {
CTL_IO_NONE,
CTL_IO_SCSI,
@ -196,9 +179,8 @@ typedef enum {
} ctl_io_type;
struct ctl_nexus {
struct ctl_id initid; /* Initiator ID */
uint32_t initid; /* Initiator ID */
uint32_t targ_port; /* Target port, filled in by PORT */
struct ctl_id targ_target; /* Destination target */
uint32_t targ_lun; /* Destination lun */
uint32_t targ_mapped_lun; /* Destination lun CTL-wide */
};
@ -210,15 +192,16 @@ typedef enum {
CTL_MSG_BAD_JUJU,
CTL_MSG_MANAGE_TASKS,
CTL_MSG_PERS_ACTION,
CTL_MSG_SYNC_FE,
CTL_MSG_DATAMOVE,
CTL_MSG_DATAMOVE_DONE
CTL_MSG_DATAMOVE_DONE,
CTL_MSG_UA, /* Set/clear UA on secondary. */
CTL_MSG_PORT_SYNC, /* Information about port. */
CTL_MSG_LUN_SYNC, /* Information about LUN. */
CTL_MSG_FAILOVER /* Fake, never sent though the wire */
} ctl_msg_type;
struct ctl_scsiio;
#define CTL_NUM_SG_ENTRIES 9
struct ctl_io_hdr {
uint32_t version; /* interface version XXX */
ctl_io_type io_type; /* task I/O, SCSI I/O, etc. */
@ -231,7 +214,7 @@ struct ctl_io_hdr {
uint32_t timeout; /* timeout in ms */
uint32_t retries; /* retry count */
#ifdef CTL_IO_DELAY
uint8_t timer_bytes[CTL_TIMER_BYTES]; /* timer kludge */
struct callout delay_callout;
#endif /* CTL_IO_DELAY */
#ifdef CTL_TIME_IO
time_t start_time; /* I/O start time */
@ -244,10 +227,8 @@ struct ctl_io_hdr {
union ctl_io *serializing_sc;
void *pool; /* I/O pool */
union ctl_priv ctl_private[CTL_NUM_PRIV];/* CTL private area */
struct ctl_sg_entry remote_sglist[CTL_NUM_SG_ENTRIES];
struct ctl_sg_entry remote_dma_sglist[CTL_NUM_SG_ENTRIES];
struct ctl_sg_entry local_sglist[CTL_NUM_SG_ENTRIES];
struct ctl_sg_entry local_dma_sglist[CTL_NUM_SG_ENTRIES];
struct ctl_sg_entry *remote_sglist;
struct ctl_sg_entry *local_sglist;
STAILQ_ENTRY(ctl_io_hdr) links; /* linked list pointer */
TAILQ_ENTRY(ctl_io_hdr) ooa_links;
TAILQ_ENTRY(ctl_io_hdr) blocked_links;
@ -393,10 +374,10 @@ struct ctl_ha_msg_hdr {
union ctl_io *serializing_sc;
struct ctl_nexus nexus; /* Initiator, port, target, lun */
uint32_t status; /* transaction status */
TAILQ_ENTRY(ctl_ha_msg_hdr) links;
};
#define CTL_HA_MAX_SG_ENTRIES 16
#define CTL_HA_DATAMOVE_SEGMENT 131072
/*
* Used for CTL_MSG_PERS_ACTION.
@ -406,6 +387,16 @@ struct ctl_ha_msg_pr {
struct ctl_pr_info pr_info;
};
/*
* Used for CTL_MSG_UA.
*/
struct ctl_ha_msg_ua {
struct ctl_ha_msg_hdr hdr;
int ua_all;
int ua_set;
int ua_type;
};
/*
* The S/G handling here is a little different than the standard ctl_scsiio
* structure, because we can't pass data by reference in between controllers.
@ -438,17 +429,18 @@ struct ctl_ha_msg_dt {
*/
struct ctl_ha_msg_scsi {
struct ctl_ha_msg_hdr hdr;
uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */
uint32_t tag_num; /* tag number */
ctl_tag_type tag_type; /* simple, ordered, etc. */
uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */
uint8_t cdb_len; /* CDB length */
uint8_t scsi_status; /* SCSI status byte */
struct scsi_sense_data sense_data; /* sense data */
uint8_t sense_len; /* Returned sense length */
uint8_t sense_residual; /* sense residual length */
uint32_t residual; /* data residual length */
uint32_t fetd_status; /* trans status, set by FETD,
0 = good*/
struct ctl_lba_len lbalen; /* used for stats */
struct scsi_sense_data sense_data; /* sense data */
};
/*
@ -461,12 +453,50 @@ struct ctl_ha_msg_task {
ctl_tag_type tag_type; /* simple, ordered, etc. */
};
/*
* Used for CTL_MSG_PORT_SYNC.
*/
struct ctl_ha_msg_port {
struct ctl_ha_msg_hdr hdr;
int port_type;
int physical_port;
int virtual_port;
int status;
int name_len;
int lun_map_len;
int port_devid_len;
int target_devid_len;
uint8_t data[];
};
/*
* Used for CTL_MSG_LUN_SYNC.
*/
struct ctl_ha_msg_lun {
struct ctl_ha_msg_hdr hdr;
int flags;
unsigned int pr_generation;
uint32_t pr_res_idx;
uint8_t pr_res_type;
int lun_devid_len;
int pr_key_count;
uint8_t data[];
};
struct ctl_ha_msg_lun_pr_key {
uint32_t pr_iid;
uint64_t pr_key;
};
union ctl_ha_msg {
struct ctl_ha_msg_hdr hdr;
struct ctl_ha_msg_task task;
struct ctl_ha_msg_scsi scsi;
struct ctl_ha_msg_dt dt;
struct ctl_ha_msg_pr pr;
struct ctl_ha_msg_ua ua;
struct ctl_ha_msg_port port;
struct ctl_ha_msg_lun lun;
};

View File

@ -86,7 +86,6 @@ typedef enum {
} ctl_ooa_status;
struct ctl_ooa_info {
uint32_t target_id; /* Passed in to CTL */
uint32_t lun_id; /* Passed in to CTL */
uint32_t num_entries; /* Returned from CTL */
ctl_ooa_status status; /* Returned from CTL */
@ -114,7 +113,6 @@ typedef enum {
} ctl_delay_status;
struct ctl_io_delay_info {
uint32_t target_id;
uint32_t lun_id;
ctl_delay_type delay_type;
ctl_delay_location delay_loc;
@ -133,7 +131,6 @@ typedef enum {
* means that we will let through every N SYNCHRONIZE CACHE commands.
*/
struct ctl_sync_info {
uint32_t target_id; /* passed to kernel */
uint32_t lun_id; /* passed to kernel */
int sync_interval; /* depends on whether get/set */
ctl_gs_sync_status status; /* passed from kernel */
@ -262,7 +259,6 @@ struct ctl_error_desc_cmd {
/*
* Error injection descriptor.
*
* target_id: Target ID to act on.
* lun_id LUN to act on.
* lun_error: The type of error to inject. See above for descriptions.
* error_pattern: What kind of command to act on. See above.
@ -273,7 +269,6 @@ struct ctl_error_desc_cmd {
* links: Kernel use only.
*/
struct ctl_error_desc {
uint32_t target_id; /* To kernel */
uint32_t lun_id; /* To kernel */
ctl_lun_error lun_error; /* To kernel */
ctl_lun_error_pattern error_pattern; /* To kernel */

View File

@ -106,8 +106,8 @@ typedef enum {
CTL_CMD_FLAG_OK_ON_BOTH = 0x0300,
CTL_CMD_FLAG_OK_ON_STOPPED = 0x0400,
CTL_CMD_FLAG_OK_ON_INOPERABLE = 0x0800,
CTL_CMD_FLAG_OK_ON_OFFLINE = 0x1000,
CTL_CMD_FLAG_OK_ON_SECONDARY = 0x2000,
CTL_CMD_FLAG_OK_ON_STANDBY = 0x1000,
CTL_CMD_FLAG_OK_ON_UNAVAIL = 0x2000,
CTL_CMD_FLAG_ALLOW_ON_PR_RESV = 0x4000,
CTL_CMD_FLAG_SA5 = 0x8000
} ctl_cmd_flags;
@ -157,7 +157,8 @@ typedef enum {
CTL_LUN_PR_RESERVED = 0x100,
CTL_LUN_PRIMARY_SC = 0x200,
CTL_LUN_SENSE_DESC = 0x400,
CTL_LUN_READONLY = 0x800
CTL_LUN_READONLY = 0x800,
CTL_LUN_PEER_SC_PRIMARY = 0x1000
} ctl_lun_flags;
typedef enum {
@ -398,7 +399,7 @@ struct ctl_lun {
struct ctl_lun_io_stats stats;
uint32_t res_idx;
unsigned int PRGeneration;
uint64_t *pr_keys[2 * CTL_MAX_PORTS];
uint64_t *pr_keys[CTL_MAX_PORTS];
int pr_key_count;
uint32_t pr_res_idx;
uint8_t res_type;
@ -434,11 +435,13 @@ struct ctl_softc {
ctl_gen_flags flags;
ctl_ha_mode ha_mode;
int ha_id;
int ha_state;
int is_single;
int port_offset;
int persis_offset;
int inquiry_pq_no_lun;
ctl_ha_link_state ha_link;
int port_min;
int port_max;
int port_cnt;
int init_min;
int init_max;
struct sysctl_ctx_list sysctl_ctx;
struct sysctl_oid *sysctl_tree;
void *othersc_pool;
@ -469,8 +472,6 @@ struct ctl_softc {
extern const struct ctl_cmd_entry ctl_cmd_table[256];
uint32_t ctl_get_initindex(struct ctl_nexus *nexus);
uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
uint32_t ctl_port_idx(int port_num);
int ctl_lun_map_init(struct ctl_port *port);
int ctl_lun_map_deinit(struct ctl_port *port);
int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun);
@ -508,7 +509,6 @@ int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio);
int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio);
int ctl_report_supported_tmf(struct ctl_scsiio *ctsio);
int ctl_report_timestamp(struct ctl_scsiio *ctsio);
int ctl_isc(struct ctl_scsiio *ctsio);
int ctl_get_lba_status(struct ctl_scsiio *ctsio);
void ctl_tpc_init(struct ctl_softc *softc);

View File

@ -114,7 +114,7 @@ ctl_scsi_path_string(union ctl_io *io, char *path_str, int len)
{
snprintf(path_str, len, "(%u:%u:%u/%u): ",
io->io_hdr.nexus.initid.id, io->io_hdr.nexus.targ_port,
io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port,
io->io_hdr.nexus.targ_lun, io->io_hdr.nexus.targ_mapped_lun);
}

View File

@ -534,7 +534,7 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
list_id = cdb->list_identifier;
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
ctl_get_resindex(&ctsio->io_hdr.nexus));
ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
@ -616,7 +616,7 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
list_id = cdb->list_identifier;
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
ctl_get_resindex(&ctsio->io_hdr.nexus));
ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL || !list->completed) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
@ -688,7 +688,7 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
list_id = scsi_4btoul(cdb->list_identifier);
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
ctl_get_resindex(&ctsio->io_hdr.nexus));
ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
@ -771,7 +771,7 @@ ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
list_id = scsi_4btoul(cdb->list_identifier);
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
ctl_get_resindex(&ctsio->io_hdr.nexus));
ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
@ -1645,7 +1645,7 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
list->init_port = -1;
else
list->init_port = ctsio->io_hdr.nexus.targ_port;
list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
list->list_id = data->list_identifier;
list->flags = data->flags;
list->params = ctsio->kern_data_ptr;
@ -1772,7 +1772,7 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
list->init_port = -1;
else
list->init_port = ctsio->io_hdr.nexus.targ_port;
list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
list->list_id = scsi_4btoul(data->list_identifier);
list->flags = data->flags;
list->params = ctsio->kern_data_ptr;
@ -1890,7 +1890,7 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
softc = lun->ctl_softc;
port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
port = softc->ctl_ports[ctsio->io_hdr.nexus.targ_port];
cdb = (struct scsi_populate_token *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
@ -1944,7 +1944,7 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
list->service_action = cdb->service_action;
list->init_port = ctsio->io_hdr.nexus.targ_port;
list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
list->list_id = scsi_4btoul(cdb->list_identifier);
list->flags = data->flags;
list->ctsio = ctsio;
@ -2070,7 +2070,7 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
list->service_action = cdb->service_action;
list->init_port = ctsio->io_hdr.nexus.targ_port;
list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
list->list_id = scsi_4btoul(cdb->list_identifier);
list->flags = data->flags;
list->params = ctsio->kern_data_ptr;
@ -2162,7 +2162,7 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
list_id = scsi_4btoul(cdb->list_identifier);
mtx_lock(&lun->lun_lock);
list = tpc_find_list(lun, list_id,
ctl_get_resindex(&ctsio->io_hdr.nexus));
ctl_get_initindex(&ctsio->io_hdr.nexus));
if (list == NULL) {
mtx_unlock(&lun->lun_lock);
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,

View File

@ -97,11 +97,11 @@ tpcl_init(void)
port->fe_done = tpcl_done;
port->max_targets = 1;
port->max_target_id = 0;
port->targ_port = -1;
port->max_initiators = 1;
if (ctl_port_register(port) != 0)
{
printf("%s: tpc frontend registration failed\n", __func__);
if (ctl_port_register(port) != 0) {
printf("%s: ctl_port_register() failed with error\n", __func__);
return (0);
}
@ -287,7 +287,7 @@ tpcl_resolve(struct ctl_softc *softc, int init_port,
cscdid = (struct scsi_ec_cscd_id *)cscd;
mtx_lock(&softc->ctl_lock);
if (init_port >= 0)
port = softc->ctl_ports[ctl_port_idx(init_port)];
port = softc->ctl_ports[init_port];
else
port = NULL;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
@ -328,9 +328,8 @@ tpcl_queue(union ctl_io *io, uint64_t lun)
{
struct tpcl_softc *tsoftc = &tpcl_softc;
io->io_hdr.nexus.initid.id = 0;
io->io_hdr.nexus.initid = 0;
io->io_hdr.nexus.targ_port = tsoftc->port.targ_port;
io->io_hdr.nexus.targ_target.id = 0;
io->io_hdr.nexus.targ_lun = lun;
io->scsiio.tag_num = atomic_fetchadd_int(&tsoftc->cur_tag_num, 1);
io->scsiio.ext_data_filled = 0;

View File

@ -679,7 +679,7 @@ ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr, uint32_t data_len,
#ifndef _KERNEL
union ctl_io *
ctl_scsi_alloc_io(struct ctl_id initid)
ctl_scsi_alloc_io(uint32_t initid)
{
union ctl_io *io;

View File

@ -94,7 +94,7 @@ void ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr,
uint32_t data_len, uint8_t action,
ctl_tag_type tag_type, uint8_t control);
#ifndef _KERNEL
union ctl_io *ctl_scsi_alloc_io(struct ctl_id initid);
union ctl_io *ctl_scsi_alloc_io(uint32_t initid);
void ctl_scsi_free_io(union ctl_io *io);
#endif /* !_KERNEL */
void ctl_scsi_zero_io(union ctl_io *io);

View File

@ -400,6 +400,7 @@ ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
*/
port->max_targets = cpi->max_target;
port->max_target_id = cpi->max_target;
port->targ_port = -1;
/*
* XXX KDM need to figure out whether we're the master or
@ -1164,9 +1165,8 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
* down the immediate notify path below.
*/
io->io_hdr.io_type = CTL_IO_SCSI;
io->io_hdr.nexus.initid.id = atio->init_id;
io->io_hdr.nexus.initid = atio->init_id;
io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = atio->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
io->scsiio.tag_num = atio->tag_id;
switch (atio->tag_action) {
@ -1200,10 +1200,9 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
io->scsiio.cdb_len);
#ifdef CTLFEDEBUG
printf("%s: %ju:%d:%ju:%d: tag %04x CDB %02x\n", __func__,
(uintmax_t)io->io_hdr.nexus.initid.id,
printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
io->io_hdr.nexus.initid,
io->io_hdr.nexus.targ_port,
(uintmax_t)io->io_hdr.nexus.targ_target.id,
io->io_hdr.nexus.targ_lun,
io->scsiio.tag_num, io->scsiio.cdb[0]);
#endif
@ -1440,9 +1439,8 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
inot->ccb_h.io_ptr = io;
io->io_hdr.nexus.initid.id = inot->initiator_id;
io->io_hdr.nexus.initid = inot->initiator_id;
io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
/* XXX KDM should this be the tag_id? */
io->taskio.tag_num = inot->seq_id;

View File

@ -23,7 +23,7 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2015, Joyent, Inc. All rights reserved.
* Copyright (c) 2012, 2014 by Delphix. All rights reserved.
*/
@ -2146,7 +2146,7 @@ retry:
* this hash chain, or another CPU is deleting an element from this
* hash chain. The simplest way to deal with both of these cases
* (though not necessarily the most efficient) is to free our
* allocated block and tail-call ourselves. Note that the free is
* allocated block and re-attempt it all. Note that the free is
* to the dirty list and _not_ to the free list. This is to prevent
* races with allocators, above.
*/
@ -2159,7 +2159,7 @@ retry:
dvar->dtdv_next = free;
} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
goto top;
}
/*ARGSUSED*/

View File

@ -29,7 +29,7 @@
*/
/*
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2015, Joyent, Inc. All rights reserved.
*/
#include <sys/atomic.h>
@ -1190,11 +1190,21 @@ fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
mutex_enter(&pidlock);
p = prfind(probe->ftp_pid);
if (p == NULL) {
/*
* So it's not that the target process is being born,
* it's that it isn't there at all (and we simply
* happen to be forking). Anyway, we know that the
* target is definitely gone, so bail out.
*/
mutex_exit(&pidlock);
return (0);
}
/*
* Confirm that curproc is indeed forking the process in which
* we're trying to enable probes.
*/
ASSERT(p != NULL);
ASSERT(p->p_parent == curproc);
ASSERT(p->p_stat == SIDL);

View File

@ -440,7 +440,7 @@ dtrace_getarg(int arg, int aframes)
}
arg -= (inreg + 1);
stack = (uintptr_t *)fp + 2;
stack = (uintptr_t *)&fp[1];
load:
DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);

View File

@ -85,6 +85,7 @@ cam/ctl/ctl_frontend.c optional ctl
cam/ctl/ctl_frontend_cam_sim.c optional ctl
cam/ctl/ctl_frontend_ioctl.c optional ctl
cam/ctl/ctl_frontend_iscsi.c optional ctl
cam/ctl/ctl_ha.c optional ctl
cam/ctl/ctl_scsi_all.c optional ctl
cam/ctl/ctl_tpc.c optional ctl
cam/ctl/ctl_tpc_local.c optional ctl

View File

@ -130,7 +130,6 @@ amd64/amd64/pmap.c standard
amd64/amd64/prof_machdep.c optional profiling-routine
amd64/amd64/ptrace_machdep.c standard
amd64/amd64/sigtramp.S standard
amd64/amd64/stack_machdep.c optional ddb | stack
amd64/amd64/support.S standard
amd64/amd64/sys_machdep.c standard
amd64/amd64/trap.c standard
@ -612,6 +611,7 @@ x86/x86/mp_x86.c optional smp
x86/x86/msi.c optional pci
x86/x86/nexus.c standard
x86/x86/pvclock.c standard
x86/x86/stack_machdep.c optional ddb | stack
x86/x86/tsc.c standard
x86/x86/delay.c standard
x86/xen/hvm.c optional xenhvm

View File

@ -476,7 +476,6 @@ i386/i386/mpboot.s optional smp
i386/i386/perfmon.c optional perfmon
i386/i386/pmap.c standard
i386/i386/ptrace_machdep.c standard
i386/i386/stack_machdep.c optional ddb | stack
i386/i386/support.s standard
i386/i386/swtch.s standard
i386/i386/sys_machdep.c standard
@ -603,6 +602,7 @@ x86/x86/mptable_pci.c optional apic pci
x86/x86/mp_x86.c optional smp
x86/x86/msi.c optional apic pci
x86/x86/nexus.c standard
x86/x86/stack_machdep.c optional ddb | stack
x86/x86/tsc.c standard
x86/x86/pvclock.c standard
x86/x86/delay.c standard

View File

@ -174,7 +174,6 @@ i386/i386/mpboot.s optional smp
i386/i386/perfmon.c optional perfmon
i386/i386/pmap.c standard
i386/i386/ptrace_machdep.c standard
i386/i386/stack_machdep.c optional ddb | stack
i386/i386/support.s standard
i386/i386/swtch.s standard
i386/i386/sys_machdep.c standard
@ -274,5 +273,6 @@ x86/x86/mptable.c optional apic
x86/x86/mptable_pci.c optional apic pci
x86/x86/msi.c optional apic pci
x86/x86/nexus.c standard
x86/x86/stack_machdep.c optional ddb | stack
x86/x86/tsc.c standard
x86/x86/delay.c standard

View File

@ -947,11 +947,11 @@ RCTL opt_global.h
# Random number generator(s)
# Which CSPRNG hash we get.
# If Yarrow is not chosen, Fortuna is selected.
RANDOM_YARROW opt_random.h
RANDOM_YARROW opt_global.h
# With this, no entropy processor is loaded, but the entropy
# harvesting infrastructure is present. This means an entropy
# processor may be loaded as a module.
RANDOM_LOADABLE opt_random.h
RANDOM_LOADABLE opt_global.h
# This turns on high-rate and potentially expensive harvesting in
# the uma slab allocator.
RANDOM_ENABLE_UMA opt_global.h

View File

@ -1,4 +1,4 @@
/* $FreeBSD$ */
/* $FreeBSD$ */
/*
* Copyright (C) 2012 by Darren Reed.
@ -1054,7 +1054,7 @@ ipf_state_putent(softc, softs, data)
/* to pointers and adjusts running stats for the hash table as appropriate. */
/* */
/* This function can fail if the filter rule has had a population policy of */
/* IP addresses used with stateful filteirng assigned to it. */
/* IP addresses used with stateful filtering assigned to it. */
/* */
/* Locking: it is assumed that some kind of lock on ipf_state is held. */
/* Exits with is_lock initialised and held - *EVEN IF ERROR*. */
@ -1081,7 +1081,7 @@ ipf_state_insert(softc, is, rev)
}
/*
* If we could trust is_hv, then the modulous would not be needed,
* If we could trust is_hv, then the modulus would not be needed,
* but when running with IPFILTER_SYNC, this stops bad values.
*/
hv = is->is_hv % softs->ipf_state_size;
@ -1672,6 +1672,10 @@ ipf_state_add(softc, fin, stsave, flags)
SBUMPD(ipf_state_stats, iss_bucket_full);
return 4;
}
/*
* No existing state; create new
*/
KMALLOC(is, ipstate_t *);
if (is == NULL) {
SBUMPD(ipf_state_stats, iss_nomem);
@ -1683,7 +1687,7 @@ ipf_state_add(softc, fin, stsave, flags)
is->is_rule = fr;
/*
* Do not do the modulous here, it is done in ipf_state_insert().
* Do not do the modulus here, it is done in ipf_state_insert().
*/
if (fr != NULL) {
ipftq_t *tq;
@ -1711,7 +1715,7 @@ ipf_state_add(softc, fin, stsave, flags)
/*
* It may seem strange to set is_ref to 2, but if stsave is not NULL
* then a copy of the pointer is being stored somewhere else and in
* the end, it will expect to be able to do osmething with it.
* the end, it will expect to be able to do something with it.
*/
is->is_me = stsave;
if (stsave != NULL) {
@ -3642,7 +3646,8 @@ ipf_state_del(softc, is, why)
is->is_me = NULL;
is->is_ref--;
}
if (is->is_ref > 1) {
is->is_ref--;
if (is->is_ref > 0) {
int refs;
is->is_ref--;
@ -3652,7 +3657,6 @@ ipf_state_del(softc, is, why)
softs->ipf_state_stats.iss_orphan++;
return refs;
}
MUTEX_EXIT(&is->is_lock);
fr = is->is_rule;
is->is_rule = NULL;
@ -3664,6 +3668,7 @@ ipf_state_del(softc, is, why)
}
is->is_ref = 0;
MUTEX_EXIT(&is->is_lock);
if (is->is_tqehead[0] != NULL) {
if (ipf_deletetimeoutqueue(is->is_tqehead[0]) == 0)

View File

@ -405,6 +405,19 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
}
tp = intotcpcb(inp);
/*
* For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
* sequence number of the next byte to receive. The length of
* the data received for this message must be computed by
* comparing the new and old values of rcv_nxt.
*
* For RX_DATA_DDP, len might be non-zero, but it is only the
* length of the most recent DMA. It does not include the
* total length of the data received since the previous update
* for this DDP buffer. rcv_nxt is the sequence number of the
* first received byte from the most recent DMA.
*/
len += be32toh(rcv_nxt) - tp->rcv_nxt;
tp->rcv_nxt += len;
tp->t_rcvtime = ticks;

View File

@ -1342,14 +1342,10 @@ int radeon_suspend_kms(struct drm_device *dev)
radeon_agp_suspend(rdev);
pci_save_state(device_get_parent(dev->dev));
#ifdef FREEBSD_WIP
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
#endif /* FREEBSD_WIP */
pci_set_powerstate(dev->dev, PCI_POWERSTATE_D3);
#ifdef FREEBSD_WIP
}
console_lock();
#endif /* FREEBSD_WIP */
@ -1380,10 +1376,6 @@ int radeon_resume_kms(struct drm_device *dev)
#ifdef FREEBSD_WIP
console_lock();
#endif /* FREEBSD_WIP */
pci_set_powerstate(device_get_parent(dev->dev), PCI_POWERSTATE_D0);
pci_restore_state(device_get_parent(dev->dev));
#ifdef FREEBSD_WIP
if (pci_enable_device(dev->pdev)) {
console_unlock();
return -1;

View File

@ -789,8 +789,7 @@ int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
int pending;
taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending);
if (pending)
if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending))
taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
return (pending);
}

View File

@ -163,6 +163,7 @@ isci_attach(device_t device)
g_isci = isci;
isci->device = device;
pci_enable_busmaster(device);
isci_allocate_pci_memory(isci);
@ -272,6 +273,7 @@ isci_detach(device_t device)
pci_release_msi(device);
}
pci_disable_busmaster(device);
return (0);
}

View File

@ -136,8 +136,8 @@ isci_interrupt_setup(struct isci_softc *isci)
pci_msix_count(isci->device) >= max_msix_messages) {
isci->num_interrupts = max_msix_messages;
pci_alloc_msix(isci->device, &isci->num_interrupts);
if (isci->num_interrupts == max_msix_messages)
if (pci_alloc_msix(isci->device, &isci->num_interrupts) == 0 &&
isci->num_interrupts == max_msix_messages)
use_msix = TRUE;
}

View File

@ -2841,10 +2841,12 @@ void
netmap_detach(struct ifnet *ifp)
{
struct netmap_adapter *na = NA(ifp);
int skip;
if (!na)
return;
skip = 0;
NMG_LOCK();
netmap_disable_all_rings(ifp);
na->ifp = NULL;
@ -2856,10 +2858,11 @@ netmap_detach(struct ifnet *ifp)
* the driver is gone.
*/
if (na->na_flags & NAF_NATIVE) {
netmap_adapter_put(na);
skip = netmap_adapter_put(na);
}
/* give them a chance to notice */
netmap_enable_all_rings(ifp);
if (skip == 0)
netmap_enable_all_rings(ifp);
NMG_UNLOCK();
}

View File

@ -126,7 +126,7 @@ rcc_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
struct rcc_gpio_softc *sc;
sc = device_get_softc(dev);
if (pin > sc->sc_gpio_npins)
if (pin >= sc->sc_gpio_npins)
return (EINVAL);
*caps = rcc_pins[pin].caps;
@ -140,7 +140,7 @@ rcc_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
struct rcc_gpio_softc *sc;
sc = device_get_softc(dev);
if (pin > sc->sc_gpio_npins)
if (pin >= sc->sc_gpio_npins)
return (EINVAL);
/* Flags cannot be changed. */
@ -155,7 +155,7 @@ rcc_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
struct rcc_gpio_softc *sc;
sc = device_get_softc(dev);
if (pin > sc->sc_gpio_npins)
if (pin >= sc->sc_gpio_npins)
return (EINVAL);
memcpy(name, rcc_pins[pin].name, GPIOMAXNAME);
@ -169,7 +169,7 @@ rcc_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
struct rcc_gpio_softc *sc;
sc = device_get_softc(dev);
if (pin > sc->sc_gpio_npins)
if (pin >= sc->sc_gpio_npins)
return (EINVAL);
/* Flags cannot be changed - risk of short-circuit!!! */
@ -183,7 +183,10 @@ rcc_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value)
struct rcc_gpio_softc *sc;
sc = device_get_softc(dev);
if (pin > sc->sc_gpio_npins)
if (pin >= sc->sc_gpio_npins)
return (EINVAL);
if ((rcc_pins[pin].caps & GPIO_PIN_OUTPUT) == 0)
return (EINVAL);
RCC_GPIO_LOCK(sc);
@ -204,7 +207,7 @@ rcc_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
uint32_t value;
sc = device_get_softc(dev);
if (pin > sc->sc_gpio_npins)
if (pin >= sc->sc_gpio_npins)
return (EINVAL);
RCC_GPIO_LOCK(sc);
@ -224,7 +227,10 @@ rcc_gpio_pin_toggle(device_t dev, uint32_t pin)
struct rcc_gpio_softc *sc;
sc = device_get_softc(dev);
if (pin > sc->sc_gpio_npins)
if (pin >= sc->sc_gpio_npins)
return (EINVAL);
if ((rcc_pins[pin].caps & GPIO_PIN_OUTPUT) == 0)
return (EINVAL);
RCC_GPIO_LOCK(sc);

View File

@ -134,6 +134,7 @@ uart_cpu_getdev(int devtype, struct uart_devinfo *di)
phandle_t node, chosen;
pcell_t shift, br, rclk;
u_long start, size, pbase, psize;
char *cp;
int err;
uart_bus_space_mem = fdtbus_bs_tag;
@ -148,18 +149,25 @@ uart_cpu_getdev(int devtype, struct uart_devinfo *di)
if (devtype != UART_DEV_CONSOLE)
return (ENXIO);
/*
* Retrieve /chosen/std{in,out}.
*/
node = -1;
if ((chosen = OF_finddevice("/chosen")) != -1) {
for (name = propnames; *name != NULL; name++) {
if (phandle_chosen_propdev(chosen, *name, &node) == 0)
break;
/* Has the user forced a specific device node? */
cp = kern_getenv("hw.fdt.console");
if (cp == NULL) {
/*
* Retrieve /chosen/std{in,out}.
*/
node = -1;
if ((chosen = OF_finddevice("/chosen")) != -1) {
for (name = propnames; *name != NULL; name++) {
if (phandle_chosen_propdev(chosen, *name,
&node) == 0)
break;
}
}
if (chosen == -1 || *name == NULL)
node = OF_finddevice("serial0"); /* Last ditch */
} else {
node = OF_finddevice(cp);
}
if (chosen == -1 || *name == NULL)
node = OF_finddevice("serial0"); /* Last ditch */
if (node == -1) /* Can't find anything */
return (ENXIO);

View File

@ -316,6 +316,7 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(HUAWEI, E220, U3GINIT_HUAWEI),
U3G_DEV(HUAWEI, E220BIS, U3GINIT_HUAWEI),
U3G_DEV(HUAWEI, E392, U3GINIT_HUAWEISCSI),
U3G_DEV(HUAWEI, ME909U, U3GINIT_HUAWEISCSI2),
U3G_DEV(HUAWEI, MOBILE, U3GINIT_HUAWEI),
U3G_DEV(HUAWEI, E1752, U3GINIT_HUAWEISCSI),
U3G_DEV(HUAWEI, E1820, U3GINIT_HUAWEISCSI),
@ -494,6 +495,7 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(SIERRA, AC595U, 0),
U3G_DEV(SIERRA, AC313U, 0),
U3G_DEV(SIERRA, AC597E, 0),
U3G_DEV(SIERRA, AC875, 0),
U3G_DEV(SIERRA, AC875E, 0),
U3G_DEV(SIERRA, AC875U, 0),
U3G_DEV(SIERRA, AC875U_2, 0),
@ -508,7 +510,6 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(SIERRA, AC885U, 0),
U3G_DEV(SIERRA, AIRCARD580, 0),
U3G_DEV(SIERRA, AIRCARD595, 0),
U3G_DEV(SIERRA, AIRCARD875, 0),
U3G_DEV(SIERRA, C22, 0),
U3G_DEV(SIERRA, C597, 0),
U3G_DEV(SIERRA, C888, 0),

View File

@ -2390,6 +2390,7 @@ product HUAWEI K3765_INIT 0x1520 K3765 Initial
product HUAWEI K4505_INIT 0x1521 K4505 Initial
product HUAWEI K3772_INIT 0x1526 K3772 Initial
product HUAWEI E3272_INIT 0x155b LTE modem initial
product HUAWEI ME909U 0x1573 LTE modem
product HUAWEI R215_INIT 0x1582 LTE modem initial
product HUAWEI R215 0x1588 LTE modem
product HUAWEI ETS2055 0x1803 CDMA modem
@ -4042,8 +4043,7 @@ product SIERRA C22 0x6891 C22
product SIERRA E6892 0x6892 E6892
product SIERRA E6893 0x6893 E6893
product SIERRA MC8700 0x68A3 MC8700
product SIERRA MC7354 0x6820 MC7354
product SIERRA AIRCARD875 0x6820 Aircard 875 HSDPA
product SIERRA MC7354 0x68C0 MC7354
product SIERRA AC313U 0x68aa Sierra Wireless AirCard 313U
product SIERRA TRUINSTALL 0x0fff Aircard Tru Installer

View File

@ -88,8 +88,7 @@ SYSCTL_INT(_hw_usb_run, OID_AUTO, debug, CTLFLAG_RWTUN, &run_debug, 0,
"run debug level");
#endif
#define IEEE80211_HAS_ADDR4(wh) \
(((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
#define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh)
/*
* Because of LOR in run_key_delete(), use atomic instead.
@ -382,8 +381,6 @@ static int run_media_change(struct ifnet *);
static int run_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static int run_wme_update(struct ieee80211com *);
static void run_wme_update_cb(void *);
static void run_key_update_begin(struct ieee80211vap *);
static void run_key_update_end(struct ieee80211vap *);
static void run_key_set_cb(void *);
static int run_key_set(struct ieee80211vap *, struct ieee80211_key *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
@ -434,6 +431,8 @@ static void run_updateprot_cb(void *);
static void run_usb_timeout_cb(void *);
static void run_reset_livelock(struct run_softc *);
static void run_enable_tsf_sync(struct run_softc *);
static void run_enable_tsf(struct run_softc *);
static void run_get_tsf(struct run_softc *, uint64_t *);
static void run_enable_mrr(struct run_softc *);
static void run_set_txpreamble(struct run_softc *);
static void run_set_basicrates(struct run_softc *);
@ -926,8 +925,6 @@ run_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
return (NULL);
}
vap->iv_key_update_begin = run_key_update_begin;
vap->iv_key_update_end = run_key_update_end;
vap->iv_update_beacon = run_update_beacon;
vap->iv_max_aid = RT2870_WCID_MAX;
/*
@ -2002,7 +1999,7 @@ run_media_change(struct ifnet *ifp)
if (rt2860_rates[ridx].rate == rate)
break;
ni = ieee80211_ref_node(vap->iv_bss);
rn = (struct run_node *)ni;
rn = RUN_NODE(ni);
rn->fix_ridx = ridx;
DPRINTF("rate=%d, fix_ridx=%d\n", rate, rn->fix_ridx);
ieee80211_free_node(ni);
@ -2130,7 +2127,8 @@ run_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
ratectl |= bid;
}
} else
run_enable_tsf(sc);
/* turn link LED on */
run_set_leds(sc, RT2860_LED_RADIO |
@ -2227,26 +2225,10 @@ run_wme_update(struct ieee80211com *ic)
run_wme_update_cb(ic);
RUN_UNLOCK(sc);
/* return whatever, upper layer desn't care anyway */
/* return whatever, upper layer doesn't care anyway */
return (0);
}
static void
run_key_update_begin(struct ieee80211vap *vap)
{
/*
* To avoid out-of-order events, both run_key_set() and
* _delete() are deferred and handled by run_cmdq_cb().
* So, there is nothing we need to do here.
*/
}
static void
run_key_update_end(struct ieee80211vap *vap)
{
/* null */
}
static void
run_key_set_cb(void *arg)
{
@ -2256,6 +2238,7 @@ run_key_set_cb(void *arg)
struct ieee80211com *ic = vap->iv_ic;
struct run_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
u_int cipher = k->wk_cipher->ic_cipher;
uint32_t attr;
uint16_t base, associd;
uint8_t mode, wcid, iv[8];
@ -2269,7 +2252,7 @@ run_key_set_cb(void *arg)
associd = (ni != NULL) ? ni->ni_associd : 0;
/* map net80211 cipher to RT2860 security mode */
switch (k->wk_cipher->ic_cipher) {
switch (cipher) {
case IEEE80211_CIPHER_WEP:
if(k->wk_keylen < 8)
mode = RT2860_MODE_WEP40;
@ -2302,7 +2285,7 @@ run_key_set_cb(void *arg)
base = RT2860_PKEY(wcid);
}
if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) {
if (cipher == IEEE80211_CIPHER_TKIP) {
if(run_write_region_1(sc, base, k->wk_key, 16))
return;
if(run_write_region_1(sc, base + 16, &k->wk_key[16], 8)) /* wk_txmic */
@ -2318,11 +2301,11 @@ run_key_set_cb(void *arg)
if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
(k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) {
/* set initial packet number in IV+EIV */
if (k->wk_cipher == IEEE80211_CIPHER_WEP) {
if (cipher == IEEE80211_CIPHER_WEP) {
memset(iv, 0, sizeof iv);
iv[3] = vap->iv_def_txkey << 6;
} else {
if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) {
if (cipher == IEEE80211_CIPHER_TKIP) {
iv[0] = k->wk_keytsc >> 8;
iv[1] = (iv[0] | 0x20) & 0x7f;
iv[2] = k->wk_keytsc;
@ -2576,7 +2559,7 @@ run_iter_func(void *arg, struct ieee80211_node *ni)
{
struct run_softc *sc = arg;
struct ieee80211vap *vap = ni->ni_vap;
struct run_node *rn = (void *)ni;
struct run_node *rn = RUN_NODE(ni);
union run_stats sta[2];
uint16_t (*wstat)[3];
int txcnt, success, retrycnt, error;
@ -2650,7 +2633,7 @@ run_newassoc_cb(void *arg)
static void
run_newassoc(struct ieee80211_node *ni, int isnew)
{
struct run_node *rn = (void *)ni;
struct run_node *rn = RUN_NODE(ni);
struct ieee80211_rateset *rs = &ni->ni_rates;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = vap->iv_ic;
@ -2831,6 +2814,7 @@ run_rx_frame(struct run_softc *sc, struct mbuf *m, uint32_t dmalen)
tap->wr_antenna = ant;
tap->wr_dbm_antsignal = run_rssi2dbm(sc, rssi, ant);
tap->wr_rate = 2; /* in case it can't be found below */
run_get_tsf(sc, &tap->wr_tsf);
phy = le16toh(rxwi->phy);
switch (phy & RT2860_PHY_MODE) {
case RT2860_PHY_CCK:
@ -3078,6 +3062,7 @@ tr_setup:
(struct rt2860_txwi *)(&data->desc + sizeof(struct rt2870_txd));
tap->wt_flags = 0;
tap->wt_rate = rt2860_rates[data->ridx].rate;
run_get_tsf(sc, &tap->wt_tsf);
tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags);
tap->wt_hwqueue = index;
@ -3243,7 +3228,7 @@ run_tx(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
struct ieee80211_frame *wh;
struct ieee80211_channel *chan;
const struct ieee80211_txparam *tp;
struct run_node *rn = (void *)ni;
struct run_node *rn = RUN_NODE(ni);
struct run_tx_data *data;
struct rt2870_txd *txd;
struct rt2860_txwi *txwi;
@ -3407,7 +3392,7 @@ static int
run_tx_mgt(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
struct ieee80211com *ic = &sc->sc_ic;
struct run_node *rn = (void *)ni;
struct run_node *rn = RUN_NODE(ni);
struct run_tx_data *data;
struct ieee80211_frame *wh;
struct rt2870_txd *txd;
@ -5066,6 +5051,25 @@ run_enable_tsf_sync(struct run_softc *sc)
run_write(sc, RT2860_BCN_TIME_CFG, tmp);
}
static void
run_enable_tsf(struct run_softc *sc)
{
uint32_t tmp;
if (run_read(sc, RT2860_BCN_TIME_CFG, &tmp) == 0) {
tmp &= ~(RT2860_BCN_TX_EN | RT2860_TBTT_TIMER_EN);
tmp |= RT2860_TSF_TIMER_EN;
run_write(sc, RT2860_BCN_TIME_CFG, tmp);
}
}
static void
run_get_tsf(struct run_softc *sc, uint64_t *buf)
{
run_read_region_1(sc, RT2860_TSF_TIMER_DW0, (uint8_t *)buf,
sizeof(*buf));
}
static void
run_enable_mrr(struct run_softc *sc)
{

View File

@ -45,6 +45,7 @@
struct run_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsf;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
@ -55,7 +56,8 @@ struct run_rx_radiotap_header {
} __packed __aligned(8);
#define RUN_RX_RADIOTAP_PRESENT \
(1 << IEEE80211_RADIOTAP_FLAGS | \
(1 << IEEE80211_RADIOTAP_TSFT | \
1 << IEEE80211_RADIOTAP_FLAGS | \
1 << IEEE80211_RADIOTAP_RATE | \
1 << IEEE80211_RADIOTAP_CHANNEL | \
1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL | \
@ -64,6 +66,7 @@ struct run_rx_radiotap_header {
struct run_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint64_t wt_tsf;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
@ -74,7 +77,8 @@ struct run_tx_radiotap_header {
#define IEEE80211_RADIOTAP_HWQUEUE 15
#define RUN_TX_RADIOTAP_PRESENT \
(1 << IEEE80211_RADIOTAP_FLAGS | \
(1 << IEEE80211_RADIOTAP_TSFT | \
1 << IEEE80211_RADIOTAP_FLAGS | \
1 << IEEE80211_RADIOTAP_RATE | \
1 << IEEE80211_RADIOTAP_CHANNEL | \
1 << IEEE80211_RADIOTAP_HWQUEUE)
@ -101,6 +105,7 @@ struct run_node {
uint8_t mgt_ridx;
uint8_t fix_ridx;
};
#define RUN_NODE(ni) ((struct run_node *)(ni))
struct run_cmdq {
void *arg0;

View File

@ -451,7 +451,7 @@
#define R92C_RQPN_LD 0x80000000
/* Bits for R92C_TDECTRL. */
#define R92C_TDECTRL_BLK_DESC_NUM_M 0x0000000f
#define R92C_TDECTRL_BLK_DESC_NUM_M 0x000000f0
#define R92C_TDECTRL_BLK_DESC_NUM_S 4
/* Bits for R92C_FWHW_TXQ_CTRL. */

View File

@ -1,87 +0,0 @@
/*-
* Copyright (c) 2005 Antoine Brodin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <machine/pcb.h>
#include <machine/stack.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
static void
stack_capture(struct thread *td, struct stack *st, register_t ebp)
{
struct i386_frame *frame;
vm_offset_t callpc;
stack_zero(st);
frame = (struct i386_frame *)ebp;
while (1) {
if (!INKERNEL(frame))
break;
callpc = frame->f_retaddr;
if (!INKERNEL(callpc))
break;
if (stack_put(st, callpc) == -1)
break;
if (frame->f_frame <= frame ||
(vm_offset_t)frame->f_frame >= td->td_kstack +
td->td_kstack_pages * PAGE_SIZE)
break;
frame = frame->f_frame;
}
}
void
stack_save_td(struct stack *st, struct thread *td)
{
register_t ebp;
if (TD_IS_SWAPPED(td))
panic("stack_save_td: swapped");
if (TD_IS_RUNNING(td))
panic("stack_save_td: running");
ebp = td->td_pcb->pcb_ebp;
stack_capture(td, st, ebp);
}
void
stack_save(struct stack *st)
{
register_t ebp;
__asm __volatile("movl %%ebp,%0" : "=r" (ebp));
stack_capture(curthread, st, ebp);
}

View File

@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include "opt_isa.h"
#include "opt_kdb.h"
#include "opt_npx.h"
#include "opt_stack.h"
#include "opt_trap.h"
#include <sys/param.h>
@ -94,6 +95,7 @@ PMC_SOFT_DEFINE( , , page_fault, write);
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/stack.h>
#include <machine/tss.h>
#include <machine/vm86.h>
@ -219,19 +221,26 @@ trap(struct trapframe *frame)
goto out;
}
#ifdef HWPMC_HOOKS
/*
* CPU PMCs interrupt using an NMI so we check for that first.
* If the HWPMC module is active, 'pmc_hook' will point to
* the function to be called. A return value of '1' from the
* hook means that the NMI was handled by it and that we can
* return immediately.
*/
if (type == T_NMI && pmc_intr &&
(*pmc_intr)(PCPU_GET(cpuid), frame))
goto out;
if (type == T_NMI) {
#ifdef HWPMC_HOOKS
/*
* CPU PMCs interrupt using an NMI so we check for that first.
* If the HWPMC module is active, 'pmc_hook' will point to
* the function to be called. A non-zero return value from the
* hook means that the NMI was consumed by it and that we can
* return immediately.
*/
if (pmc_intr != NULL &&
(*pmc_intr)(PCPU_GET(cpuid), frame) != 0)
goto out;
#endif
#ifdef STACK
if (stack_nmi_handler(frame) != 0)
goto out;
#endif
}
if (type == T_MCHK) {
mca_intr();
goto out;
@ -782,7 +791,6 @@ trap_pfault(frame, usermode, eva)
vm_offset_t eva;
{
vm_offset_t va;
struct vmspace *vm;
vm_map_t map;
int rv = 0;
vm_prot_t ftype;
@ -852,14 +860,7 @@ trap_pfault(frame, usermode, eva)
map = kernel_map;
} else {
/*
* This is a fault on non-kernel virtual memory. If either
* p or p->p_vmspace is NULL, then the fault is fatal.
*/
if (p == NULL || (vm = p->p_vmspace) == NULL)
goto nogo;
map = &vm->vm_map;
map = &p->p_vmspace->vm_map;
/*
* When accessing a user-space address, kernel must be
@ -888,28 +889,8 @@ trap_pfault(frame, usermode, eva)
else
ftype = VM_PROT_READ;
if (map != kernel_map) {
/*
* Keep swapout from messing with us during this
* critical time.
*/
PROC_LOCK(p);
++p->p_lock;
PROC_UNLOCK(p);
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
PROC_LOCK(p);
--p->p_lock;
PROC_UNLOCK(p);
} else {
/*
* Don't have to worry about process locking or stacks in the
* kernel.
*/
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
}
/* Fault in the page. */
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
if (rv == KERN_SUCCESS) {
#ifdef HWPMC_HOOKS
if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {

View File

@ -1,42 +1,6 @@
/*-
* Mach Operating System
* Copyright (c) 1991,1990 Carnegie Mellon University
* All Rights Reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $FreeBSD$
*/
#ifndef _MACHINE_STACK_H_
#define _MACHINE_STACK_H_
/*
* Stack trace.
* This file is in the public domain.
*/
/* $FreeBSD$ */
/*
 * Layout of a stack frame on i386 as laid down by the standard
 * function prologue (push %ebp; mov %esp,%ebp).
 */
struct i386_frame {
struct i386_frame *f_frame;	/* saved caller %ebp (next frame up) */
int f_retaddr;			/* return address pushed by the call */
int f_arg0;			/* first argument of the called function */
};
#endif /* !_MACHINE_STACK_H_ */
#include <x86/stack.h>

View File

@ -1902,11 +1902,6 @@ __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
#endif
static int pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
&pack_fileinfo, 0,
"Enable file path packing in 'procstat -f' coredump notes");
static void
note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
{
@ -1915,7 +1910,7 @@ note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
ssize_t start_len, sect_len;
int structsize, filedesc_flags;
if (pack_fileinfo)
if (coredump_pack_fileinfo)
filedesc_flags = KERN_FILEDESC_PACK_KINFO;
else
filedesc_flags = 0;

View File

@ -911,7 +911,7 @@ kern_dup(struct thread *td, u_int mode, int flags, int old, int new)
#endif
filecaps_free(&newfde->fde_caps);
memcpy(newfde, oldfde, fde_change_size);
filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps);
filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps, true);
if ((flags & FDDUP_FLAG_CLOEXEC) != 0)
newfde->fde_flags = oldfde->fde_flags | UF_EXCLOSE;
else
@ -1433,21 +1433,31 @@ filecaps_init(struct filecaps *fcaps)
/*
* Copy filecaps structure allocating memory for ioctls array if needed.
*
* The last parameter indicates whether the fdtable is locked. If it is not and
* ioctls are encountered, copying fails and the caller must lock the table.
*
* Note that if the table was not locked, the caller has to check the relevant
* sequence counter to determine whether the operation was successful.
*/
void
filecaps_copy(const struct filecaps *src, struct filecaps *dst)
int
filecaps_copy(const struct filecaps *src, struct filecaps *dst, bool locked)
{
size_t size;
*dst = *src;
if (src->fc_ioctls != NULL) {
KASSERT(src->fc_nioctls > 0,
("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
if (src->fc_ioctls == NULL)
return (0);
if (!locked)
return (1);
size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
dst->fc_ioctls = malloc(size, M_FILECAPS, M_WAITOK);
bcopy(src->fc_ioctls, dst->fc_ioctls, size);
}
KASSERT(src->fc_nioctls > 0,
("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
dst->fc_ioctls = malloc(size, M_FILECAPS, M_WAITOK);
bcopy(src->fc_ioctls, dst->fc_ioctls, size);
return (0);
}
/*
@ -1956,7 +1966,7 @@ fdcopy(struct filedesc *fdp)
}
nfde = &newfdp->fd_ofiles[i];
*nfde = *ofde;
filecaps_copy(&ofde->fde_caps, &nfde->fde_caps);
filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true);
fhold(nfde->fde_file);
fdused_init(newfdp, i);
newfdp->fd_lastfile = i;
@ -2012,7 +2022,7 @@ fdcopy_remapped(struct filedesc *fdp, const int *fds, size_t nfds,
}
nfde = &newfdp->fd_ofiles[i];
*nfde = *ofde;
filecaps_copy(&ofde->fde_caps, &nfde->fde_caps);
filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true);
fhold(nfde->fde_file);
fdused_init(newfdp, i);
newfdp->fd_lastfile = i;
@ -2711,11 +2721,9 @@ fgetvp_rights(struct thread *td, int fd, cap_rights_t *needrightsp,
return (EBADF);
#ifdef CAPABILITIES
if (needrightsp != NULL) {
error = cap_check(cap_rights(fdp, fd), needrightsp);
if (error != 0)
return (error);
}
error = cap_check(cap_rights(fdp, fd), needrightsp);
if (error != 0)
return (error);
#endif
if (fp->f_vnode == NULL)
@ -2723,7 +2731,7 @@ fgetvp_rights(struct thread *td, int fd, cap_rights_t *needrightsp,
*vpp = fp->f_vnode;
vref(*vpp);
filecaps_copy(&fdp->fd_ofiles[fd].fde_caps, havecaps);
filecaps_copy(&fdp->fd_ofiles[fd].fde_caps, havecaps, true);
return (0);
}
@ -2938,7 +2946,7 @@ dupfdopen(struct thread *td, struct filedesc *fdp, int dfd, int mode,
seq_write_begin(&newfde->fde_seq);
#endif
memcpy(newfde, oldfde, fde_change_size);
filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps);
filecaps_copy(&oldfde->fde_caps, &newfde->fde_caps, true);
#ifdef CAPABILITIES
seq_write_end(&newfde->fde_seq);
#endif

View File

@ -100,6 +100,11 @@ SDT_PROBE_DEFINE1(proc, kernel, , exec__success, "char *");
MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
int coredump_pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
&coredump_pack_fileinfo, 0,
"Enable file path packing in 'procstat -f' coredump notes");
static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);

View File

@ -2517,11 +2517,14 @@ repeat:
sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN);
thread_lock(td);
kkstp->kkst_tid = td->td_tid;
if (TD_IS_SWAPPED(td))
if (TD_IS_SWAPPED(td)) {
kkstp->kkst_state = KKST_STATE_SWAPPED;
else if (TD_IS_RUNNING(td))
kkstp->kkst_state = KKST_STATE_RUNNING;
else {
} else if (TD_IS_RUNNING(td)) {
if (stack_save_td_running(st, td) == 0)
kkstp->kkst_state = KKST_STATE_STACKOK;
else
kkstp->kkst_state = KKST_STATE_RUNNING;
} else {
kkstp->kkst_state = KKST_STATE_STACKOK;
stack_save_td(st, td);
}

View File

@ -265,7 +265,8 @@ init_param2(long physpages)
if (maxfiles > (physpages / 4))
maxfiles = physpages / 4;
maxfilesperproc = (maxfiles / 10) * 9;
TUNABLE_INT_FETCH("kern.maxfilesperproc", &maxfilesperproc);
/*
* Cannot be changed after boot.
*/

View File

@ -1972,7 +1972,7 @@ unp_internalize(struct mbuf **controlp, struct thread *td)
fdep[i] = fdev;
fdep[i]->fde_file = fde->fde_file;
filecaps_copy(&fde->fde_caps,
&fdep[i]->fde_caps);
&fdep[i]->fde_caps, true);
unp_internalize_fp(fdep[i]->fde_file);
}
FILEDESC_SUNLOCK(fdesc);

View File

@ -3768,74 +3768,6 @@ bufwait(struct buf *bp)
}
}
/*
* Call back function from struct bio back up to struct buf.
*/
/*
 * GEOM bio completion callback: propagate the result of the struct bio
 * back into the struct buf that originated it (via dev_strategy_csw()),
 * complete the buf with bufdone(), and release the bio.
 */
static void
bufdonebio(struct bio *bip)
{
struct buf *bp;
/* The originating buf was stashed in bio_caller2 by the submitter. */
bp = bip->bio_caller2;
bp->b_resid = bip->bio_resid;
bp->b_ioflags = bip->bio_flags;
bp->b_error = bip->bio_error;
if (bp->b_error)
bp->b_ioflags |= BIO_ERROR;
bufdone(bp);
g_destroy_bio(bip);
}
/*
 * Issue the I/O described by 'bp' to character device 'dev'.
 * Convenience wrapper around dev_strategy_csw(): acquires a threaded
 * reference on the device's cdevsw for the duration of the submission.
 * The device must already hold at least one reference.
 */
void
dev_strategy(struct cdev *dev, struct buf *bp)
{
struct cdevsw *csw;
int ref;
KASSERT(dev->si_refcount > 0,
("dev_strategy on un-referenced struct cdev *(%s) %p",
devtoname(dev), dev));
/* csw may come back NULL (dying device); dev_strategy_csw handles it. */
csw = dev_refthread(dev, &ref);
dev_strategy_csw(dev, csw, bp);
dev_relthread(dev, ref);
}
/*
 * Translate the buf 'bp' into a GEOM struct bio and hand it to the
 * device's d_strategy routine.  The caller supplies the cdevsw (already
 * thread-referenced, or the device is SI_ETERNAL); a NULL csw means the
 * device is gone, in which case the buf is failed with ENXIO.
 * Completion is reported asynchronously through bufdonebio().
 */
void
dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp)
{
struct bio *bip;
KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE,
("b_iocmd botch"));
KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) ||
dev->si_threadcount > 0,
("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev),
dev));
/* Device has been destroyed: fail the request immediately. */
if (csw == NULL) {
bp->b_error = ENXIO;
bp->b_ioflags = BIO_ERROR;
bufdone(bp);
return;
}
/* g_new_bio() may fail under memory pressure; sleep and retry. */
for (;;) {
bip = g_new_bio();
if (bip != NULL)
break;
/* Try again later */
tsleep(&bp, PRIBIO, "dev_strat", hz/10);
}
/* Mirror the buf's request parameters into the bio. */
bip->bio_cmd = bp->b_iocmd;
bip->bio_offset = bp->b_iooffset;
bip->bio_length = bp->b_bcount;
bip->bio_bcount = bp->b_bcount;	/* XXX: remove */
bdata2bio(bp, bip);
/* Route completion back to the buf layer. */
bip->bio_done = bufdonebio;
bip->bio_caller2 = bp;
bip->bio_dev = dev;
(*csw->d_strategy)(bip);
}
/*
* bufdone:
*

View File

@ -195,7 +195,10 @@ vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
restart:
fmode = *flagp;
if (fmode & O_CREAT) {
if ((fmode & (O_CREAT | O_EXCL | O_DIRECTORY)) == (O_CREAT |
O_EXCL | O_DIRECTORY))
return (EINVAL);
else if ((fmode & (O_CREAT | O_DIRECTORY)) == O_CREAT) {
ndp->ni_cnd.cn_nameiop = CREATE;
/*
* Set NOCACHE to avoid flushing the cache when

View File

@ -142,6 +142,13 @@ stack_save_td(struct stack *st, struct thread *td)
stack_capture(st, pc, sp);
}
int
stack_save_td_running(struct stack *st, struct thread *td)
{
return (EOPNOTSUPP);
}
void
stack_save(struct stack *st)
{

View File

@ -714,19 +714,7 @@ dofault:
goto nogo;
}
/*
* Keep swapout from messing with us during this
* critical time.
*/
PROC_LOCK(p);
++p->p_lock;
PROC_UNLOCK(p);
rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
PROC_LOCK(p);
--p->p_lock;
PROC_UNLOCK(p);
/*
* XXXDTRACE: add dtrace_doubletrap_func here?
*/

View File

@ -13,6 +13,7 @@ SRCS+= ctl_frontend.c
SRCS+= ctl_frontend_cam_sim.c
SRCS+= ctl_frontend_ioctl.c
SRCS+= ctl_frontend_iscsi.c
SRCS+= ctl_ha.c
SRCS+= ctl_scsi_all.c
SRCS+= ctl_tpc.c
SRCS+= ctl_tpc_local.c

View File

@ -4,7 +4,7 @@ SYSDIR?=${.CURDIR}/../..
KMOD= zfs
SRCS= bus_if.h device_if.h vnode_if.h opt_kstack_pages.h opt_random.h
SRCS= bus_if.h device_if.h vnode_if.h opt_kstack_pages.h
SUNW= ${SYSDIR}/cddl/contrib/opensolaris

View File

@ -421,13 +421,8 @@ gif_transmit(struct ifnet *ifp, struct mbuf *m)
}
eth = mtod(m, struct etherip_header *);
eth->eip_resvh = 0;
if ((sc->gif_options & GIF_SEND_REVETHIP) != 0) {
eth->eip_ver = 0;
eth->eip_resvl = ETHERIP_VERSION;
} else {
eth->eip_ver = ETHERIP_VERSION;
eth->eip_resvl = 0;
}
eth->eip_ver = ETHERIP_VERSION;
eth->eip_resvl = 0;
break;
default:
error = EAFNOSUPPORT;
@ -635,19 +630,10 @@ gif_input(struct mbuf *m, struct ifnet *ifp, int proto, uint8_t ecn)
if (m == NULL)
goto drop;
eip = mtod(m, struct etherip_header *);
/*
* GIF_ACCEPT_REVETHIP (enabled by default) intentionally
* accepts an EtherIP packet with revered version field in
* the header. This is a knob for backward compatibility
* with FreeBSD 7.2R or prior.
*/
if (eip->eip_ver != ETHERIP_VERSION) {
if ((gif_options & GIF_ACCEPT_REVETHIP) == 0 ||
eip->eip_resvl != ETHERIP_VERSION) {
/* discard unknown versions */
m_freem(m);
goto drop;
}
/* discard unknown versions */
m_freem(m);
goto drop;
}
m_adj(m, sizeof(struct etherip_header));
@ -768,50 +754,32 @@ gif_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
goto bad;
/* validate sa_len */
/* check sa_family looks sane for the cmd */
switch (src->sa_family) {
#ifdef INET
case AF_INET:
if (src->sa_len != sizeof(struct sockaddr_in))
goto bad;
if (cmd != SIOCSIFPHYADDR) {
error = EAFNOSUPPORT;
goto bad;
}
if (satosin(src)->sin_addr.s_addr == INADDR_ANY ||
satosin(dst)->sin_addr.s_addr == INADDR_ANY) {
error = EADDRNOTAVAIL;
goto bad;
}
break;
#endif
#ifdef INET6
case AF_INET6:
if (src->sa_len != sizeof(struct sockaddr_in6))
goto bad;
break;
#endif
default:
error = EAFNOSUPPORT;
goto bad;
}
/* check sa_family looks sane for the cmd */
error = EAFNOSUPPORT;
switch (cmd) {
#ifdef INET
case SIOCSIFPHYADDR:
if (src->sa_family == AF_INET)
break;
goto bad;
#endif
#ifdef INET6
case SIOCSIFPHYADDR_IN6:
if (src->sa_family == AF_INET6)
break;
goto bad;
#endif
}
error = EADDRNOTAVAIL;
switch (src->sa_family) {
#ifdef INET
case AF_INET:
if (satosin(src)->sin_addr.s_addr == INADDR_ANY ||
satosin(dst)->sin_addr.s_addr == INADDR_ANY)
if (cmd != SIOCSIFPHYADDR_IN6) {
error = EAFNOSUPPORT;
goto bad;
break;
#endif
#ifdef INET6
case AF_INET6:
}
error = EADDRNOTAVAIL;
if (IN6_IS_ADDR_UNSPECIFIED(&satosin6(src)->sin6_addr)
||
IN6_IS_ADDR_UNSPECIFIED(&satosin6(dst)->sin6_addr))
@ -827,8 +795,12 @@ gif_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
error = sa6_embedscope(satosin6(dst), 0);
if (error != 0)
goto bad;
break;
#endif
};
default:
error = EAFNOSUPPORT;
goto bad;
}
error = gif_set_tunnel(ifp, src, dst);
break;
case SIOCDIFPHYADDR:

View File

@ -126,10 +126,7 @@ int in6_gif_attach(struct gif_softc *);
#define GIFGOPTS _IOWR('i', 150, struct ifreq)
#define GIFSOPTS _IOW('i', 151, struct ifreq)
#define GIF_ACCEPT_REVETHIP 0x0001
#define GIF_IGNORE_SOURCE 0x0002
#define GIF_SEND_REVETHIP 0x0010
#define GIF_OPTMASK (GIF_ACCEPT_REVETHIP|GIF_SEND_REVETHIP| \
GIF_IGNORE_SOURCE)
#define GIF_OPTMASK (GIF_IGNORE_SOURCE)
#endif /* _NET_IF_GIF_H_ */

View File

@ -168,6 +168,13 @@ static const struct ng_cmdlist ng_pppoe_cmds[] = {
&ng_parse_enaddr_type,
NULL
},
{
NGM_PPPOE_COOKIE,
NGM_PPPOE_SETMAXP,
"setmaxp",
&ng_parse_uint16_type,
NULL
},
{ 0 }
};
@ -262,6 +269,7 @@ struct PPPoE {
struct ether_header eh;
LIST_HEAD(, sess_con) listeners;
struct sess_hash_entry sesshash[SESSHASHSIZE];
struct maxptag max_payload; /* PPP-Max-Payload (RFC4638) */
};
typedef struct PPPoE *priv_p;
@ -1004,6 +1012,13 @@ ng_pppoe_rcvmsg(node_p node, item_p item, hook_p lasthook)
bcopy(msg->data, &privp->eh.ether_shost,
ETHER_ADDR_LEN);
break;
case NGM_PPPOE_SETMAXP:
if (msg->header.arglen != sizeof(uint16_t))
LEAVE(EINVAL);
privp->max_payload.hdr.tag_type = PTT_MAX_PAYL;
privp->max_payload.hdr.tag_len = htons(sizeof(uint16_t));
privp->max_payload.data = htons(*((uint16_t *)msg->data));
break;
default:
LEAVE(EINVAL);
}
@ -1071,6 +1086,8 @@ pppoe_start(sessp sp)
init_tags(sp);
insert_tag(sp, &uniqtag.hdr);
insert_tag(sp, &neg->service.hdr);
if (privp->max_payload.data != 0)
insert_tag(sp, &privp->max_payload.hdr);
make_packet(sp);
/*
* Send packet and prepare to retransmit it after timeout.
@ -1124,6 +1141,28 @@ send_sessionid(sessp sp)
return (error);
}
/*
 * Notify the node that created this session that the peer advertised a
 * PPP-Max-Payload value (RFC 4638): build an NGM_PPPOE_SETMAXP control
 * message carrying the session hook name and the host-order payload
 * size from 'tag', and send it to the creator node.
 * Returns 0 on success or an errno value (ENOMEM if no message buffer).
 */
static int
send_maxp(sessp sp, const struct pppoe_tag *tag)
{
int error;
struct ng_mesg *msg;
struct ngpppoe_maxp *maxp;
CTR2(KTR_NET, "%20s: called %d", __func__, sp->Session_ID);
/* M_NOWAIT: may fail; caller is in the data path and cannot sleep. */
NG_MKMESSAGE(msg, NGM_PPPOE_COOKIE, NGM_PPPOE_SETMAXP,
sizeof(struct ngpppoe_maxp), M_NOWAIT);
if (msg == NULL)
return (ENOMEM);
maxp = (struct ngpppoe_maxp *)msg->data;
strncpy(maxp->hook, NG_HOOK_NAME(sp->hook), NG_HOOKSIZ);
/* Tag data arrives in network byte order; convert for the consumer. */
maxp->data = ntohs(((const struct maxptag *)tag)->data);
NG_SEND_MSG_ID(error, NG_HOOK_NODE(sp->hook), msg, sp->creator, 0);
return (error);
}
/*
* Receive data from session hook and do something with it.
*/
@ -1464,6 +1503,9 @@ ng_pppoe_rcvdata_ether(hook_p hook, item_p item)
insert_tag(sp, tag); /* return it */
send_acname(sp, tag);
}
if ((tag = get_tag(ph, PTT_MAX_PAYL)) &&
(privp->max_payload.data != 0))
insert_tag(sp, tag); /* return it */
insert_tag(sp, &neg->service.hdr); /* Service */
scan_tags(sp, ph);
make_packet(sp);
@ -1602,6 +1644,9 @@ ng_pppoe_rcvdata_ether(hook_p hook, item_p item)
m_freem(neg->m);
free(sp->neg, M_NETGRAPH_PPPOE);
sp->neg = NULL;
if ((tag = get_tag(ph, PTT_MAX_PAYL)) &&
(privp->max_payload.data != 0))
send_maxp(sp, tag);
pppoe_send_event(sp, NGM_PPPOE_SUCCESS);
break;
case PADT_CODE:

Some files were not shown because too many files have changed in this diff Show More