Merge ^/head r357350 through r357367.
commit a78eada5df
@@ -53,7 +53,8 @@ static void
 usage(void)
 {

-	errx(EX_USAGE, "usage: pwait [-t timeout] [-ov] pid ...");
+	fprintf(stderr, "usage: pwait [-t timeout] [-ov] pid ...\n");
+	exit(EX_USAGE);
 }

 /*
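
The pwait hunk above swaps errx(3) for an explicit fprintf(3) plus exit(3), which drops the program-name prefix that errx() adds to the message while keeping the EX_USAGE exit status. A minimal standalone comparison of the two styles (not the pwait source; the program name "demo" is made up):

#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <sysexits.h>

static void
usage_errx(void)
{
	/* errx(3) prefixes the program name: "demo: usage: ..." */
	errx(EX_USAGE, "usage: demo [-t timeout] [-ov] pid ...");
}

static void
usage_fprintf(void)
{
	/* fprintf(3) prints the line as-is, then exits with the same status. */
	fprintf(stderr, "usage: demo [-t timeout] [-ov] pid ...\n");
	exit(EX_USAGE);
}

int
main(int argc, char *argv[])
{
	if (argc > 1)
		usage_errx();
	usage_fprintf();
}
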
@@ -35,7 +35,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd October 16, 2017
+.Dd February 01, 2020
 .Dt IF_BRIDGE 4
 .Os
 .Sh NAME
@@ -480,13 +480,6 @@ ifconfig gif0 tunnel 1.2.3.4 5.6.7.8 up
 ifconfig bridge0 create
 ifconfig bridge0 addm fxp0 addm gif0 up
 .Ed
-.Pp
-Note that
-.Fx
-6.1, 6.2, 6.3, 7.0, 7.1, and 7.2 have a bug in the EtherIP protocol.
-For more details and workaround, see the
-.Xr gif 4
-manual page.
 .Sh SEE ALSO
 .Xr gif 4 ,
 .Xr ipf 4 ,
@@ -106,7 +106,7 @@ struct zfsvfs {
 #define ZFS_WUNLOCK_TEARDOWN_INACTIVE(zfsvfs) \
 	rms_wunlock(&(zfsvfs)->z_teardown_inactive_lock)

-#define ZFS_WLOCK_TEARDOWN_INACTIVE_WLOCKED(zfsvfs) \
+#define ZFS_TEARDOWN_INACTIVE_WLOCKED(zfsvfs) \
 	rms_wowned(&(zfsvfs)->z_teardown_inactive_lock)

 /*
@@ -2437,7 +2437,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
 	znode_t *zp;

 	ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
-	ASSERT(ZFS_WLOCK_TEARDOWN_INACTIVE_WLOCKED(zp->z_zfsvfs));
+	ASSERT(ZFS_TEARDOWN_INACTIVE_WLOCKED(zfsvfs));

 	/*
 	 * We already own this, so just update the objset_t, as the one we
@@ -606,7 +606,7 @@ zfs_znode_dmu_fini(znode_t *zp)
 {
 	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
 	    zp->z_unlinked ||
-	    ZFS_WLOCK_TEARDOWN_INACTIVE_WLOCKED(zp->z_zfsvfs));
+	    ZFS_TEARDOWN_INACTIVE_WLOCKED(zp->z_zfsvfs));

 	sa_handle_destroy(zp->z_sa_hdl);
 	zp->z_sa_hdl = NULL;
@@ -323,8 +323,7 @@ dev/syscons/scvesactl.c optional sc vga vesa
 dev/syscons/scvgarndr.c optional sc vga
 dev/tpm/tpm.c optional tpm
 dev/tpm/tpm20.c optional tpm
-dev/tpm/tpm_crb.c optional tpm acpi \
-	compile-with "${NORMAL_C} ${NO_WINT_IN_BOOL_CONTEXT}"
+dev/tpm/tpm_crb.c optional tpm acpi
 dev/tpm/tpm_tis.c optional tpm acpi
 dev/tpm/tpm_acpi.c optional tpm acpi
 dev/tpm/tpm_isa.c optional tpm isa
@@ -37,9 +37,6 @@ CWARNEXTRA+= -Wno-error-shift-negative-value
 .if ${COMPILER_VERSION} >= 40000
 CWARNEXTRA+= -Wno-address-of-packed-member
 .endif
-.if ${COMPILER_VERSION} >= 100000
-NO_WINT_IN_BOOL_CONTEXT= -Wno-int-in-bool-context
-.endif
 .endif

 .if ${COMPILER_TYPE} == "gcc"
@@ -389,7 +389,7 @@ kernel-cleandepend: .PHONY

 kernel-tags:
 	@ls .depend.* > /dev/null 2>&1 || \
-	    { echo "you must make depend first"; exit 1; }
+	    { echo "you must make all first"; exit 1; }
 	sh $S/conf/systags.sh

 kernel-install: .PHONY
@@ -298,7 +298,7 @@ tpmcrb_cancel_cmd(struct tpm_sc *sc)
 		return (false);
 	}

-	WR4(sc, TPM_CRB_CTRL_CANCEL, !TPM_CRB_CTRL_CANCEL_CMD);
+	WR4(sc, TPM_CRB_CTRL_CANCEL, ~TPM_CRB_CTRL_CANCEL_CMD);
 	return (true);
 }

@@ -330,7 +330,7 @@ tpmcrb_transmit(struct tpm_sc *sc, size_t length)
 		return (EIO);
 	}
 	/* Clear cancellation bit */
-	WR4(sc, TPM_CRB_CTRL_CANCEL, !TPM_CRB_CTRL_CANCEL_CMD);
+	WR4(sc, TPM_CRB_CTRL_CANCEL, ~TPM_CRB_CTRL_CANCEL_CMD);

 	/* Switch device to idle state if necessary */
 	if (!(RD4(sc, TPM_CRB_CTRL_STS) & TPM_CRB_CTRL_STS_IDLE_BIT)) {
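
The two tpm_crb.c hunks above replace a logical NOT with a bitwise NOT in the value written to the cancel register. A standalone sketch of the difference, assuming TPM_CRB_CTRL_CANCEL_CMD is bit 0 as in the driver; the define here is illustrative, not taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define TPM_CRB_CTRL_CANCEL_CMD	(1U << 0)

int
main(void)
{
	uint32_t logical = !TPM_CRB_CTRL_CANCEL_CMD;	/* logical NOT of a nonzero mask is 0 */
	uint32_t bitwise = ~TPM_CRB_CTRL_CANCEL_CMD;	/* bitwise NOT flips every bit */

	printf("!CANCEL_CMD = 0x%08x\n", logical);	/* 0x00000000 */
	printf("~CANCEL_CMD = 0x%08x\n", bitwise);	/* 0xfffffffe */
	return (0);
}
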
@@ -268,7 +268,6 @@ g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct t
 {
 	struct disk *dp;
 	struct g_disk_softc *sc;
-	int error;

 	sc = pp->private;
 	dp = sc->dp;
@@ -277,8 +276,7 @@ g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct t

 	if (dp->d_ioctl == NULL)
 		return (ENOIOCTL);
-	error = dp->d_ioctl(dp, cmd, data, fflag, td);
-	return (error);
+	return (dp->d_ioctl(dp, cmd, data, fflag, td));
 }

 static off_t
@@ -870,7 +870,7 @@ do_execve(struct thread *td, struct image_args *args, struct mac *mac_p)
 	/* Set values passed into the program in registers. */
 	(*p->p_sysent->sv_setregs)(td, imgp, stack_base);

-	vfs_mark_atime(imgp->vp, td->td_ucred);
+	VOP_MMAPPED(imgp->vp);

 	SDT_PROBE1(proc, , , exec__success, args->fname);

@@ -195,7 +195,7 @@ smr_advance(smr_t smr)
 	 * odd and an observed value of 0 in a particular CPU means
 	 * it is not currently in a read section.
 	 */
-	s = smr->c_shared;
+	s = zpcpu_get(smr)->c_shared;
 	goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR;

 	/*
@@ -242,14 +242,19 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
 	 */
 	success = true;
 	critical_enter();
-	s = smr->c_shared;
+	s = zpcpu_get(smr)->c_shared;

 	/*
 	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
 	 * observe an updated read sequence that is larger than write.
 	 */
 	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
-	s_wr_seq = smr_current(smr);
+
+	/*
+	 * wr_seq must be loaded prior to any c_seq value so that a stale
+	 * c_seq can only reference time after this wr_seq.
+	 */
+	s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);

 	/*
 	 * Detect whether the goal is valid and has already been observed.
@@ -336,6 +341,12 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
 out:
 	critical_exit();

+	/*
+	 * Serialize with smr_advance()/smr_exit(). The caller is now free
+	 * to modify memory as expected.
+	 */
+	atomic_thread_fence_acq();
+
 	return (success);
 }

@@ -254,7 +254,7 @@ namei_cleanup_cnp(struct componentname *cnp)
 }

 static int
-namei_handle_root(struct nameidata *ndp, struct vnode **dpp)
+namei_handle_root(struct nameidata *ndp, struct vnode **dpp, u_int n)
 {
 	struct componentname *cnp;

@@ -276,7 +276,7 @@ namei_handle_root(struct nameidata *ndp, struct vnode **dpp)
 		ndp->ni_pathlen--;
 	}
 	*dpp = ndp->ni_rootdir;
-	vrefact(*dpp);
+	vrefactn(*dpp, n);
 	return (0);
 }

@@ -395,8 +395,11 @@ namei(struct nameidata *ndp)
 	 * Get starting point for the translation.
 	 */
 	FILEDESC_SLOCK(fdp);
+	/*
+	 * The reference on ni_rootdir is acquired in the block below to avoid
+	 * back-to-back atomics for absolute lookups.
+	 */
 	ndp->ni_rootdir = fdp->fd_rdir;
-	vrefact(ndp->ni_rootdir);
 	ndp->ni_topdir = fdp->fd_jdir;

 	/*
@@ -412,15 +415,29 @@ namei(struct nameidata *ndp)
 	cnp->cn_nameptr = cnp->cn_pnbuf;
 	if (cnp->cn_pnbuf[0] == '/') {
 		ndp->ni_resflags |= NIRES_ABS;
-		error = namei_handle_root(ndp, &dp);
+		error = namei_handle_root(ndp, &dp, 2);
+		if (error != 0) {
+			/*
+			 * Simplify error handling, we should almost never be
+			 * here.
+			 */
+			vrefact(ndp->ni_rootdir);
+		}
 	} else {
 		if (ndp->ni_startdir != NULL) {
+			vrefact(ndp->ni_rootdir);
 			dp = ndp->ni_startdir;
 			startdir_used = 1;
 		} else if (ndp->ni_dirfd == AT_FDCWD) {
 			dp = fdp->fd_cdir;
-			vrefact(dp);
+			if (dp == ndp->ni_rootdir) {
+				vrefactn(dp, 2);
+			} else {
+				vrefact(ndp->ni_rootdir);
+				vrefact(dp);
+			}
 		} else {
+			vrefact(ndp->ni_rootdir);
 			rights = ndp->ni_rightsneeded;
 			cap_rights_set(&rights, CAP_LOOKUP);

@@ -567,7 +584,7 @@ namei(struct nameidata *ndp)
 		cnp->cn_nameptr = cnp->cn_pnbuf;
 		if (*(cnp->cn_nameptr) == '/') {
 			vrele(dp);
-			error = namei_handle_root(ndp, &dp);
+			error = namei_handle_root(ndp, &dp, 1);
 			if (error != 0)
 				goto out;
 		}
@@ -3046,6 +3046,19 @@ vrefact(struct vnode *vp)
 #endif
 }

+void
+vrefactn(struct vnode *vp, u_int n)
+{
+
+	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+#ifdef INVARIANTS
+	int old = atomic_fetchadd_int(&vp->v_usecount, n);
+	VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old));
+#else
+	atomic_add_int(&vp->v_usecount, n);
+#endif
+}
+
 /*
  * Return reference count of a vnode.
  *
@@ -5942,23 +5955,6 @@ vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
 	return (0);
 }

-/*
- * Mark for update the access time of the file if the filesystem
- * supports VOP_MARKATIME. This functionality is used by execve and
- * mmap, so we want to avoid the I/O implied by directly setting
- * va_atime for the sake of efficiency.
- */
-void
-vfs_mark_atime(struct vnode *vp, struct ucred *cred)
-{
-	struct mount *mp;
-
-	mp = vp->v_mount;
-	ASSERT_VOP_LOCKED(vp, "vfs_mark_atime");
-	if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
-		(void)VOP_MARKATIME(vp);
-}
-
 /*
  * The purpose of this routine is to remove granularity from accmode_t,
  * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
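
The vrefactn() added above takes n use-count references with a single atomic add, which is what lets the namei() hunks earlier take the root and start directory references without back-to-back atomics. A userspace C11 analogue of the idea; the struct and helper names are illustrative, not kernel API:

#include <assert.h>
#include <stdatomic.h>

struct obj {
	atomic_uint usecount;
};

static void
obj_ref(struct obj *o)
{
	atomic_fetch_add_explicit(&o->usecount, 1, memory_order_relaxed);
}

static void
obj_refn(struct obj *o, unsigned int n)
{
	/* One atomic read-modify-write covers all n references. */
	atomic_fetch_add_explicit(&o->usecount, n, memory_order_relaxed);
}

int
main(void)
{
	struct obj o = { .usecount = 1 };

	obj_ref(&o);		/* two back-to-back atomics ... */
	obj_ref(&o);
	obj_refn(&o, 2);	/* ... versus a single one */
	assert(atomic_load(&o.usecount) == 5);
	return (0);
}
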
@@ -181,9 +181,9 @@ vop_setattr {
 };


-%% markatime vp L L L
+%% mmapped vp L L L

-vop_markatime {
+vop_mmapped {
 	IN struct vnode *vp;
 };

@@ -11,5 +11,3 @@ SRCS+= tpm_isa.c tpm_acpi.c isa_if.h opt_acpi.h acpi_if.h
 SRCS+= tpm20.c tpm_crb.c tpm_tis.c opt_tpm.h

 .include <bsd.kmod.mk>
-
-CWARNFLAGS.tpm_crb.c+= ${NO_WINT_IN_BOOL_CONTEXT}
@@ -921,7 +921,7 @@ vnet_vlan_uninit(const void *unused __unused)

 	if_clone_detach(V_vlan_cloner);
 }
-VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
+VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
     vnet_vlan_uninit, NULL);
 #endif

@@ -69,11 +69,18 @@ struct smr {

 /*
  * Return the current write sequence number.
  */
+static inline smr_seq_t
+smr_shared_current(smr_shared_t s)
+{
+
+	return (atomic_load_int(&s->s_wr_seq));
+}
+
 static inline smr_seq_t
 smr_current(smr_t smr)
 {

-	return (atomic_load_int(&smr->c_shared->s_wr_seq));
+	return (smr_shared_current(zpcpu_get(smr)->c_shared));
 }

 /*
@@ -106,7 +113,7 @@ smr_enter(smr_t smr)
 	 * is detected and handled there.
 	 */
 	/* This is an add because we do not have atomic_store_acq_int */
-	atomic_add_acq_int(&smr->c_seq, smr_current(smr));
+	atomic_add_acq_int(&smr->c_seq, smr_shared_current(smr->c_shared));
 }

 /*
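
The smr_shared_current() introduced above is a small accessor over the shared write sequence, so smr_current() and smr_enter() read it the same way once the smr pointer becomes per-CPU (zpcpu_get). A userspace C11 sketch of that accessor pattern, with simplified stand-in types rather than the kernel ones:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t smr_seq_t;

struct smr_shared {
	_Atomic smr_seq_t s_wr_seq;	/* global write sequence */
};

/* Single place that knows how the write sequence is loaded. */
static inline smr_seq_t
shared_current(struct smr_shared *s)
{
	return (atomic_load_explicit(&s->s_wr_seq, memory_order_relaxed));
}

int
main(void)
{
	struct smr_shared s = { .s_wr_seq = 2 };

	printf("current write sequence: %" PRIu32 "\n", shared_current(&s));
	return (0);
}
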
@@ -900,6 +900,7 @@ void vrele(struct vnode *vp);
 void vref(struct vnode *vp);
 void vrefl(struct vnode *vp);
 void vrefact(struct vnode *vp);
+void vrefactn(struct vnode *vp, u_int n);
 int vrefcnt(struct vnode *vp);
 void v_addpollinfo(struct vnode *vp);

@@ -936,7 +937,6 @@ void vfs_hash_rehash(struct vnode *vp, u_int hash);
 void vfs_hash_remove(struct vnode *vp);

 int vfs_kqfilter(struct vop_kqfilter_args *);
-void vfs_mark_atime(struct vnode *vp, struct ucred *cred);
 struct dirent;
 int vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off);
 int vfs_emptydir(struct vnode *vp);
@@ -108,7 +108,7 @@ static vop_getattr_t ufs_getattr;
 static vop_ioctl_t ufs_ioctl;
 static vop_link_t ufs_link;
 static int ufs_makeinode(int mode, struct vnode *, struct vnode **, struct componentname *, const char *);
-static vop_markatime_t ufs_markatime;
+static vop_mmapped_t ufs_mmapped;
 static vop_mkdir_t ufs_mkdir;
 static vop_mknod_t ufs_mknod;
 static vop_open_t ufs_open;
@@ -676,19 +676,22 @@ ufs_update_nfs4_acl_after_mode_change(struct vnode *vp, int mode,
 }
 #endif /* UFS_ACL */

-/*
- * Mark this file's access time for update for vfs_mark_atime(). This
- * is called from execve() and mmap().
- */
 static int
-ufs_markatime(ap)
-	struct vop_markatime_args /* {
+ufs_mmapped(ap)
+	struct vop_mmapped_args /* {
 		struct vnode *a_vp;
 	} */ *ap;
 {
-	struct inode *ip = VTOI(ap->a_vp);
+	struct vnode *vp;
+	struct inode *ip;
+	struct mount *mp;

-	UFS_INODE_SET_FLAG_SHARED(ip, IN_ACCESS);
+	vp = ap->a_vp;
+	ip = VTOI(vp);
+	mp = vp->v_mount;
+
+	if ((mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
+		UFS_INODE_SET_FLAG_SHARED(ip, IN_ACCESS);
 	/*
 	 * XXXKIB No UFS_UPDATE(ap->a_vp, 0) there.
 	 */
@@ -2741,7 +2744,7 @@ struct vop_vector ufs_vnodeops = {
 	.vop_ioctl = ufs_ioctl,
 	.vop_link = ufs_link,
 	.vop_lookup = vfs_cache_lookup,
-	.vop_markatime = ufs_markatime,
+	.vop_mmapped = ufs_mmapped,
 	.vop_mkdir = ufs_mkdir,
 	.vop_mknod = ufs_mknod,
 	.vop_need_inactive = ufs_need_inactive,
@@ -2783,7 +2786,6 @@ struct vop_vector ufs_fifoops = {
 	.vop_getattr = ufs_getattr,
 	.vop_inactive = ufs_inactive,
 	.vop_kqfilter = ufsfifo_kqfilter,
-	.vop_markatime = ufs_markatime,
 	.vop_pathconf = ufs_pathconf,
 	.vop_print = ufs_print,
 	.vop_read = VOP_PANIC,
@@ -1354,7 +1354,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	*objp = obj;
 	*flagsp = flags;

-	vfs_mark_atime(vp, cred);
+	VOP_MMAPPED(vp);

 done:
 	if (error != 0 && *writecounted) {
@@ -25,17 +25,19 @@
  * $FreeBSD$
  */

+#include <sys/param.h>
 #include <sys/ioctl.h>
 #include <sys/linker.h>
 #include <sys/module.h>
-#include <sys/param.h>
 #include <sys/socket.h>
 #include <sys/types.h>

 #include <net/if.h>

+#include <errno.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <strings.h>

 #include <atf-c.h>

@@ -51,7 +53,7 @@ ATF_TC_BODY(params, tc)
 	int s;

 	s = kldload("if_epair");
-	if (s != 0)
+	if (s != 0 && errno != EEXIST)
 		atf_tc_fail("Failed to load if_epair");

 	s = socket(AF_INET, SOCK_DGRAM, 0);
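
The test fix above treats an already-loaded module as success: kldload(2) fails with errno set to EEXIST when the module is already present. A standalone FreeBSD sketch of the same idempotent-load pattern, checking the documented -1 error return:

#include <sys/param.h>
#include <sys/linker.h>

#include <errno.h>
#include <stdio.h>

int
main(void)
{
	/* Already loaded (or compiled in) is fine; only other errors are fatal. */
	if (kldload("if_epair") < 0 && errno != EEXIST) {
		perror("kldload(if_epair)");
		return (1);
	}
	printf("if_epair available\n");
	return (0);
}
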
@@ -84,7 +84,7 @@ smrs_read(void)
 	/* Wait for the writer to exit. */
 	while (smrs_completed == 0) {
 		smr_enter(smrs_smr);
-		cur = (void *)atomic_load_ptr(&smrs_current);
+		cur = (void *)atomic_load_acq_ptr(&smrs_current);
 		if (cur->generation == -1)
 			smrs_error(cur, "read early: Use after free!\n");
 		atomic_add_int(&cur->count, 1);
@@ -107,6 +107,7 @@ smrs_write(void)

 	for (i = 0; i < smrs_iterations; i++) {
 		cur = uma_zalloc_smr(smrs_zone, M_WAITOK);
+		atomic_thread_fence_rel();
 		cur = (void *)atomic_swap_ptr(&smrs_current, (uintptr_t)cur);
 		uma_zfree_smr(smrs_zone, cur);
 	}
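
The smrstress hunks above pair an acquire load in the reader with a release fence ahead of the pointer swap in the writer, so the object's initialization is ordered before the pointer becomes visible. A userspace C11 sketch of that publish/observe pairing; all names here are illustrative:

#include <stdatomic.h>
#include <stdlib.h>

struct node {
	int generation;
};

static _Atomic(struct node *) current;

static struct node *
publish(struct node *n)
{
	n->generation = 1;	/* initialize the object ... */
	atomic_thread_fence(memory_order_release);
	/* ... then swap it in; the fence orders the store above before
	 * the pointer is published, even though the swap is relaxed. */
	return (atomic_exchange_explicit(&current, n, memory_order_relaxed));
}

static int
observe(void)
{
	struct node *n;

	/* Acquire load pairs with the writer's release fence. */
	n = atomic_load_explicit(&current, memory_order_acquire);
	return (n != NULL ? n->generation : -1);
}

int
main(void)
{
	struct node *n, *old;

	n = malloc(sizeof(*n));
	if (n == NULL)
		return (1);
	old = publish(n);	/* old is NULL here; a real writer would free it */
	(void)old;
	return (observe() == 1 ? 0 : 1);
}
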