Merge ^/head r338015 through r338025.

This commit is contained in:
Dimitry Andric 2018-08-18 20:43:53 +00:00
commit f42813cfc8
21 changed files with 324 additions and 120 deletions

View File

@ -70,14 +70,14 @@ ldr_bootinfo(struct bootinfo *bi, uint64_t *bi_addr)
UINTN mmsz, pages, sz;
UINT32 mmver;
bi->bi_systab = (uint64_t)ST;
bi->bi_hcdp = (uint64_t)efi_get_table(&hcdp_guid);
bi->bi_systab = (uintptr_t)ST;
bi->bi_hcdp = (uintptr_t)efi_get_table(&hcdp_guid);
sz = sizeof(EFI_HANDLE);
status = BS->LocateHandle(ByProtocol, &fpswa_guid, 0, &sz, &handle);
if (status == 0)
status = BS->HandleProtocol(handle, &fpswa_guid, &fpswa);
bi->bi_fpswa = (status == 0) ? (uint64_t)fpswa : 0;
bi->bi_fpswa = (status == 0) ? (uintptr_t)fpswa : 0;
bisz = (sizeof(struct bootinfo) + 0x0f) & ~0x0f;
@ -109,7 +109,7 @@ ldr_bootinfo(struct bootinfo *bi, uint64_t *bi_addr)
* aligned).
*/
*bi_addr = addr;
mm = (void *)(addr + bisz);
mm = (void *)(uintptr_t)(addr + bisz);
sz = (EFI_PAGE_SIZE * pages) - bisz;
status = BS->GetMemoryMap(&sz, mm, &mapkey, &mmsz, &mmver);
if (EFI_ERROR(status)) {
@ -117,12 +117,12 @@ ldr_bootinfo(struct bootinfo *bi, uint64_t *bi_addr)
(long)status);
return (EINVAL);
}
bi->bi_memmap = (uint64_t)mm;
bi->bi_memmap = (uintptr_t)mm;
bi->bi_memmap_size = sz;
bi->bi_memdesc_size = mmsz;
bi->bi_memdesc_version = mmver;
bcopy(bi, (void *)(*bi_addr), sizeof(*bi));
bcopy(bi, (void *)(uintptr_t)(*bi_addr), sizeof(*bi));
return (0);
}

View File

@ -363,7 +363,7 @@ bi_load_efi_data(struct preloaded_file *kfp)
* memory map on a 16-byte boundary (the bootinfo block is page
* aligned).
*/
efihdr = (struct efi_map_header *)addr;
efihdr = (struct efi_map_header *)(uintptr_t)addr;
mm = (void *)((uint8_t *)efihdr + efisz);
sz = (EFI_PAGE_SIZE * pages) - efisz;

View File

@ -278,9 +278,9 @@ efi_copy_finish(void)
{
uint64_t *src, *dst, *last;
src = (uint64_t *)staging;
dst = (uint64_t *)(staging - stage_offset);
last = (uint64_t *)staging_end;
src = (uint64_t *)(uintptr_t)staging;
dst = (uint64_t *)(uintptr_t)(staging - stage_offset);
last = (uint64_t *)(uintptr_t)staging_end;
while (src < last)
*dst++ = *src++;

View File

@ -314,6 +314,7 @@ device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs.
device wpi # Intel 3945ABG wireless NICs.
# Pseudo devices.
device crypto # core crypto support
device loop # Network loopback
device random # Entropy device
device padlock_rng # VIA Padlock RNG
@ -376,6 +377,3 @@ device vmx # VMware VMXNET3 Ethernet
# Netmap provides direct access to TX/RX rings on supported NICs
device netmap # netmap(4) support
# The crypto framework is required by IPSEC
device crypto # Required by IPSEC

View File

@ -9,7 +9,7 @@ options VIMAGE # Subsystem virtualization, e.g. VNET
options INET # InterNETworking
options INET6 # IPv6 communications protocols
options TCP_HHOOK # hhook(9) framework for TCP
device crypto # IPSec && !crypto is nonsensical
device crypto # core crypto support
options IPSEC # IP (v4/v6) security
options SCTP # Stream Control Transmission Protocol
options FFS # Berkeley Fast Filesystem

View File

@ -9,7 +9,7 @@ options VIMAGE # Subsystem virtualization, e.g. VNET
options INET # InterNETworking
options INET6 # IPv6 communications protocols
options TCP_HHOOK # hhook(9) framework for TCP
device crypto # IPSec && !crypto is nonsensical
device crypto # core crypto support
options IPSEC # IP (v4/v6) security
options SCTP # Stream Control Transmission Protocol
options FFS # Berkeley Fast Filesystem

View File

@ -237,6 +237,7 @@ device kbdmux
device vt_efifb
# Pseudo devices.
device crypto # core crypto support
device loop # Network loopback
device random # Entropy device
device ether # Ethernet support
@ -266,6 +267,3 @@ options THUNDERX_PASS_1_1_ERRATA
options FDT
device acpi
# The crypto framework is required by IPSEC
device crypto # Required by IPSEC

View File

@ -548,7 +548,7 @@ static devclass_t bge_devclass;
DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, bge, bge_devs,
sizeof(bge_devs), nitems(bge_devs) - 1);
sizeof(bge_devs[0]), nitems(bge_devs) - 1);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
static int bge_allow_asf = 1;

View File

@ -399,6 +399,11 @@ static int pci_enable_ari = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
0, "Enable support for PCIe Alternative RID Interpretation");
static int pci_clear_aer_on_attach = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_aer_on_attach, CTLFLAG_RWTUN,
&pci_clear_aer_on_attach, 0,
"Clear port and device AER state on driver attach");
static int
pci_has_quirk(uint32_t devid, int quirk)
{
@ -4204,17 +4209,98 @@ pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid,
}
#endif
/*
 * Reset AER (Advanced Error Reporting) state on a newly added PCI
 * device: clear stale uncorrected/corrected error status, unmask the
 * standard error classes, and turn on error reporting in the PCIe
 * device control register.  For Root Ports, also disable SERR
 * generation for correctable, non-fatal, and fatal errors.
 * Called from pci_add_child() when pci_clear_aer_on_attach is set.
 */
static void
pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo)
{
int aer;
uint32_t r;
uint16_t r2;
/* Root Ports: stop system-error generation for all severities. */
if (dinfo->cfg.pcie.pcie_location != 0 &&
dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) {
r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
PCIER_ROOT_CTL, 2);
r2 &= ~(PCIEM_ROOT_CTL_SERR_CORR |
PCIEM_ROOT_CTL_SERR_NONFATAL | PCIEM_ROOT_CTL_SERR_FATAL);
pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
PCIER_ROOT_CTL, r2, 2);
}
if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
/*
 * Clear the uncorrectable-error status by writing the value
 * just read back to it (write-1-to-clear register layout —
 * TODO confirm against the PCIe spec for this register).
 */
r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
if (r != 0 && bootverbose) {
pci_printf(&dinfo->cfg,
"clearing AER UC 0x%08x -> 0x%08x\n",
r, pci_read_config(dev, aer + PCIR_AER_UC_STATUS,
4));
}
/* Unmask every standard uncorrectable error class. */
r = pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4);
r &= ~(PCIM_AER_UC_TRAINING_ERROR |
PCIM_AER_UC_DL_PROTOCOL_ERROR |
PCIM_AER_UC_SURPRISE_LINK_DOWN |
PCIM_AER_UC_POISONED_TLP |
PCIM_AER_UC_FC_PROTOCOL_ERROR |
PCIM_AER_UC_COMPLETION_TIMEOUT |
PCIM_AER_UC_COMPLETER_ABORT |
PCIM_AER_UC_UNEXPECTED_COMPLETION |
PCIM_AER_UC_RECEIVER_OVERFLOW |
PCIM_AER_UC_MALFORMED_TLP |
PCIM_AER_UC_ECRC_ERROR |
PCIM_AER_UC_UNSUPPORTED_REQUEST |
PCIM_AER_UC_ACS_VIOLATION |
PCIM_AER_UC_INTERNAL_ERROR |
PCIM_AER_UC_MC_BLOCKED_TLP |
PCIM_AER_UC_ATOMIC_EGRESS_BLK |
PCIM_AER_UC_TLP_PREFIX_BLOCKED);
pci_write_config(dev, aer + PCIR_AER_UC_MASK, r, 4);
/* Same treatment for the correctable-error status register. */
r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
if (r != 0 && bootverbose) {
pci_printf(&dinfo->cfg,
"clearing AER COR 0x%08x -> 0x%08x\n",
r, pci_read_config(dev, aer + PCIR_AER_COR_STATUS,
4));
}
/* Unmask the standard correctable error classes. */
r = pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4);
r &= ~(PCIM_AER_COR_RECEIVER_ERROR |
PCIM_AER_COR_BAD_TLP |
PCIM_AER_COR_BAD_DLLP |
PCIM_AER_COR_REPLAY_ROLLOVER |
PCIM_AER_COR_REPLAY_TIMEOUT |
PCIM_AER_COR_ADVISORY_NF_ERROR |
PCIM_AER_COR_INTERNAL_ERROR |
PCIM_AER_COR_HEADER_LOG_OVFLOW);
pci_write_config(dev, aer + PCIR_AER_COR_MASK, r, 4);
/*
 * Enable reporting of correctable, non-fatal, fatal, and
 * unsupported-request errors in PCIe device control.
 * NOTE(review): pcie_location is used unconditionally here;
 * the earlier check only guarded the Root Port path — confirm
 * AER presence implies a valid PCIe capability location.
 */
r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
PCIER_DEVICE_CTL, 2);
r |= PCIEM_CTL_COR_ENABLE | PCIEM_CTL_NFER_ENABLE |
PCIEM_CTL_FER_ENABLE | PCIEM_CTL_URR_ENABLE;
pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
PCIER_DEVICE_CTL, r, 2);
}
}
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
dinfo->cfg.dev = device_add_child(bus, NULL, -1);
device_set_ivars(dinfo->cfg.dev, dinfo);
device_t dev;
dinfo->cfg.dev = dev = device_add_child(bus, NULL, -1);
device_set_ivars(dev, dinfo);
resource_list_init(&dinfo->resources);
pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
pci_cfg_restore(dinfo->cfg.dev, dinfo);
pci_cfg_save(dev, dinfo, 0);
pci_cfg_restore(dev, dinfo);
pci_print_verbose(dinfo);
pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
pci_add_resources(bus, dev, 0, 0);
pci_child_added(dinfo->cfg.dev);
if (pci_clear_aer_on_attach)
pci_add_child_clear_aer(dev, dinfo);
EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev);
}
@ -6280,3 +6366,128 @@ pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt)
}
return (NULL);
}
/*
 * Print a device identifier of the form "pciD:B:S:F" for dinfo,
 * followed by " (name unit)" when a driver is attached.  No newline
 * is emitted; callers append their own detail and terminator.
 */
static void
pci_print_faulted_dev_name(const struct pci_devinfo *dinfo)
{
const char *dev_name;
device_t dev;
dev = dinfo->cfg.dev;
printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus,
dinfo->cfg.slot, dinfo->cfg.func);
dev_name = device_get_name(dev);
/* device_get_name() returns NULL when no driver is attached. */
if (dev_name != NULL)
printf(" (%s%d)", dev_name, device_get_unit(dev));
}
/*
 * Walk every known PCI device and print those with pending error
 * state: legacy PCI status errors, PCIe device-status errors, and
 * (when the AER extended capability is present) uncorrectable or
 * correctable AER status plus the four header-log dwords.
 * Read-only: nothing is cleared.
 */
void
pci_print_faulted_dev(void)
{
struct pci_devinfo *dinfo;
device_t dev;
int aer, i;
uint32_t r1, r2;
uint16_t status;
STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
dev = dinfo->cfg.dev;
/* Legacy PCI status: parity, abort, and SERR bits. */
status = pci_read_config(dev, PCIR_STATUS, 2);
status &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT |
PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT |
PCIM_STATUS_SERR | PCIM_STATUS_PERR;
if (status != 0) {
pci_print_faulted_dev_name(dinfo);
printf(" error 0x%04x\n", status);
}
/* PCIe device status, if the device has a PCIe capability. */
if (dinfo->cfg.pcie.pcie_location != 0) {
status = pci_read_config(dev,
dinfo->cfg.pcie.pcie_location +
PCIER_DEVICE_STA, 2);
if ((status & (PCIEM_STA_CORRECTABLE_ERROR |
PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
PCIEM_STA_UNSUPPORTED_REQ)) != 0) {
pci_print_faulted_dev_name(dinfo);
printf(" PCIe DEVCTL 0x%04x DEVSTA 0x%04x\n",
pci_read_config(dev,
dinfo->cfg.pcie.pcie_location +
PCIER_DEVICE_CTL, 2),
status);
}
}
/* AER extended capability: dump UC/COR state and header log. */
if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
r1 = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
r2 = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
if (r1 != 0 || r2 != 0) {
pci_print_faulted_dev_name(dinfo);
printf(" AER UC 0x%08x Mask 0x%08x Svr 0x%08x\n"
" COR 0x%08x Mask 0x%08x Ctl 0x%08x\n",
r1, pci_read_config(dev, aer +
PCIR_AER_UC_MASK, 4),
pci_read_config(dev, aer +
PCIR_AER_UC_SEVERITY, 4),
r2, pci_read_config(dev, aer +
PCIR_AER_COR_MASK, 4),
pci_read_config(dev, aer +
PCIR_AER_CAP_CONTROL, 4));
/* Four 32-bit header-log dwords follow the status. */
for (i = 0; i < 4; i++) {
r1 = pci_read_config(dev, aer +
PCIR_AER_HEADER_LOG + i * 4, 4);
printf(" HL%d: 0x%08x\n", i, r1);
}
}
}
}
}
#ifdef DDB
DB_SHOW_COMMAND(pcierr, pci_print_faulted_dev_db)
{
pci_print_faulted_dev();
}
/*
 * Clear PCIe device-status error bits and, when the AER extended
 * capability exists, the AER uncorrectable and correctable status
 * registers, by writing the read values back (write-1-to-clear).
 * The caller guarantees dinfo has a PCIe capability
 * (pcie_location != 0 is checked before calling).
 */
static void
db_clear_pcie_errors(const struct pci_devinfo *dinfo)
{
device_t dev;
int aer;
uint32_t r;
dev = dinfo->cfg.dev;
r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
PCIER_DEVICE_STA, 2);
pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
PCIER_DEVICE_STA, r, 2);
/* No AER capability: nothing more to clear. */
if (pci_find_extcap(dev, PCIZ_AER, &aer) != 0)
return;
r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
if (r != 0)
pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
if (r != 0)
pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
}
/*
 * DDB "pci_clearerr": clear the legacy PCI status error bits on every
 * device, then clear PCIe/AER error state via db_clear_pcie_errors()
 * for devices with a PCIe capability.
 */
DB_COMMAND(pci_clearerr, db_pci_clearerr)
{
struct pci_devinfo *dinfo;
device_t dev;
uint16_t status, status1;
STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
dev = dinfo->cfg.dev;
status1 = status = pci_read_config(dev, PCIR_STATUS, 2);
/* status1 keeps only the error bits to be cleared. */
status1 &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT |
PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT |
PCIM_STATUS_SERR | PCIM_STATUS_PERR;
if (status1 != 0) {
status &= ~status1;
pci_write_config(dev, PCIR_STATUS, status, 2);
}
if (dinfo->cfg.pcie.pcie_location != 0)
db_clear_pcie_errors(dinfo);
}
}
#endif

View File

@ -682,6 +682,8 @@ bool pcie_flr(device_t dev, u_int max_delay, bool force);
int pcie_get_max_completion_timeout(device_t dev);
bool pcie_wait_for_pending_transactions(device_t dev, u_int max_delay);
void pci_print_faulted_dev(void);
#ifdef BUS_SPACE_MAXADDR
#if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
#define PCI_DMA_BOUNDARY 0x100000000

View File

@ -7979,7 +7979,7 @@ nfsrv_copymr(vnode_t vp, vnode_t fvp, vnode_t dvp, struct nfsdevice *ds,
struct nfslayouthash *lhyp;
struct nfslayout *lyp, *nlyp;
struct nfslayouthead thl;
struct mount *mp;
struct mount *mp, *tvmp;
struct acl *aclp;
struct vattr va;
struct timespec mtime;
@ -8042,6 +8042,7 @@ nfsrv_copymr(vnode_t vp, vnode_t fvp, vnode_t dvp, struct nfsdevice *ds,
NFSDRECALLUNLOCK();
ret = 0;
mp = tvmp = NULL;
didprintf = 0;
TAILQ_INIT(&thl);
/* Unlock the MDS vp, so that a LayoutReturn can be done on it. */
@ -8115,6 +8116,20 @@ tryagain2:
TAILQ_FOREACH_SAFE(lyp, &thl, lay_list, nlyp)
nfsrv_freelayout(&thl, lyp);
/*
* Do the vn_start_write() calls here, before the MDS vnode is
* locked and the tvp is created (locked) in the NFS file system
* that dvp is in.
* For tvmp, this probably isn't necessary, since it will be an
* NFS mount and they are not suspendable at this time.
*/
if (ret == 0)
ret = vn_start_write(vp, &mp, V_WAIT | PCATCH);
if (ret == 0) {
tvmp = dvp->v_mount;
ret = vn_start_write(NULL, &tvmp, V_WAIT | PCATCH);
}
/*
* LK_EXCLUSIVE lock the MDS vnode, so that any
* proxied writes through the MDS will be blocked until we have
@ -8123,7 +8138,7 @@ tryagain2:
* changed until the copy is complete.
*/
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
if ((vp->v_iflag & VI_DOOMED) != 0) {
if (ret == 0 && (vp->v_iflag & VI_DOOMED) != 0) {
NFSD_DEBUG(4, "nfsrv_copymr: lk_exclusive doomed\n");
ret = ESTALE;
}
@ -8148,10 +8163,7 @@ tryagain2:
nfsrv_zeropnfsdat = malloc(PNFSDS_COPYSIZ, M_TEMP,
M_WAITOK | M_ZERO);
rdpos = wrpos = 0;
mp = NULL;
ret = vn_start_write(tvp, &mp, V_WAIT | PCATCH);
if (ret == 0)
ret = VOP_GETATTR(fvp, &va, cred);
ret = VOP_GETATTR(fvp, &va, cred);
aresid = 0;
while (ret == 0 && aresid == 0) {
ret = vn_rdwr(UIO_READ, fvp, dat, PNFSDS_COPYSIZ,
@ -8191,8 +8203,6 @@ tryagain2:
ret = 0;
}
if (mp != NULL)
vn_finished_write(mp);
if (ret == 0)
ret = VOP_FSYNC(tvp, MNT_WAIT, p);
@ -8210,18 +8220,16 @@ tryagain2:
acl_free(aclp);
free(dat, M_TEMP);
}
if (tvmp != NULL)
vn_finished_write(tvmp);
/* Update the extended attributes for the newly created DS file. */
if (ret == 0) {
mp = NULL;
ret = vn_start_write(vp, &mp, V_WAIT | PCATCH);
if (ret == 0)
ret = vn_extattr_set(vp, IO_NODELOCKED,
EXTATTR_NAMESPACE_SYSTEM, "pnfsd.dsfile",
sizeof(*wpf) * mirrorcnt, (char *)wpf, p);
if (mp != NULL)
vn_finished_write(mp);
}
if (ret == 0)
ret = vn_extattr_set(vp, IO_NODELOCKED,
EXTATTR_NAMESPACE_SYSTEM, "pnfsd.dsfile",
sizeof(*wpf) * mirrorcnt, (char *)wpf, p);
if (mp != NULL)
vn_finished_write(mp);
/* Get rid of the dontlist entry, so that Layouts can be issued. */
NFSDDONTLISTLOCK();

View File

@ -318,6 +318,7 @@ device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs.
device wpi # Intel 3945ABG wireless NICs.
# Pseudo devices.
device crypto # core crypto support
device loop # Network loopback
device random # Entropy device
device padlock_rng # VIA Padlock RNG
@ -377,6 +378,3 @@ device xenpci # Xen HVM Hypervisor services driver
# VMware support
device vmx # VMware VMXNET3 Ethernet
# The crypto framework is required by IPSEC
device crypto # Required by IPSEC

View File

@ -174,30 +174,37 @@ fallback:
if (dyn_used || fbacklvl >= FBACK_STATIC)
return (ENOENT);
if (fbacklvl <= FBACK_MDENV &&
_res_checkenv(md_envp)) {
hintp = md_envp;
goto found;
switch (fbacklvl) {
case FBACK_MDENV:
fbacklvl++;
if (_res_checkenv(md_envp)) {
hintp = md_envp;
break;
}
/* FALLTHROUGH */
case FBACK_STENV:
fbacklvl++;
if (!stenv_skip && _res_checkenv(kern_envp)) {
hintp = kern_envp;
break;
} else
stenv_skip = true;
/* FALLTHROUGH */
case FBACK_STATIC:
fbacklvl++;
/* We'll fallback to static_hints if needed/can */
if (!sthints_skip &&
_res_checkenv(static_hints))
hintp = static_hints;
else
sthints_skip = true;
break;
default:
return (ENOENT);
}
fbacklvl++;
if (!stenv_skip && fbacklvl <= FBACK_STENV &&
_res_checkenv(kern_envp)) {
hintp = kern_envp;
goto found;
} else
stenv_skip = true;
fbacklvl++;
/* We'll fallback to static_hints if needed/can */
if (!sthints_skip && fbacklvl <= FBACK_STATIC &&
_res_checkenv(static_hints))
hintp = static_hints;
else
sthints_skip = true;
found:
fbacklvl++;
}
if (hintp == NULL)

View File

@ -160,6 +160,7 @@ device dc # DEC/Intel 21143 and various workalikes
device fxp # Intel EtherExpress PRO/100B (82557, 82558)
# Pseudo devices.
device crypto # core crypto support
device loop # Network loopback
device random # Entropy device
device ether # Ethernet support
@ -225,6 +226,3 @@ device sound # Generic sound driver (required)
device snd_ai2s # Apple I2S audio
device snd_davbus # Apple DAVBUS audio
device snd_uaudio # USB Audio
# The crypto framework is required by IPSEC
device crypto # Required by IPSEC

View File

@ -36,7 +36,6 @@ options INET6 # IPv6 communications protocols
options TCP_HHOOK # hhook(9) framework for TCP
options IPSEC # IP (v4/v6) security
options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5
device crypto # core crypto support (required for IPSEC)
options TCP_OFFLOAD # TCP offload
options SCTP # Stream Control Transmission Protocol
options FFS # Berkeley Fast Filesystem
@ -121,6 +120,7 @@ options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones
options ZSTDIO # zstd-compressed kernel and user dumps
# Pseudo devices.
device crypto # core crypto support
device loop # Network loopback
device random # Entropy device
device ether # Ethernet support

View File

@ -229,6 +229,7 @@ options AH_SUPPORT_AR5416 # enable AR5416 tx/rx descriptors
device ath_rate_sample # SampleRate tx rate control for ath
# Pseudo devices.
device crypto # core crypto support
device loop # Network loopback
device random # Entropy device
device ether # Ethernet support
@ -257,6 +258,3 @@ device sound # Generic sound driver (required)
device snd_audiocs # Crystal Semiconductor CS4231
device snd_es137x # Ensoniq AudioPCI ES137x
device snd_t4dwave # Acer Labs M5451
# The crypto framework is required by IPSEC
device crypto # Required by IPSEC

View File

@ -1169,7 +1169,7 @@ page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
void *p; /* Returned page */
*pflag = UMA_SLAB_KERNEL;
p = (void *) kmem_malloc_domain(kernel_arena, domain, bytes, wait);
p = (void *) kmem_malloc_domain(domain, bytes, wait);
return (p);
}
@ -3680,32 +3680,22 @@ uma_zone_exhausted_nolock(uma_zone_t zone)
void *
uma_large_malloc_domain(vm_size_t size, int domain, int wait)
{
struct vmem *arena;
vm_offset_t addr;
uma_slab_t slab;
#if VM_NRESERVLEVEL > 0
if (__predict_true((wait & M_EXEC) == 0))
arena = kernel_arena;
else
arena = kernel_rwx_arena;
#else
arena = kernel_arena;
#endif
slab = zone_alloc_item(slabzone, NULL, domain, wait);
if (slab == NULL)
return (NULL);
if (domain == UMA_ANYDOMAIN)
addr = kmem_malloc(arena, size, wait);
addr = kmem_malloc(NULL, size, wait);
else
addr = kmem_malloc_domain(arena, domain, size, wait);
addr = kmem_malloc_domain(domain, size, wait);
if (addr != 0) {
vsetslab(addr, slab);
slab->us_data = (void *)addr;
slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
#if VM_NRESERVLEVEL > 0
if (__predict_false(arena == kernel_rwx_arena))
if (__predict_false((wait & M_EXEC) != 0))
slab->us_flags |= UMA_SLAB_KRWX;
#endif
slab->us_size = size;

View File

@ -65,8 +65,7 @@ vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
vm_offset_t kmem_malloc_domain(struct vmem *, int domain, vm_size_t size,
int flags);
vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
/* This provides memory for previously allocated address space. */

View File

@ -372,23 +372,18 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
* Allocate wired-down pages in the kernel's address space.
*/
vm_offset_t
kmem_malloc_domain(struct vmem *vmem, int domain, vm_size_t size, int flags)
kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
vmem_t *arena;
vm_offset_t addr;
int rv;
#if VM_NRESERVLEVEL > 0
KASSERT(vmem == kernel_arena || vmem == kernel_rwx_arena,
("kmem_malloc_domain: Only kernel_arena or kernel_rwx_arena "
"are supported."));
if (__predict_true(vmem == kernel_arena))
if (__predict_true((flags & M_EXEC) == 0))
arena = vm_dom[domain].vmd_kernel_arena;
else
arena = vm_dom[domain].vmd_kernel_rwx_arena;
#else
KASSERT(vmem == kernel_arena,
("kmem_malloc_domain: Only kernel_arena is supported."));
arena = vm_dom[domain].vmd_kernel_arena;
#endif
size = round_page(size);
@ -404,7 +399,7 @@ kmem_malloc_domain(struct vmem *vmem, int domain, vm_size_t size, int flags)
}
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
kmem_malloc(struct vmem *vmem __unused, vm_size_t size, int flags)
{
struct vm_domainset_iter di;
vm_offset_t addr;
@ -412,7 +407,7 @@ kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
do {
addr = kmem_malloc_domain(vmem, domain, size, flags);
addr = kmem_malloc_domain(domain, size, flags);
if (addr != 0)
break;
} while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0);

View File

@ -989,6 +989,7 @@ printcpuinfo(void)
"\020"
"\033IBPB"
"\034STIBP"
"\035L1DFL"
"\036ARCH_CAP"
"\040SSBD"
);

View File

@ -125,8 +125,9 @@ void usage(void);
#define TIMESTAMP_ELAPSED 0x2
#define TIMESTAMP_RELATIVE 0x4
static int timestamp, decimal, fancy = 1, suppressdata, tail, threads, maxdata,
resolv = 0, abiflag = 0, syscallno = 0;
static bool abiflag, decimal, fancy = true, resolv, suppressdata, syscallno,
tail, threads;
static int timestamp, maxdata;
static const char *tracefile = DEF_TRACEFILE;
static struct ktr_header ktr_header;
@ -363,40 +364,40 @@ main(int argc, char *argv[])
while ((ch = getopt(argc,argv,"f:dElm:np:AHRrSsTt:")) != -1)
switch (ch) {
case 'A':
abiflag = 1;
abiflag = true;
break;
case 'f':
tracefile = optarg;
break;
case 'd':
decimal = 1;
decimal = true;
break;
case 'l':
tail = 1;
tail = true;
break;
case 'm':
maxdata = atoi(optarg);
break;
case 'n':
fancy = 0;
fancy = false;
break;
case 'p':
pid = atoi(optarg);
break;
case 'r':
resolv = 1;
resolv = true;
break;
case 'S':
syscallno = 1;
syscallno = true;
break;
case 's':
suppressdata = 1;
suppressdata = true;
break;
case 'E':
timestamp |= TIMESTAMP_ELAPSED;
break;
case 'H':
threads = 1;
threads = true;
break;
case 'R':
timestamp |= TIMESTAMP_RELATIVE;
@ -427,18 +428,18 @@ main(int argc, char *argv[])
caph_cache_tzdata();
#ifdef WITH_CASPER
if (resolv != 0) {
if (resolv) {
if (cappwdgrp_setup(&cappwd, &capgrp) < 0) {
cappwd = NULL;
capgrp = NULL;
}
}
if (resolv == 0 || (cappwd != NULL && capgrp != NULL)) {
if (!resolv || (cappwd != NULL && capgrp != NULL)) {
if (caph_enter() < 0)
err(1, "unable to enter capability mode");
}
#else
if (resolv == 0) {
if (!resolv) {
if (caph_enter() < 0)
err(1, "unable to enter capability mode");
}
@ -1835,14 +1836,14 @@ ktrstat(struct stat *statp)
printf("struct stat {");
printf("dev=%ju, ino=%ju, ",
(uintmax_t)statp->st_dev, (uintmax_t)statp->st_ino);
if (resolv == 0)
if (!resolv)
printf("mode=0%jo, ", (uintmax_t)statp->st_mode);
else {
strmode(statp->st_mode, mode);
printf("mode=%s, ", mode);
}
printf("nlink=%ju, ", (uintmax_t)statp->st_nlink);
if (resolv == 0) {
if (!resolv) {
pwd = NULL;
} else {
#ifdef WITH_CASPER
@ -1856,7 +1857,7 @@ ktrstat(struct stat *statp)
printf("uid=%ju, ", (uintmax_t)statp->st_uid);
else
printf("uid=\"%s\", ", pwd->pw_name);
if (resolv == 0) {
if (!resolv) {
grp = NULL;
} else {
#ifdef WITH_CASPER
@ -1872,7 +1873,7 @@ ktrstat(struct stat *statp)
printf("gid=\"%s\", ", grp->gr_name);
printf("rdev=%ju, ", (uintmax_t)statp->st_rdev);
printf("atime=");
if (resolv == 0)
if (!resolv)
printf("%jd", (intmax_t)statp->st_atim.tv_sec);
else {
tm = localtime(&statp->st_atim.tv_sec);
@ -1884,7 +1885,7 @@ ktrstat(struct stat *statp)
else
printf(", ");
printf("mtime=");
if (resolv == 0)
if (!resolv)
printf("%jd", (intmax_t)statp->st_mtim.tv_sec);
else {
tm = localtime(&statp->st_mtim.tv_sec);
@ -1896,7 +1897,7 @@ ktrstat(struct stat *statp)
else
printf(", ");
printf("ctime=");
if (resolv == 0)
if (!resolv)
printf("%jd", (intmax_t)statp->st_ctim.tv_sec);
else {
tm = localtime(&statp->st_ctim.tv_sec);
@ -1908,7 +1909,7 @@ ktrstat(struct stat *statp)
else
printf(", ");
printf("birthtime=");
if (resolv == 0)
if (!resolv)
printf("%jd", (intmax_t)statp->st_birthtim.tv_sec);
else {
tm = localtime(&statp->st_birthtim.tv_sec);