MFHead @350453

Sponsored by:	The FreeBSD Foundation
Author: Alan Somers
Date: 2019-07-30 19:42:46 +00:00
Commit: ad13e15e1d

41 changed files with 392 additions and 2330 deletions


@@ -12,7 +12,6 @@ task:
timeout_in: 90m
install_script:
- pkg install -y qemu-devel uefi-edk2-qemu-x86_64
- fetch https://people.freebsd.org/~emaste/OVMF.fd
script:
- make -j$(sysctl -n hw.ncpu) WITHOUT_TOOLCHAIN=yes buildworld buildkernel
test_script:


@@ -1187,6 +1187,12 @@ strip_main(struct elfcopy *ecp, int argc, char **argv)
ecp->strip = STRIP_ALL;
if (optind == argc)
strip_usage();
/*
* Only accept a single input file if an output file has been
* specified.
*/
if (outfile != NULL && argc != (optind + 1))
strip_usage();
for (i = optind; i < argc; i++)
create_file(ecp, argv[i], outfile);


@@ -23,7 +23,7 @@
.\"
.\" $Id: strip.1 3642 2018-10-14 14:24:28Z jkoshy $
.\"
.Dd September 17, 2011
.Dd July 27, 2019
.Dt STRIP 1
.Os
.Sh NAME
@@ -51,7 +51,15 @@
.Sh DESCRIPTION
The
.Nm
utility is used to discard information from ELF objects.
utility is used to discard information from the ELF objects
specified by the arguments
.Ar .
.Pp
If an explicit output file name is not specified using the
.Fl o
option, the
.Nm
utility will modify its input arguments in-place.
.Pp
The
.Nm
@@ -65,8 +73,9 @@ Print a help message and exit.
Remove all content except that which would be used for debugging.
.It Fl o Ar outputfile | Fl -output-file= Ns Ar outputfile
Write the stripped object to file
.Ar outputfile .
The default behaviour is to modify objects in place.
.Ar outputfile
instead of modifying the input in-place.
Only a single input object should be specified if this option is used.
.It Fl p | Fl -preserve-dates
Preserve the object's access and modification times.
.It Fl s | Fl -strip-all


@@ -5,7 +5,7 @@
/set type=dir uname=root gname=wheel mode=0755 tags=package=runtime
.
account
account mode=0750
..
at
/set uname=daemon


@@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd November 5, 2018
.Dd July 29, 2019
.Dt LIBCASPER 3
.Os
.Sh NAME
@@ -212,9 +212,6 @@ compatible API
provides
.Xr getpwent 3
compatible API
.It system.random
allows to obtain entropy from
.Pa /dev/random
.It system.sysctl
provides
.Xr sysctlbyname 3


@@ -247,7 +247,7 @@ for (i = 0; i < argc; i++) {
fd = fileargs_open(fa, argv[i]);
if (fd < 0)
err(1, "unable to open file %s", argv[i]);
printf("File %s opened in capability mode\n", argv[i]);
printf("File %s opened in capability mode\en", argv[i]);
close(fd);
}


@@ -289,6 +289,7 @@ MAN= accept_filter.9 \
securelevel_gt.9 \
selrecord.9 \
sema.9 \
seqc.9 \
sf_buf.9 \
sglist.9 \
shm_map.9 \
@@ -1825,6 +1826,10 @@ MLINKS+=sema.9 sema_destroy.9 \
sema.9 sema_trywait.9 \
sema.9 sema_value.9 \
sema.9 sema_wait.9
MLINKS+=seqc.9 seqc_consistent.9 \
seqc.9 seqc_read.9 \
seqc.9 seqc_write_begin.9 \
seqc.9 seqc_write_end.9
MLINKS+=sf_buf.9 sf_buf_alloc.9 \
sf_buf.9 sf_buf_free.9 \
sf_buf.9 sf_buf_kva.9 \

share/man/man9/seqc.9 (new file, 138 lines)

@@ -0,0 +1,138 @@
.\"
.\" Copyright (C) 2019 Mariusz Zaborski <oshogbo@FreeBSD.org>
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice(s), this list of conditions and the following disclaimer as
.\" the first lines of this file unmodified other than the possible
.\" addition of one or more copyright notices.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice(s), this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
.\" EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
.\" DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
.\" DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
.\" (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
.\" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
.\" CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
.\" DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd July 29, 2019
.Dt SEQC 9
.Os
.Sh NAME
.Nm seqc_consistent ,
.Nm seqc_read ,
.Nm seqc_write_begin ,
.Nm seqc_write_end
.Nd "lockless read algorithm"
.Sh SYNOPSIS
.In sys/seqc.h
.Ft void
.Fn seqc_write_begin "seqc_t *seqcp"
.Ft void
.Fn seqc_write_end "seqc_t *seqcp"
.Ft seqc_t
.Fn seqc_read "seqc_t *seqcp"
.Ft seqc_t
.Fn seqc_consistent "const seqc_t *seqcp" "seqc_t oldseqc"
.Sh DESCRIPTION
The
.Nm seqc
facility allows zero or more readers and at most one writer to concurrently access
an object, providing a consistent snapshot of the object for readers.
No mutual exclusion between readers and writers is required,
but readers may be starved indefinitely by writers.
.Pp
The functions
.Fn seqc_write_begin
and
.Fn seqc_write_end
are used to create a transaction for the writer and to notify readers that the
object will be modified.
.Pp
The
.Fn seqc_read
function returns the current sequence number.
If a writer has started a transaction, this function will spin until the
transaction has ended.
.Pp
The
.Fn seqc_consistent
function compares the sequence number with a previously fetched value.
The
.Fa oldseqc
variable should contain the sequence number from the beginning of the read
transaction.
.Pp
At the end of a read transaction, the reader checks whether the sequence
number has changed.
If the sequence number did not change, the object was not modified and the
fetched variables are valid.
If the sequence number changed, the object was modified and the fetch should
be repeated.
If the sequence number is odd, a modification is in progress and the reader
will spin until the sequence number becomes even.
.Sh EXAMPLES
The following example for a writer changes the
.Va var1
and
.Va var2
variables in the
.Va obj
structure:
.Bd -literal
lock_exclusive(&obj->lock);
seqc_write_begin(&obj->seqc);
obj->var1 = 1;
obj->var2 = 2;
seqc_write_end(&obj->seqc);
unlock_exclusive(&obj->lock);
.Ed
.Pp
The following example for a reader reads the
.Va var1
and
.Va var2
variables from the
.Va obj
structure.
If the sequence number changed, the reader restarts the whole process.
.Bd -literal
int var1, var2;
seqc_t seqc;
for (;;) {
seqc = seqc_read(&obj->seqc);
var1 = obj->var1;
var2 = obj->var2;
if (seqc_consistent(&obj->seqc, seqc))
break;
}
.Ed
.Sh AUTHORS
The
.Nm seqc
functions were implemented by
.An Mateusz Guzik Aq Mt mjg@FreeBSD.org .
This manual page was written by
.An Mariusz Zaborski Aq Mt oshogbo@FreeBSD.org .
.Sh CAVEATS
There is no guarantee of progress for readers.
When there are many writers, a reader can be starved.
This concern may be addressed by returning an error after a few attempts.
.Pp
Theoretically, if a read takes a very long time and there are many writers,
the counter may overflow and wrap around to the same value.
In that case the reader will not notice that the object was changed.
Given that this needs 4 billion transactional writes across a single contended
reader, it is unlikely to ever happen.
This could be avoided by extending the interface to allow 64-bit counters.
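
To make the protocol described in this page concrete, the following is a
minimal userspace model of the seqc algorithm written with C11 atomics. It is
an illustration only, not the kernel's <sys/seqc.h> implementation: the names
are invented for this note, and the memory ordering is deliberately
conservative (sequentially consistent operations throughout), whereas the
kernel relies on cheaper fences.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef _Atomic uint32_t useqc_t;

static void
useqc_write_begin(useqc_t *seqcp)
{

	/* Make the counter odd: a write transaction is in progress. */
	atomic_fetch_add(seqcp, 1);
}

static void
useqc_write_end(useqc_t *seqcp)
{

	/* Make the counter even again, with a new value. */
	atomic_fetch_add(seqcp, 1);
}

static uint32_t
useqc_read(useqc_t *seqcp)
{
	uint32_t ret;

	/* Spin while a write transaction (odd counter) is in progress. */
	do {
		ret = atomic_load(seqcp);
	} while ((ret & 1) != 0);
	return (ret);
}

static bool
useqc_consistent(useqc_t *seqcp, uint32_t oldseqc)
{

	/* The snapshot is consistent iff the counter did not move. */
	return (atomic_load(seqcp) == oldseqc);
}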


@@ -234,6 +234,8 @@ DEBUG_FILES_CFLAGS?= -g
.if ${MK_WARNS} != "no"
CFLAGS+= ${CWARNFLAGS:M*} ${CWARNFLAGS.${COMPILER_TYPE}}
CFLAGS+= ${CWARNFLAGS.${.IMPSRC:T}}
CXXFLAGS+= ${CXXWARNFLAGS:M*} ${CXXWARNFLAGS.${COMPILER_TYPE}}
CXXFLAGS+= ${CXXWARNFLAGS.${.IMPSRC:T}}
.endif
CFLAGS+= ${CFLAGS.${COMPILER_TYPE}}


@@ -40,7 +40,9 @@ struct devsw *devsw[] = {
&efipart_cddev,
&efipart_hddev,
&efihttp_dev, /* ordering with efinet_dev matters */
#if defined(LOADER_NET_SUPPORT)
&efinet_dev,
#endif
&vdisk_dev,
#ifdef EFI_ZFS_BOOT
&zfs_dev,
@@ -64,7 +66,9 @@ struct fs_ops *file_system[] = {
};
struct netif_driver *netif_drivers[] = {
#if defined(LOADER_NET_SUPPORT)
&efinetif,
#endif
NULL
};


@@ -127,7 +127,6 @@ device nvram # Access to rtc cmos via /dev/nvram
device speaker #Play IBM BASIC-style noises out your speaker
hint.speaker.0.at="isa"
hint.speaker.0.port="0x61"
device gzip #Exec gzipped a.out's. REQUIRES COMPAT_AOUT!
#####################################################################


@@ -1,557 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2005 Olivier Houchard. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Since we are compiled outside of the normal kernel build process, we
* need to include opt_global.h manually.
*/
#include "opt_global.h"
#include "opt_kernname.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <machine/asm.h>
#include <sys/param.h>
#include <sys/elf32.h>
#include <sys/inflate.h>
#include <machine/elf.h>
#include <machine/pte-v4.h>
#include <machine/cpufunc.h>
#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/vmparam.h> /* For KERNVIRTADDR */
#if __ARM_ARCH >= 6
#error "elf_trampline is not supported on ARMv6/v7 platforms"
#endif
extern char kernel_start[];
extern char kernel_end[];
extern void *_end;
void _start(void);
void __start(void);
void __startC(unsigned r0, unsigned r1, unsigned r2, unsigned r3);
extern void do_call(void *, void *, void *, int);
#define GZ_HEAD 0xa
#if defined(CPU_ARM9E)
#define cpu_idcache_wbinv_all armv5_ec_idcache_wbinv_all
extern void armv5_ec_idcache_wbinv_all(void);
#endif
#if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
#define cpu_l2cache_wbinv_all sheeva_l2cache_wbinv_all
extern void sheeva_l2cache_wbinv_all(void);
#else
#define cpu_l2cache_wbinv_all()
#endif
/*
* Boot parameters
*/
static struct arm_boot_params s_boot_params;
static __inline void *
memcpy(void *dst, const void *src, int len)
{
const char *s = src;
char *d = dst;
while (len) {
if (0 && len >= 4 && !((vm_offset_t)d & 3) &&
!((vm_offset_t)s & 3)) {
*(uint32_t *)d = *(uint32_t *)s;
s += 4;
d += 4;
len -= 4;
} else {
*d++ = *s++;
len--;
}
}
return (dst);
}
static __inline void
bzero(void *addr, int count)
{
char *tmp = (char *)addr;
while (count > 0) {
if (count >= 4 && !((vm_offset_t)tmp & 3)) {
*(uint32_t *)tmp = 0;
tmp += 4;
count -= 4;
} else {
*tmp = 0;
tmp++;
count--;
}
}
}
void
_startC(unsigned r0, unsigned r1, unsigned r2, unsigned r3)
{
int tmp1;
unsigned int sp = ((unsigned int)&_end & ~3) + 4;
unsigned int pc, kernphysaddr;
s_boot_params.abp_r0 = r0;
s_boot_params.abp_r1 = r1;
s_boot_params.abp_r2 = r2;
s_boot_params.abp_r3 = r3;
/*
* Figure out the physical address the kernel was loaded at. This
* assumes the entry point (this code right here) is in the first page,
* which will always be the case for this trampoline code.
*/
__asm __volatile("mov %0, pc\n"
: "=r" (pc));
kernphysaddr = pc & ~PAGE_MASK;
#if defined(FLASHADDR) && defined(PHYSADDR) && defined(LOADERRAMADDR)
if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) ||
(FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) {
/*
* We're running from flash, so just copy the whole thing
* from flash to memory.
* This is far from optimal, we could do the relocation or
* the unzipping directly from flash to memory to avoid this
* needless copy, but it would require to know the flash
* physical address.
*/
unsigned int target_addr;
unsigned int tmp_sp;
uint32_t src_addr = (uint32_t)&_start - PHYSADDR + FLASHADDR
+ (pc - FLASHADDR - ((uint32_t)&_startC - PHYSADDR)) & 0xfffff000;
target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR;
tmp_sp = target_addr + 0x100000 +
(unsigned int)&_end - (unsigned int)&_start;
memcpy((char *)target_addr, (char *)src_addr,
(unsigned int)&_end - (unsigned int)&_start);
/* Temporary set the sp and jump to the new location. */
__asm __volatile(
"mov sp, %1\n"
"mov r0, %2\n"
"mov r1, %3\n"
"mov r2, %4\n"
"mov r3, %5\n"
"mov pc, %0\n"
: : "r" (target_addr), "r" (tmp_sp),
"r" (s_boot_params.abp_r0), "r" (s_boot_params.abp_r1),
"r" (s_boot_params.abp_r2), "r" (s_boot_params.abp_r3)
: "r0", "r1", "r2", "r3");
}
#endif
#ifdef KZIP
sp += KERNSIZE + 0x100;
sp &= ~(L1_TABLE_SIZE - 1);
sp += 2 * L1_TABLE_SIZE;
#endif
sp += 1024 * 1024; /* Should be enough for a stack */
__asm __volatile("adr %0, 2f\n"
"bic %0, %0, #0xff000000\n"
"and %1, %1, #0xff000000\n"
"orr %0, %0, %1\n"
"mrc p15, 0, %1, c1, c0, 0\n" /* CP15_SCTLR(%1)*/
"bic %1, %1, #1\n" /* Disable MMU */
"orr %1, %1, #(4 | 8)\n" /* Add DC enable,
WBUF enable */
"orr %1, %1, #0x1000\n" /* Add IC enable */
"orr %1, %1, #(0x800)\n" /* BPRD enable */
"mcr p15, 0, %1, c1, c0, 0\n" /* CP15_SCTLR(%1)*/
"nop\n"
"nop\n"
"nop\n"
"mov pc, %0\n"
"2: nop\n"
"mov sp, %2\n"
: "=r" (tmp1), "+r" (kernphysaddr), "+r" (sp));
__start();
}
#ifdef KZIP
static unsigned char *orig_input, *i_input, *i_output;
static u_int memcnt; /* Memory allocated: blocks */
static size_t memtot; /* Memory allocated: bytes */
/*
* Library functions required by inflate().
*/
#define MEMSIZ 0x8000
/*
* Allocate memory block.
*/
unsigned char *
kzipmalloc(int size)
{
void *ptr;
static u_char mem[MEMSIZ];
if (memtot + size > MEMSIZ)
return NULL;
ptr = mem + memtot;
memtot += size;
memcnt++;
return ptr;
}
/*
* Free allocated memory block.
*/
void
kzipfree(void *ptr)
{
memcnt--;
if (!memcnt)
memtot = 0;
}
void
putstr(char *dummy)
{
}
static int
input(void *dummy)
{
if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) {
return (GZ_EOF);
}
return *i_input++;
}
static int
output(void *dummy, unsigned char *ptr, unsigned long len)
{
memcpy(i_output, ptr, len);
i_output += len;
return (0);
}
static void *
inflate_kernel(void *kernel, void *startaddr)
{
struct inflate infl;
unsigned char slide[GZ_WSIZE];
orig_input = kernel;
memcnt = memtot = 0;
i_input = (unsigned char *)kernel + GZ_HEAD;
if (((char *)kernel)[3] & 0x18) {
while (*i_input)
i_input++;
i_input++;
}
i_output = startaddr;
bzero(&infl, sizeof(infl));
infl.gz_input = input;
infl.gz_output = output;
infl.gz_slide = slide;
inflate(&infl);
return ((char *)(((vm_offset_t)i_output & ~3) + 4));
}
#endif
void *
load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
int d)
{
Elf32_Ehdr *eh;
Elf32_Phdr phdr[64] /* XXX */, *php;
Elf32_Shdr shdr[64] /* XXX */;
int i,j;
void *entry_point;
int symtabindex = -1;
int symstrindex = -1;
vm_offset_t lastaddr = 0;
Elf_Addr ssym = 0;
Elf_Dyn *dp;
struct arm_boot_params local_boot_params;
eh = (Elf32_Ehdr *)kstart;
ssym = 0;
entry_point = (void*)eh->e_entry;
memcpy(phdr, (void *)(kstart + eh->e_phoff ),
eh->e_phnum * sizeof(phdr[0]));
/* Determine lastaddr. */
for (i = 0; i < eh->e_phnum; i++) {
if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr
+ phdr[i].p_memsz))
lastaddr = phdr[i].p_vaddr - KERNVIRTADDR +
curaddr + phdr[i].p_memsz;
}
/* Save the symbol tables, as there're about to be scratched. */
memcpy(shdr, (void *)(kstart + eh->e_shoff),
sizeof(*shdr) * eh->e_shnum);
if (eh->e_shnum * eh->e_shentsize != 0 &&
eh->e_shoff != 0) {
for (i = 0; i < eh->e_shnum; i++) {
if (shdr[i].sh_type == SHT_SYMTAB) {
for (j = 0; j < eh->e_phnum; j++) {
if (phdr[j].p_type == PT_LOAD &&
shdr[i].sh_offset >=
phdr[j].p_offset &&
(shdr[i].sh_offset +
shdr[i].sh_size <=
phdr[j].p_offset +
phdr[j].p_filesz)) {
shdr[i].sh_offset = 0;
shdr[i].sh_size = 0;
j = eh->e_phnum;
}
}
if (shdr[i].sh_offset != 0 &&
shdr[i].sh_size != 0) {
symtabindex = i;
symstrindex = shdr[i].sh_link;
}
}
}
func_end = roundup(func_end, sizeof(long));
if (symtabindex >= 0 && symstrindex >= 0) {
ssym = lastaddr;
if (d) {
memcpy((void *)func_end, (void *)(
shdr[symtabindex].sh_offset + kstart),
shdr[symtabindex].sh_size);
memcpy((void *)(func_end +
shdr[symtabindex].sh_size),
(void *)(shdr[symstrindex].sh_offset +
kstart), shdr[symstrindex].sh_size);
} else {
lastaddr += shdr[symtabindex].sh_size;
lastaddr = roundup(lastaddr,
sizeof(shdr[symtabindex].sh_size));
lastaddr += sizeof(shdr[symstrindex].sh_size);
lastaddr += shdr[symstrindex].sh_size;
lastaddr = roundup(lastaddr,
sizeof(shdr[symstrindex].sh_size));
}
}
}
if (!d)
return ((void *)lastaddr);
/*
* Now the stack is fixed, copy boot params
* before it's overrided
*/
memcpy(&local_boot_params, &s_boot_params, sizeof(local_boot_params));
j = eh->e_phnum;
for (i = 0; i < j; i++) {
volatile char c;
if (phdr[i].p_type != PT_LOAD)
continue;
memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr),
(void*)(kstart + phdr[i].p_offset), phdr[i].p_filesz);
/* Clean space from oversized segments, eg: bss. */
if (phdr[i].p_filesz < phdr[i].p_memsz)
bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR +
curaddr + phdr[i].p_filesz), phdr[i].p_memsz -
phdr[i].p_filesz);
}
/* Now grab the symbol tables. */
if (symtabindex >= 0 && symstrindex >= 0) {
*(Elf_Size *)lastaddr =
shdr[symtabindex].sh_size;
lastaddr += sizeof(shdr[symtabindex].sh_size);
memcpy((void*)lastaddr,
(void *)func_end,
shdr[symtabindex].sh_size);
lastaddr += shdr[symtabindex].sh_size;
lastaddr = roundup(lastaddr,
sizeof(shdr[symtabindex].sh_size));
*(Elf_Size *)lastaddr =
shdr[symstrindex].sh_size;
lastaddr += sizeof(shdr[symstrindex].sh_size);
memcpy((void*)lastaddr,
(void*)(func_end +
shdr[symtabindex].sh_size),
shdr[symstrindex].sh_size);
lastaddr += shdr[symstrindex].sh_size;
lastaddr = roundup(lastaddr,
sizeof(shdr[symstrindex].sh_size));
*(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER;
*((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR;
*((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR;
} else
*(Elf_Addr *)curaddr = 0;
/* Invalidate the instruction cache. */
__asm __volatile("mcr p15, 0, %0, c7, c5, 0\n"
"mcr p15, 0, %0, c7, c10, 4\n"
: : "r" (curaddr));
__asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/
"bic %0, %0, #1\n" /* MMU_ENABLE */
"mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/
: "=r" (ssym));
/* Jump to the entry point. */
((void(*)(unsigned, unsigned, unsigned, unsigned))
(entry_point - KERNVIRTADDR + curaddr))
(local_boot_params.abp_r0, local_boot_params.abp_r1,
local_boot_params.abp_r2, local_boot_params.abp_r3);
__asm __volatile(".globl func_end\n"
"func_end:");
/* NOTREACHED */
return NULL;
}
extern char func_end[];
#define PMAP_DOMAIN_KERNEL 0 /*
* Just define it instead of including the
* whole VM headers set.
*/
int __hack;
static __inline void
setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend,
int write_back)
{
unsigned int *pd = (unsigned int *)pt_addr;
vm_paddr_t addr;
int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT;
int tmp;
bzero(pd, L1_TABLE_SIZE);
for (addr = physstart; addr < physend; addr += L1_S_SIZE) {
pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|
L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr;
if (write_back && 0)
pd[addr >> L1_S_SHIFT] |= L1_S_B;
}
/* XXX: See below */
if (0xfff00000 < physstart || 0xfff00000 > physend)
pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)|
L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart;
__asm __volatile("mcr p15, 0, %1, c2, c0, 0\n" /* set TTB */
"mcr p15, 0, %1, c8, c7, 0\n" /* Flush TTB */
"mcr p15, 0, %2, c3, c0, 0\n" /* Set DAR */
"mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/
"orr %0, %0, #1\n" /* MMU_ENABLE */
"mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/
"mrc p15, 0, %0, c2, c0, 0\n" /* CPWAIT */
"mov r0, r0\n"
"sub pc, pc, #4\n" :
"=r" (tmp) : "r" (pd), "r" (domain));
/*
* XXX: This is the most stupid workaround I've ever wrote.
* For some reason, the KB9202 won't boot the kernel unless
* we access an address which is not in the
* 0x20000000 - 0x20ffffff range. I hope I'll understand
* what's going on later.
*/
__hack = *(volatile int *)0xfffff21c;
}
void
__start(void)
{
void *curaddr;
void *dst, *altdst;
char *kernel = (char *)&kernel_start;
int sp;
int pt_addr;
__asm __volatile("mov %0, pc" :
"=r" (curaddr));
curaddr = (void*)((unsigned int)curaddr & 0xfff00000);
#ifdef KZIP
if (*kernel == 0x1f && kernel[1] == 0x8b) {
pt_addr = L1_TABLE_SIZE +
rounddown2((int)&_end + KERNSIZE + 0x100, L1_TABLE_SIZE);
setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
(vm_paddr_t)curaddr + 0x10000000, 1);
/* Gzipped kernel */
dst = inflate_kernel(kernel, &_end);
kernel = (char *)&_end;
altdst = 4 + load_kernel((unsigned int)kernel,
(unsigned int)curaddr,
(unsigned int)&func_end + 800 , 0);
if (altdst > dst)
dst = altdst;
/*
* Disable MMU. Otherwise, setup_pagetables call below
* might overwrite the L1 table we are currently using.
*/
cpu_idcache_wbinv_all();
cpu_l2cache_wbinv_all();
__asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/
"bic %0, %0, #1\n" /* MMU_DISABLE */
"mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/
:"=r" (pt_addr));
} else
#endif
dst = 4 + load_kernel((unsigned int)&kernel_start,
(unsigned int)curaddr,
(unsigned int)&func_end, 0);
dst = (void *)(((vm_offset_t)dst & ~3));
pt_addr = L1_TABLE_SIZE + rounddown2((unsigned int)dst, L1_TABLE_SIZE);
setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
(vm_paddr_t)curaddr + 0x10000000, 0);
sp = pt_addr + L1_TABLE_SIZE + 8192;
sp = sp &~3;
dst = (void *)(sp + 4);
memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end -
(unsigned int)&load_kernel + 800);
do_call(dst, kernel, dst + (unsigned int)(&func_end) -
(unsigned int)(&load_kernel) + 800, sp);
}
/* We need to provide these functions but never call them */
void __aeabi_unwind_cpp_pr0(void);
void __aeabi_unwind_cpp_pr1(void);
void __aeabi_unwind_cpp_pr2(void);
__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr1);
__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr2);
void
__aeabi_unwind_cpp_pr0(void)
{
}


@@ -5743,7 +5743,7 @@ pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
int
pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
{
pt_entry_t *pte;
pt_entry_t pte, *ptep;
register_t intr;
uint64_t ec, par;
int lvl, rv;
@@ -5767,9 +5767,9 @@ pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
case ISS_DATA_DFSC_AFF_L2:
case ISS_DATA_DFSC_AFF_L3:
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, far, &lvl);
if (pte != NULL) {
pmap_set_bits(pte, ATTR_AF);
ptep = pmap_pte(pmap, far, &lvl);
if (ptep != NULL) {
pmap_set_bits(ptep, ATTR_AF);
rv = KERN_SUCCESS;
/*
* XXXMJ as an optimization we could mark the entry
@@ -5785,12 +5785,13 @@ pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
(esr & ISS_DATA_WnR) == 0)
return (rv);
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, far, &lvl);
if (pte != NULL &&
(pmap_load(pte) & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
(ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
pmap_clear_bits(pte, ATTR_AP_RW_BIT);
pmap_invalidate_page(pmap, far);
ptep = pmap_pte(pmap, far, &lvl);
if (ptep != NULL &&
((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
if ((pte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RO)) {
pmap_clear_bits(ptep, ATTR_AP_RW_BIT);
pmap_invalidate_page(pmap, far);
}
rv = KERN_SUCCESS;
}
PMAP_UNLOCK(pmap);


@@ -189,6 +189,7 @@ device aw_mmc # Allwinner SD/MMC controller
device mmc # mmc/sd bus
device mmcsd # mmc/sd flash cards
device dwmmc
device dwmmc_altera
device rk_emmcphy
# Serial (COM) ports


@@ -3814,7 +3814,6 @@ kern/subr_firmware.c optional firmware
kern/subr_gtaskqueue.c standard
kern/subr_hash.c standard
kern/subr_hints.c standard
kern/subr_inflate.c optional gzip
kern/subr_kdb.c standard
kern/subr_kobj.c standard
kern/subr_lock.c standard


@@ -45,7 +45,7 @@ linux32_assym.h optional compat_linux32 \
clean "linux32_assym.h"
#
linux32_locore.o optional compat_linux32 \
dependency "linux32_assym.h $S/amd64/linux32/linux32_locore.s" \
dependency "linux32_assym.h $S/amd64/linux32/linux32_locore.asm" \
compile-with "${CC} -x assembler-with-cpp -DLOCORE -m32 -shared -s -pipe -I. -I$S -Werror -Wall -fPIC -fno-common -nostdinc -nostdlib -Wl,-T$S/amd64/linux32/linux32_vdso.lds.s -Wl,-soname=linux32_vdso.so,--eh-frame-hdr,-warn-common ${.IMPSRC} -o ${.TARGET}" \
no-obj no-implicit-rule \
clean "linux32_locore.o"
@@ -633,7 +633,6 @@ isa/syscons_isa.c optional sc
isa/vga_isa.c optional vga
kern/kern_clocksource.c standard
kern/imgact_aout.c optional compat_aout
kern/imgact_gzip.c optional gzip
kern/link_elf_obj.c standard
libkern/x86/crc32_sse42.c standard
#


@@ -221,6 +221,7 @@ dev/hwpmc/hwpmc_arm64.c optional hwpmc
dev/hwpmc/hwpmc_arm64_md.c optional hwpmc
dev/mbox/mbox_if.m optional soc_brcm_bcm2837
dev/mmc/host/dwmmc.c optional dwmmc fdt
dev/mmc/host/dwmmc_altera.c optional dwmmc fdt dwmmc_altera
dev/mmc/host/dwmmc_hisi.c optional dwmmc fdt soc_hisi_hi6220
dev/mmc/host/dwmmc_rockchip.c optional dwmmc fdt soc_rockchip_rk3328
dev/neta/if_mvneta_fdt.c optional neta fdt


@@ -32,7 +32,7 @@ linux_assym.h optional compat_linux \
clean "linux_assym.h"
#
linux_locore.o optional compat_linux \
dependency "linux_assym.h $S/i386/linux/linux_locore.s" \
dependency "linux_assym.h $S/i386/linux/linux_locore.asm" \
compile-with "${CC} -x assembler-with-cpp -DLOCORE -shared -s -pipe -I. -I$S -Werror -Wall -fPIC -fno-common -nostdinc -nostdlib -Wl,-T$S/i386/linux/linux_vdso.lds.s -Wl,-soname=linux_vdso.so,--eh-frame-hdr,-warn-common ${.IMPSRC} -o ${.TARGET}" \
no-obj no-implicit-rule \
clean "linux_locore.o"
@@ -529,7 +529,6 @@ isa/syscons_isa.c optional sc
isa/vga_isa.c optional vga
kern/kern_clocksource.c standard
kern/imgact_aout.c optional compat_aout
kern/imgact_gzip.c optional gzip
kern/subr_sfbuf.c standard
libkern/divdi3.c standard
libkern/ffsll.c standard


@@ -41,6 +41,8 @@ __FBSDID("$FreeBSD$");
#include <dev/mmc/host/dwmmc_var.h>
#include "opt_mmccam.h"
static struct ofw_compat_data compat_data[] = {
{"altr,socfpga-dw-mshc", 1},
{NULL, 0},
@@ -66,10 +68,18 @@ static int
altera_dwmmc_attach(device_t dev)
{
struct dwmmc_softc *sc;
phandle_t root;
sc = device_get_softc(dev);
sc->hwtype = HWTYPE_ALTERA;
root = OF_finddevice("/");
if (ofw_bus_node_is_compatible(root, "altr,socfpga-stratix10")) {
sc->bus_hz = 24000000;
sc->use_pio = 1;
}
return (dwmmc_attach(dev));
}


@@ -425,10 +425,19 @@ hdac_pin_patch(struct hdaa_widget *w)
} else if (id == HDA_CODEC_ALC298 && subid == DELL_XPS9560_SUBVENDOR) {
switch (nid) {
case 24:
config = 0x01a1913c;
config = 0x01a1913c;
break;
case 26:
config = 0x01a1913d;
config = 0x01a1913d;
break;
}
} else if (id == HDA_CODEC_ALC256 && subid == DELL_I7577_SUBVENDOR ) {
switch (nid) {
case 20:
patch = "as=1 seq=0";
break;
case 33:
patch = "as=1 seq=15";
break;
}
}
@@ -769,6 +778,10 @@ hdaa_patch_direct(struct hdaa_devinfo *devinfo)
}
break;
}
if (id == HDA_CODEC_ALC255 || id == HDA_CODEC_ALC256) {
val = hdaa_read_coef(dev, 0x20, 0x46);
hdaa_write_coef(dev, 0x20, 0x46, val|0x3000);
}
if (subid == APPLE_INTEL_MAC)
hda_command(dev, HDA_CMD_12BIT(0, devinfo->nid,
0x7e7, 0));


@@ -203,6 +203,7 @@
#define DELL_XPSM1210_SUBVENDOR HDA_MODEL_CONSTRUCT(DELL, 0x01d7)
#define DELL_OPLX745_SUBVENDOR HDA_MODEL_CONSTRUCT(DELL, 0x01da)
#define DELL_XPS9560_SUBVENDOR HDA_MODEL_CONSTRUCT(DELL, 0x07be)
#define DELL_I7577_SUBVENDOR HDA_MODEL_CONSTRUCT(DELL, 0x0802)
#define DELL_ALL_SUBVENDOR HDA_MODEL_CONSTRUCT(DELL, 0xffff)
/* Clevo */


@@ -275,7 +275,6 @@ device nvram # Access to rtc cmos via /dev/nvram
device speaker #Play IBM BASIC-style noises out your speaker
hint.speaker.0.at="isa"
hint.speaker.0.port="0x61"
device gzip #Exec gzipped a.out's. REQUIRES COMPAT_AOUT!
device apm_saver # Requires APM


@@ -132,6 +132,11 @@ closefrom
##
connectat
##
## copy_file_range(2) reads from one descriptor and writes to the other.
##
copy_file_range
##
## cpuset(2) and related calls are limited to caller's own process/thread.
##
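
For illustration, a sketch of what this change permits: a program may open its
descriptors, enter capability mode with cap_enter(2), and then move data
between those descriptors with copy_file_range(2). This example was written
for this note under those assumptions; it is not code from the commit.

#include <sys/capsicum.h>

#include <err.h>
#include <fcntl.h>
#include <limits.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	ssize_t copied;
	int infd, outfd;

	if (argc != 3)
		errx(1, "usage: fcopy infile outfile");
	/* Open both descriptors before entering the sandbox. */
	infd = open(argv[1], O_RDONLY);
	if (infd < 0)
		err(1, "open %s", argv[1]);
	outfd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (outfd < 0)
		err(1, "open %s", argv[2]);
	if (cap_enter() < 0)
		err(1, "cap_enter");
	/* copy_file_range(2) now works on the already-open descriptors. */
	do {
		copied = copy_file_range(infd, NULL, outfd, NULL,
		    SSIZE_MAX, 0);
	} while (copied > 0);
	if (copied < 0)
		err(1, "copy_file_range");
	return (0);
}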


@@ -1,394 +0,0 @@
/*-
* SPDX-License-Identifier: Beerware
*
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
* can do whatever you want with this stuff. If we meet some day, and you think
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*/
/*
* This module handles execution of a.out files which have been run through
* "gzip". This saves diskspace, but wastes cpu-cycles and VM.
*
* TODO:
* text-segments should be made R/O after being filled
* is the vm-stuff safe ?
* should handle the entire header of gzip'ed stuff.
* inflate isn't quite reentrant yet...
* error-handling is a mess...
* so is the rest...
* tidy up unnecessary includes
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/imgact_aout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/inflate.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
struct imgact_gzip {
struct image_params *ip;
struct exec a_out;
int error;
int gotheader;
int where;
u_char *inbuf;
u_long offset;
u_long output;
u_long len;
int idx;
u_long virtual_offset, file_offset, file_end, bss_size;
};
static int exec_gzip_imgact(struct image_params *imgp);
static int NextByte(void *vp);
static int do_aout_hdr(struct imgact_gzip *);
static int Flush(void *vp, u_char *, u_long siz);
static int
exec_gzip_imgact(struct image_params *imgp)
{
int error;
const u_char *p = (const u_char *) imgp->image_header;
struct imgact_gzip igz;
struct inflate infl;
struct vmspace *vmspace;
/* If these four are not OK, it isn't a gzip file */
if (p[0] != 0x1f)
return -1; /* 0 Simply magic */
if (p[1] != 0x8b)
return -1; /* 1 Simply magic */
if (p[2] != 0x08)
return -1; /* 2 Compression method */
if (p[9] != 0x03)
return -1; /* 9 OS compressed on */
/*
* If this one contains anything but a comment or a filename marker,
* we don't want to chew on it
*/
if (p[3] & ~(0x18))
return ENOEXEC; /* 3 Flags */
/* These are of no use to us */
/* 4-7 Timestamp */
/* 8 Extra flags */
bzero(&igz, sizeof igz);
bzero(&infl, sizeof infl);
infl.gz_private = (void *) &igz;
infl.gz_input = NextByte;
infl.gz_output = Flush;
igz.ip = imgp;
igz.idx = 10;
if (p[3] & 0x08) { /* skip a filename */
while (p[igz.idx++])
if (igz.idx >= PAGE_SIZE)
return ENOEXEC;
}
if (p[3] & 0x10) { /* skip a comment */
while (p[igz.idx++])
if (igz.idx >= PAGE_SIZE)
return ENOEXEC;
}
igz.len = imgp->attr->va_size;
error = inflate(&infl);
/*
* The unzipped file may not even have been long enough to contain
* a header giving Flush() a chance to return error. Check for this.
*/
if ( !igz.gotheader )
return ENOEXEC;
if ( !error ) {
vmspace = imgp->proc->p_vmspace;
error = vm_map_protect(&vmspace->vm_map,
(vm_offset_t) vmspace->vm_taddr,
(vm_offset_t) (vmspace->vm_taddr +
(vmspace->vm_tsize << PAGE_SHIFT)) ,
VM_PROT_READ|VM_PROT_EXECUTE,0);
}
if (igz.inbuf)
kmap_free_wakeup(exec_map, (vm_offset_t)igz.inbuf, PAGE_SIZE);
if (igz.error || error) {
printf("Output=%lu ", igz.output);
printf("Inflate_error=%d igz.error=%d where=%d\n",
error, igz.error, igz.where);
}
if (igz.error)
return igz.error;
if (error)
return ENOEXEC;
return 0;
}
static int
do_aout_hdr(struct imgact_gzip * gz)
{
int error;
struct vmspace *vmspace;
vm_offset_t vmaddr;
/*
* Set file/virtual offset based on a.out variant. We do two cases:
* host byte order and network byte order (for NetBSD compatibility)
*/
switch ((int) (gz->a_out.a_midmag & 0xffff)) {
case ZMAGIC:
gz->virtual_offset = 0;
if (gz->a_out.a_text) {
gz->file_offset = PAGE_SIZE;
} else {
/* Bill's "screwball mode" */
gz->file_offset = 0;
}
break;
case QMAGIC:
gz->virtual_offset = PAGE_SIZE;
gz->file_offset = 0;
break;
default:
/* NetBSD compatibility */
switch ((int) (ntohl(gz->a_out.a_midmag) & 0xffff)) {
case ZMAGIC:
case QMAGIC:
gz->virtual_offset = PAGE_SIZE;
gz->file_offset = 0;
break;
default:
gz->where = __LINE__;
return (-1);
}
}
gz->bss_size = roundup(gz->a_out.a_bss, PAGE_SIZE);
/*
* Check various fields in header for validity/bounds.
*/
if ( /* entry point must lay with text region */
gz->a_out.a_entry < gz->virtual_offset ||
gz->a_out.a_entry >= gz->virtual_offset + gz->a_out.a_text ||
/* text and data size must each be page rounded */
gz->a_out.a_text & PAGE_MASK || gz->a_out.a_data & PAGE_MASK) {
gz->where = __LINE__;
return (-1);
}
/*
* text/data/bss must not exceed limits
*/
PROC_LOCK(gz->ip->proc);
if ( /* text can't exceed maximum text size */
gz->a_out.a_text > maxtsiz ||
/* data + bss can't exceed rlimit */
gz->a_out.a_data + gz->bss_size >
lim_cur_proc(gz->ip->proc, RLIMIT_DATA) ||
racct_set(gz->ip->proc, RACCT_DATA,
gz->a_out.a_data + gz->bss_size) != 0) {
PROC_UNLOCK(gz->ip->proc);
gz->where = __LINE__;
return (ENOMEM);
}
PROC_UNLOCK(gz->ip->proc);
/* Find out how far we should go */
gz->file_end = gz->file_offset + gz->a_out.a_text + gz->a_out.a_data;
/*
* Avoid a possible deadlock if the current address space is destroyed
* and that address space maps the locked vnode. In the common case,
* the locked vnode's v_usecount is decremented but remains greater
* than zero. Consequently, the vnode lock is not needed by vrele().
* However, in cases where the vnode lock is external, such as nullfs,
* v_usecount may become zero.
*/
VOP_UNLOCK(gz->ip->vp, 0);
/*
* Destroy old process VM and create a new one (with a new stack)
*/
error = exec_new_vmspace(gz->ip, &aout_sysvec);
vn_lock(gz->ip->vp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
gz->where = __LINE__;
return (error);
}
vmspace = gz->ip->proc->p_vmspace;
vmaddr = gz->virtual_offset;
error = vm_mmap(&vmspace->vm_map,
&vmaddr,
gz->a_out.a_text + gz->a_out.a_data,
VM_PROT_ALL, VM_PROT_ALL, MAP_ANON | MAP_FIXED,
OBJT_DEFAULT,
NULL,
0);
if (error) {
gz->where = __LINE__;
return (error);
}
if (gz->bss_size != 0) {
/*
* Allocate demand-zeroed area for uninitialized data.
* "bss" = 'block started by symbol' - named after the
* IBM 7090 instruction of the same name.
*/
vmaddr = gz->virtual_offset + gz->a_out.a_text +
gz->a_out.a_data;
error = vm_map_find(&vmspace->vm_map, NULL, 0, &vmaddr,
gz->bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL,
0);
if (error) {
gz->where = __LINE__;
return (error);
}
}
/* Fill in process VM information */
vmspace->vm_tsize = gz->a_out.a_text >> PAGE_SHIFT;
vmspace->vm_dsize = (gz->a_out.a_data + gz->bss_size) >> PAGE_SHIFT;
vmspace->vm_taddr = (caddr_t) (uintptr_t) gz->virtual_offset;
vmspace->vm_daddr = (caddr_t) (uintptr_t)
(gz->virtual_offset + gz->a_out.a_text);
/* Fill in image_params */
gz->ip->interpreted = 0;
gz->ip->entry_addr = gz->a_out.a_entry;
gz->ip->proc->p_sysent = &aout_sysvec;
return 0;
}
static int
NextByte(void *vp)
{
int error;
struct imgact_gzip *igz = (struct imgact_gzip *) vp;
if (igz->idx >= igz->len) {
igz->where = __LINE__;
return GZ_EOF;
}
if (igz->inbuf && igz->idx < (igz->offset + PAGE_SIZE)) {
return igz->inbuf[(igz->idx++) - igz->offset];
}
if (igz->inbuf)
kmap_free_wakeup(exec_map, (vm_offset_t)igz->inbuf, PAGE_SIZE);
igz->offset = igz->idx & ~PAGE_MASK;
error = vm_mmap(exec_map, /* map */
(vm_offset_t *) & igz->inbuf, /* address */
PAGE_SIZE, /* size */
VM_PROT_READ, /* protection */
VM_PROT_READ, /* max protection */
0, /* flags */
OBJT_VNODE, /* handle type */
igz->ip->vp, /* vnode */
igz->offset); /* offset */
if (error) {
igz->where = __LINE__;
igz->error = error;
return GZ_EOF;
}
return igz->inbuf[(igz->idx++) - igz->offset];
}
static int
Flush(void *vp, u_char * ptr, u_long siz)
{
struct imgact_gzip *gz = (struct imgact_gzip *) vp;
u_char *p = ptr, *q;
int i;
/* First, find an a.out-header. */
if (gz->output < sizeof gz->a_out) {
q = (u_char *) & gz->a_out;
i = min(siz, sizeof gz->a_out - gz->output);
bcopy(p, q + gz->output, i);
gz->output += i;
p += i;
siz -= i;
if (gz->output == sizeof gz->a_out) {
gz->gotheader = 1;
i = do_aout_hdr(gz);
if (i == -1) {
if (!gz->where)
gz->where = __LINE__;
gz->error = ENOEXEC;
return ENOEXEC;
} else if (i) {
gz->where = __LINE__;
gz->error = i;
return ENOEXEC;
}
if (gz->file_offset == 0) {
q = (u_char *) (uintptr_t) gz->virtual_offset;
copyout(&gz->a_out, q, sizeof gz->a_out);
}
}
}
/* Skip over zero-padded first PAGE if needed */
if (gz->output < gz->file_offset &&
gz->output + siz > gz->file_offset) {
i = min(siz, gz->file_offset - gz->output);
gz->output += i;
p += i;
siz -= i;
}
if (gz->output >= gz->file_offset && gz->output < gz->file_end) {
i = min(siz, gz->file_end - gz->output);
q = (u_char *) (uintptr_t)
(gz->virtual_offset + gz->output - gz->file_offset);
copyout(p, q, i);
gz->output += i;
p += i;
siz -= i;
}
gz->output += siz;
return 0;
}
/*
* Tell kern_execve.c about it, with a little help from the linker.
*/
static struct execsw gzip_execsw = {
.ex_imgact = exec_gzip_imgact,
.ex_name = "gzip"
};
EXEC_SET(execgzip, gzip_execsw);


@@ -618,5 +618,5 @@ struct sysent sysent[] = {
{ AS(fhlinkat_args), (sy_call_t *)sys_fhlinkat, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 566 = fhlinkat */
{ AS(fhreadlink_args), (sy_call_t *)sys_fhreadlink, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 567 = fhreadlink */
{ AS(funlinkat_args), (sy_call_t *)sys_funlinkat, AUE_UNLINKAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 568 = funlinkat */
{ AS(copy_file_range_args), (sy_call_t *)sys_copy_file_range, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 569 = copy_file_range */
{ AS(copy_file_range_args), (sy_call_t *)sys_copy_file_range, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 569 = copy_file_range */
};


@@ -167,8 +167,8 @@ reaper_clear(struct proc *p)
proc_id_clear(PROC_ID_REAP, p->p_reapsubtree);
}
static void
clear_orphan(struct proc *p)
void
proc_clear_orphan(struct proc *p)
{
struct proc *p1;
@@ -522,7 +522,7 @@ exit1(struct thread *td, int rval, int signo)
* list due to present P_TRACED flag. Clear
* orphan link for q now while q is locked.
*/
clear_orphan(q);
proc_clear_orphan(q);
q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
q->p_flag2 &= ~P2_PTRACE_FSTP;
q->p_ptevents = 0;
@@ -556,7 +556,7 @@ exit1(struct thread *td, int rval, int signo)
kern_psignal(q, q->p_pdeathsig);
CTR2(KTR_PTRACE, "exit: pid %d, clearing orphan %d", p->p_pid,
q->p_pid);
clear_orphan(q);
proc_clear_orphan(q);
PROC_UNLOCK(q);
}
@@ -912,7 +912,7 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options)
reaper_clear(p);
proc_id_clear(PROC_ID_PID, p->p_pid);
PROC_LOCK(p);
clear_orphan(p);
proc_clear_orphan(p);
PROC_UNLOCK(p);
leavepgrp(p);
if (p->p_procdesc != NULL)
@@ -1372,7 +1372,7 @@ proc_reparent(struct proc *child, struct proc *parent, bool set_oppid)
LIST_REMOVE(child, p_sibling);
LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
clear_orphan(child);
proc_clear_orphan(child);
if (child->p_flag & P_TRACED) {
if (LIST_EMPTY(&child->p_pptr->p_orphans)) {
child->p_treeflag |= P_TREE_FIRST_ORPHAN;


@@ -121,76 +121,22 @@ sfstat_sysctl(SYSCTL_HANDLER_ARGS)
SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
/*
* Detach mapped page and release resources back to the system. Called
* by mbuf(9) code when last reference to a page is freed.
*/
static void
sendfile_free_page(vm_page_t pg, bool nocache)
{
bool freed;
vm_page_lock(pg);
/*
* In either case check for the object going away on us. This can
* happen since we don't hold a reference to it. If so, we're
* responsible for freeing the page. In 'noncache' case try to free
* the page, but only if it is cheap to.
*/
if (vm_page_unwire_noq(pg)) {
vm_object_t obj;
if ((obj = pg->object) == NULL)
vm_page_free(pg);
else {
freed = false;
if (nocache && !vm_page_xbusied(pg) &&
VM_OBJECT_TRYWLOCK(obj)) {
/* Only free unmapped pages. */
if (obj->ref_count == 0 ||
!pmap_page_is_mapped(pg))
/*
* The busy test before the object is
* locked cannot be relied upon.
*/
freed = vm_page_try_to_free(pg);
VM_OBJECT_WUNLOCK(obj);
}
if (!freed) {
/*
* If we were asked to not cache the page, place
* it near the head of the inactive queue so
* that it is reclaimed sooner. Otherwise,
* maintain LRU.
*/
if (nocache)
vm_page_deactivate_noreuse(pg);
else if (vm_page_active(pg))
vm_page_reference(pg);
else
vm_page_deactivate(pg);
}
}
}
vm_page_unlock(pg);
}
static void
sendfile_free_mext(struct mbuf *m)
{
struct sf_buf *sf;
vm_page_t pg;
bool nocache;
int flags;
KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF,
("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m));
sf = m->m_ext.ext_arg1;
pg = sf_buf_page(sf);
nocache = m->m_ext.ext_flags & EXT_FLAG_NOCACHE;
flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;
sf_buf_free(sf);
sendfile_free_page(pg, nocache);
vm_page_release(pg, flags);
if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
struct sendfile_sync *sfs = m->m_ext.ext_arg2;
@@ -208,21 +154,21 @@ sendfile_free_mext_pg(struct mbuf *m)
{
struct mbuf_ext_pgs *ext_pgs;
vm_page_t pg;
int i;
bool nocache, cache_last;
int flags, i;
bool cache_last;
KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_PGS,
("%s: m %p !M_EXT or !EXT_PGS", __func__, m));
nocache = m->m_ext.ext_flags & EXT_FLAG_NOCACHE;
cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
ext_pgs = m->m_ext.ext_pgs;
flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;
for (i = 0; i < ext_pgs->npgs; i++) {
if (cache_last && i == ext_pgs->npgs - 1)
nocache = false;
flags = 0;
pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
sendfile_free_page(pg, nocache);
vm_page_release(pg, flags);
}
if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {

File diff suppressed because it is too large.


@@ -132,9 +132,6 @@ __FBSDID("$FreeBSD$");
#define LI_EXCLUSIVE 0x00010000 /* Exclusive lock instance. */
#define LI_NORELEASE 0x00020000 /* Lock not allowed to be released. */
/* Define this to check for blessed mutexes */
#undef BLESSING
#ifndef WITNESS_COUNT
#define WITNESS_COUNT 1536
#endif
@@ -278,12 +275,10 @@ struct witness_lock_order_hash {
u_int wloh_count;
};
#ifdef BLESSING
struct witness_blessed {
const char *b_lock1;
const char *b_lock2;
};
#endif
struct witness_pendhelp {
const char *wh_type;
@@ -318,9 +313,7 @@ witness_lock_order_key_equal(const struct witness_lock_order_key *a,
static int _isitmyx(struct witness *w1, struct witness *w2, int rmask,
const char *fname);
static void adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int blessed(struct witness *, struct witness *);
#endif
static void depart(struct witness *w);
static struct witness *enroll(const char *description,
struct lock_class *lock_class);
@@ -726,14 +719,25 @@ static struct witness_order_list_entry order_lists[] = {
{ NULL, NULL }
};
#ifdef BLESSING
/*
* Pairs of locks which have been blessed
* Don't complain about order problems with blessed locks
* Pairs of locks which have been blessed. Witness does not complain about
* order problems with blessed lock pairs. Please do not add an entry to the
* table without an explanatory comment.
*/
static struct witness_blessed blessed_list[] = {
/*
* See the comment in ufs_dirhash.c. Basically, a vnode lock serializes
* both lock orders, so a deadlock cannot happen as a result of this
* LOR.
*/
{ "dirhash", "bufwait" },
/*
* A UFS vnode may be locked in vget() while a buffer belonging to the
* parent directory vnode is locked.
*/
{ "ufs", "bufwait" },
};
#endif
/*
* This global is set to 0 once it becomes safe to use the witness code.
@@ -1339,7 +1343,6 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
* We have a lock order violation, check to see if it
* is allowed or has already been yelled about.
*/
#ifdef BLESSING
/*
* If the lock order is blessed, just bail. We don't
@@ -1348,7 +1351,6 @@
*/
if (blessed(w, w1))
goto out;
#endif
/* Bail if this violation is known */
if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
@@ -2084,7 +2086,6 @@ isitmydescendant(struct witness *ancestor, struct witness *descendant)
__func__));
}
#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
@@ -2104,7 +2105,6 @@ blessed(struct witness *w1, struct witness *w2)
}
return (0);
}
#endif
static struct witness *
witness_get(void)


@@ -2894,47 +2894,6 @@ vfs_vmio_iodone(struct buf *bp)
}
}
/*
* Unwire a page held by a buf and either free it or update the page queues to
* reflect its recent use.
*/
static void
vfs_vmio_unwire(struct buf *bp, vm_page_t m)
{
bool freed;
vm_page_lock(m);
if (vm_page_unwire_noq(m)) {
if ((bp->b_flags & B_DIRECT) != 0)
freed = vm_page_try_to_free(m);
else
freed = false;
if (!freed) {
/*
* Use a racy check of the valid bits to determine
* whether we can accelerate reclamation of the page.
* The valid bits will be stable unless the page is
* being mapped or is referenced by multiple buffers,
* and in those cases we expect races to be rare. At
* worst we will either accelerate reclamation of a
* valid page and violate LRU, or unnecessarily defer
* reclamation of an invalid page.
*
* The B_NOREUSE flag marks data that is not expected to
* be reused, so accelerate reclamation in that case
* too. Otherwise, maintain LRU.
*/
if (m->valid == 0 || (bp->b_flags & B_NOREUSE) != 0)
vm_page_deactivate_noreuse(m);
else if (vm_page_active(m))
vm_page_reference(m);
else
vm_page_deactivate(m);
}
}
vm_page_unlock(m);
}
/*
* Perform page invalidation when a buffer is released. The fully invalid
* pages will be reclaimed later in vfs_vmio_truncate().
@@ -2944,7 +2903,7 @@ vfs_vmio_invalidate(struct buf *bp)
{
vm_object_t obj;
vm_page_t m;
int i, resid, poffset, presid;
int flags, i, resid, poffset, presid;
if (buf_mapped(bp)) {
BUF_CHECK_MAPPED(bp);
@@ -2963,6 +2922,7 @@ vfs_vmio_invalidate(struct buf *bp)
*
* See man buf(9) for more information
*/
flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
obj = bp->b_bufobj->bo_object;
resid = bp->b_bufsize;
poffset = bp->b_offset & PAGE_MASK;
@@ -2984,7 +2944,7 @@
}
if (pmap_page_wired_mappings(m) == 0)
vm_page_set_invalid(m, poffset, presid);
vfs_vmio_unwire(bp, m);
vm_page_release_locked(m, flags);
resid -= presid;
poffset = 0;
}
@@ -3000,7 +2960,7 @@ vfs_vmio_truncate(struct buf *bp, int desiredpages)
{
vm_object_t obj;
vm_page_t m;
int i;
int flags, i;
if (bp->b_npages == desiredpages)
return;
@@ -3015,14 +2975,22 @@
/*
* The object lock is needed only if we will attempt to free pages.
*/
obj = (bp->b_flags & B_DIRECT) != 0 ? bp->b_bufobj->bo_object : NULL;
if (obj != NULL)
flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
if ((bp->b_flags & B_DIRECT) != 0) {
flags |= VPR_TRYFREE;
obj = bp->b_bufobj->bo_object;
VM_OBJECT_WLOCK(obj);
} else {
obj = NULL;
}
for (i = desiredpages; i < bp->b_npages; i++) {
m = bp->b_pages[i];
KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
bp->b_pages[i] = NULL;
vfs_vmio_unwire(bp, m);
if (obj != NULL)
vm_page_release_locked(m, flags);
else
vm_page_release(m, flags);
}
if (obj != NULL)
VM_OBJECT_WUNLOCK(obj);


@@ -29,7 +29,7 @@ OBJS= ${VDSO}.so
linux_assym.h: linux_genassym.o
sh ${SYSDIR}/kern/genassym.sh linux_genassym.o > ${.TARGET}
linux_locore.o: linux_locore.s linux_assym.h
linux_locore.o: linux_locore.asm linux_assym.h
${CC} -x assembler-with-cpp -DLOCORE -shared -mcmodel=small \
-pipe -I. -I${SYSDIR} -Werror -Wall -fno-common -fPIC -nostdinc \
-Wl,-T${SRCTOP}/sys/${MACHINE}/linux/${VDSO}.lds.s \


@@ -1,53 +0,0 @@
/*-
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
* can do whatever you want with this stuff. If we meet some day, and you think
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $FreeBSD$
*
*/
#ifndef _SYS_INFLATE_H_
#define _SYS_INFLATE_H_
#if defined(_KERNEL) || defined(KZIP)
#define GZ_EOF -1
#define GZ_WSIZE 0x8000
/*
* Global variables used by inflate and friends.
* This structure is used in order to make inflate() reentrant.
*/
struct inflate {
/* Public part */
/* This pointer is passed along to the two functions below */
void *gz_private;
/* Fetch next character to be uncompressed */
int (*gz_input)(void *);
/* Dispose of uncompressed characters */
int (*gz_output)(void *, u_char *, u_long);
/* Private part */
u_long gz_bb; /* bit buffer */
unsigned gz_bk; /* bits in bit buffer */
unsigned gz_hufts; /* track memory usage */
struct huft *gz_fixed_tl; /* must init to NULL !! */
struct huft *gz_fixed_td;
int gz_fixed_bl;
int gz_fixed_bd;
u_char *gz_slide;
unsigned gz_wp;
};
int inflate(struct inflate *);
#endif /* _KERNEL || KZIP */
#endif /* ! _SYS_INFLATE_H_ */


@@ -1074,6 +1074,7 @@ void proc_wkilled(struct proc *p);
struct pstats *pstats_alloc(void);
void pstats_fork(struct pstats *src, struct pstats *dst);
void pstats_free(struct pstats *ps);
void proc_clear_orphan(struct proc *p);
void reaper_abandon_children(struct proc *p, bool exiting);
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);


@@ -30,7 +30,6 @@
#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__
#include <sys/limits.h>
#include <machine/atomic.h>
#ifdef _KERNEL
@@ -40,19 +39,41 @@
#define KASSERT(exp, msg) /* */
#endif
#define REFCOUNT_SATURATED(val) (((val) & (1U << 31)) != 0)
#define REFCOUNT_SATURATION_VALUE (3U << 30)
/*
* Attempt to handle reference count overflow and underflow. Force the counter
* to stay at the saturation value so that a counter overflow cannot trigger
* destruction of the containing object and instead leads to a less harmful
* memory leak.
*/
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
panic("refcount %p wraparound", count);
#else
atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}
static __inline void
refcount_init(volatile u_int *count, u_int value)
{
KASSERT(!REFCOUNT_SATURATED(value),
("invalid initial refcount value %u", value));
*count = value;
}
static __inline void
refcount_acquire(volatile u_int *count)
{
u_int old;
KASSERT(*count < UINT_MAX, ("refcount %p overflowed", count));
atomic_add_int(count, 1);
old = atomic_fetchadd_int(count, 1);
if (__predict_false(REFCOUNT_SATURATED(old)))
_refcount_update_saturated(count);
}
static __inline __result_use_check bool
@@ -61,7 +82,7 @@ refcount_acquire_checked(volatile u_int *count)
u_int lcount;
for (lcount = *count;;) {
if (__predict_false(lcount + 1 < lcount))
if (__predict_false(REFCOUNT_SATURATED(lcount + 1)))
return (false);
if (__predict_true(atomic_fcmpset_int(count, &lcount,
lcount + 1) == 1))
@@ -76,7 +97,15 @@ refcount_release(volatile u_int *count)
atomic_thread_fence_rel();
old = atomic_fetchadd_int(count, -1);
KASSERT(old > 0, ("refcount %p is zero", count));
if (__predict_false(old == 0 || REFCOUNT_SATURATED(old))) {
/*
* Avoid multiple destructor invocations if underflow occurred.
* This is not perfect since the memory backing the containing
* object may already have been reallocated.
*/
_refcount_update_saturated(count);
return (false);
}
if (old > 1)
return (false);
@@ -84,7 +113,7 @@
* Last reference. Signal the user to call the destructor.
*
* Ensure that the destructor sees all updates. The fence_rel
* at the start of the function synchronized with this fence.
* at the start of the function synchronizes with this fence.
*/
atomic_thread_fence_acq();
return (true);
@@ -101,9 +130,10 @@ refcount_acquire_if_not_zero(volatile u_int *count)
old = *count;
for (;;) {
KASSERT(old < UINT_MAX, ("refcount %p overflowed", count));
if (old == 0)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
if (atomic_fcmpset_int(count, &old, old + 1))
return (true);
}
@@ -116,9 +146,10 @@ refcount_release_if_not_last(volatile u_int *count)
old = *count;
for (;;) {
KASSERT(old > 0, ("refcount %p is zero", count));
if (old == 1)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
if (atomic_fcmpset_int(count, &old, old - 1))
return (true);
}
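
As a usage sketch of the API above (the frob structure and its functions are
hypothetical, invented for this example): an object is created with one
reference, additional references are taken with refcount_acquire(), and the
object is freed only when refcount_release() reports the last reference.
Because a saturated counter never reports a last reference, an object whose
counter overflowed is leaked rather than freed while still in use.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/refcount.h>

/* Hypothetical reference-counted object; not part of this commit. */
struct frob {
	volatile u_int	f_refs;
	/* ... payload ... */
};

static MALLOC_DEFINE(M_FROB, "frob", "example refcounted object");

static struct frob *
frob_alloc(void)
{
	struct frob *fp;

	fp = malloc(sizeof(*fp), M_FROB, M_WAITOK | M_ZERO);
	refcount_init(&fp->f_refs, 1);
	return (fp);
}

static void
frob_hold(struct frob *fp)
{

	refcount_acquire(&fp->f_refs);
}

static void
frob_drop(struct frob *fp)
{

	/*
	 * refcount_release() returns true only for the last reference.
	 * A saturated counter never returns true here, so an overflowed
	 * object is leaked instead of being freed prematurely.
	 */
	if (refcount_release(&fp->f_refs))
		free(fp, M_FROB);
}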


@@ -40,55 +40,6 @@ typedef uint32_t seqc_t;
#ifdef _KERNEL
/*
* seqc allows readers and writers to work with a consistent snapshot. Modifying
* operations must be enclosed within a transaction delineated by
* seqc_write_beg/seqc_write_end. The trick works by having the writer increment
* the sequence number twice, at the beginning and end of the transaction.
* The reader detects that the sequence number has not changed between its start
* and end, and that the sequence number is even, to validate consistency.
*
* Some fencing (both hard fencing and compiler barriers) may be needed,
* depending on the cpu. Modern AMD cpus provide strong enough guarantees to not
* require any fencing by the reader or writer.
*
* Example usage:
*
* writers:
* lock_exclusive(&obj->lock);
* seqc_write_begin(&obj->seqc);
* obj->var1 = ...;
* obj->var2 = ...;
* seqc_write_end(&obj->seqc);
* unlock_exclusive(&obj->lock);
*
* readers:
* int var1, var2;
* seqc_t seqc;
*
* for (;;) {
* seqc = seqc_read(&obj->seqc);
* var1 = obj->var1;
* var2 = obj->var2;
* if (seqc_consistent(&obj->seqc, seqc))
* break;
* }
* .....
*
* Writers may not block or sleep in any way.
*
* There are 2 minor caveats in this implementation:
*
* 1. There is no guarantee of progress. That is, a large number of writers can
* interfere with the execution of the readers and cause the code to live-lock
* in a loop trying to acquire a consistent snapshot.
*
* 2. If the reader loops long enough, the counter may overflow and eventually
* wrap back to its initial value, fooling the reader into accepting the
* snapshot. Given that this needs 4 billion transactional writes across a
* single contended reader, it is unlikely to ever happen.
*/
/* A hack to get MPASS macro */
#include <sys/lock.h>


@@ -3747,29 +3747,92 @@ vm_page_unswappable(vm_page_t m)
vm_page_enqueue(m, PQ_UNSWAPPABLE);
}
/*
* Attempt to free the page. If it cannot be freed, do nothing. Returns true
* if the page is freed and false otherwise.
*
* The page must be managed. The page and its containing object must be
* locked.
*/
bool
vm_page_try_to_free(vm_page_t m)
static void
vm_page_release_toq(vm_page_t m, int flags)
{
vm_page_assert_locked(m);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m));
if (m->dirty != 0 || vm_page_wired(m) || vm_page_busied(m))
return (false);
if (m->object->ref_count != 0) {
pmap_remove_all(m);
if (m->dirty != 0)
return (false);
/*
* Use a check of the valid bits to determine whether we should
* accelerate reclamation of the page. The object lock might not be
* held here, in which case the check is racy. At worst we will either
* accelerate reclamation of a valid page and violate LRU, or
* unnecessarily defer reclamation of an invalid page.
*
* If we were asked to not cache the page, place it near the head of the
* inactive queue so that it is reclaimed sooner.
*/
if ((flags & (VPR_TRYFREE | VPR_NOREUSE)) != 0 || m->valid == 0)
vm_page_deactivate_noreuse(m);
else if (vm_page_active(m))
vm_page_reference(m);
else
vm_page_deactivate(m);
}
/*
* Unwire a page and either attempt to free it or re-add it to the page queues.
*/
void
vm_page_release(vm_page_t m, int flags)
{
vm_object_t object;
bool freed;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("vm_page_release: page %p is unmanaged", m));
vm_page_lock(m);
if (m->object != NULL)
VM_OBJECT_ASSERT_UNLOCKED(m->object);
if (vm_page_unwire_noq(m)) {
if ((object = m->object) == NULL) {
vm_page_free(m);
} else {
freed = false;
if ((flags & VPR_TRYFREE) != 0 && !vm_page_busied(m) &&
/* Depends on type stability. */
VM_OBJECT_TRYWLOCK(object)) {
/*
* Only free unmapped pages. The busy test from
* before the object was locked cannot be relied
* upon.
*/
if ((object->ref_count == 0 ||
!pmap_page_is_mapped(m)) && m->dirty == 0 &&
!vm_page_busied(m)) {
vm_page_free(m);
freed = true;
}
VM_OBJECT_WUNLOCK(object);
}
if (!freed)
vm_page_release_toq(m, flags);
}
}
vm_page_free(m);
return (true);
vm_page_unlock(m);
}
/* See vm_page_release(). */
void
vm_page_release_locked(vm_page_t m, int flags)
{
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("vm_page_release_locked: page %p is unmanaged", m));
vm_page_lock(m);
if (vm_page_unwire_noq(m)) {
if ((flags & VPR_TRYFREE) != 0 &&
(m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
m->dirty == 0 && !vm_page_busied(m)) {
vm_page_free(m);
} else {
vm_page_release_toq(m, flags);
}
}
vm_page_unlock(m);
}
/*


@@ -562,8 +562,12 @@ bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
#define VPR_TRYFREE 0x01
#define VPR_NOREUSE 0x02
void vm_page_release(vm_page_t m, int flags);
void vm_page_release_locked(vm_page_t m, int flags);
bool vm_page_remove(vm_page_t);
int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
@@ -574,7 +578,6 @@ void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_to_free(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);