mmap(MAP_STACK): on stack grow, use original protection

If mprotect(2) changed the protection at the bottom of the currently
grown stack region, the changed protection would be used for the stack
grow on the next fault.  This is arguably unexpected.

Store the original protection for the entry at mmap(2) time in the
offset member of the gap vm_map_entry, and use it for protection of the
grown stack region.

PR:	272585
Reported by:	John F. Carr <jfc@mit.edu>
Reviewed by:	alc, markj
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D41089
Author:	Konstantin Belousov	2023-07-19 14:05:32 +03:00
commit:	21e45c30c3
parent:	a52f23f4c4
2 changed files with 20 additions and 8 deletions
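
Before the diff, here is a hedged userspace sketch of the reported
scenario (illustrative only, not taken from the PR or the commit; it
assumes FreeBSD's grows-down MAP_STACK semantics and that a grow fault
maps pages down exactly to the faulting page, so that the mprotect(2)
below hits the bottom-most mapped page adjacent to the gap entry):

#include <sys/mman.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t pgsz = (size_t)getpagesize();
	size_t sz = 4 * 1024 * 1024;	/* 4 MB stack reservation */
	char *base, *p;

	/* Reserve a grows-down stack; only the top is mapped eagerly. */
	base = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_STACK, -1, 0);
	if (base == MAP_FAILED)
		err(1, "mmap");

	/* Fault halfway down so the kernel grows the stack to here. */
	p = base + sz / 2;
	*p = 1;

	/* Change the protection at the bottom of the grown region. */
	if (mprotect(p, pgsz, PROT_READ) == -1)
		err(1, "mprotect");

	/*
	 * Grow the stack once more.  Before this fix the freshly grown
	 * pages inherited PROT_READ from the clipped bottom entry and
	 * the write below died with SIGSEGV; with the original
	 * protection recovered from the gap entry, it succeeds.
	 */
	*(p - pgsz) = 1;
	printf("grown pages kept the original protection\n");
	return (0);
}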

sys/vm/vm_map.c

@@ -4493,7 +4493,7 @@ static int
 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
 {
-	vm_map_entry_t new_entry, prev_entry;
+	vm_map_entry_t gap_entry, new_entry, prev_entry;
 	vm_offset_t bot, gap_bot, gap_top, top;
 	vm_size_t init_ssize, sgp;
 	int orient, rv;
@@ -4575,11 +4575,14 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 		 * read-ahead logic is never used for it. Re-use
 		 * next_read of the gap entry to store
 		 * stack_guard_page for vm_map_growstack().
+		 * Similarly, since a gap cannot have a backing object,
+		 * store the original stack protections in the
+		 * object offset.
 		 */
-		if (orient == MAP_STACK_GROWS_DOWN)
-			vm_map_entry_pred(new_entry)->next_read = sgp;
-		else
-			vm_map_entry_succ(new_entry)->next_read = sgp;
+		gap_entry = orient == MAP_STACK_GROWS_DOWN ?
+		    vm_map_entry_pred(new_entry) : vm_map_entry_succ(new_entry);
+		gap_entry->next_read = sgp;
+		gap_entry->offset = prot;
 	} else {
 		(void)vm_map_delete(map, bot, top);
 	}
@@ -4599,6 +4602,7 @@ vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
 	struct ucred *cred;
 	vm_offset_t gap_end, gap_start, grow_start;
 	vm_size_t grow_amount, guard, max_grow;
+	vm_prot_t prot;
 	rlim_t lmemlim, stacklim, vmemlim;
 	int rv, rv1 __diagused;
 	bool gap_deleted, grow_down, is_procstack;
@@ -4739,6 +4743,12 @@ vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
 	}
 	if (grow_down) {
+		/*
+		 * The gap_entry "offset" field is overloaded.  See
+		 * vm_map_stack_locked().
+		 */
+		prot = gap_entry->offset;
+
 		grow_start = gap_entry->end - grow_amount;
 		if (gap_entry->start + grow_amount == gap_entry->end) {
 			gap_start = gap_entry->start;
@@ -4751,9 +4761,7 @@ vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
 			gap_deleted = false;
 		}
 		rv = vm_map_insert(map, NULL, 0, grow_start,
-		    grow_start + grow_amount,
-		    stack_entry->protection, stack_entry->max_protection,
-		    MAP_STACK_GROWS_DOWN);
+		    grow_start + grow_amount, prot, prot, MAP_STACK_GROWS_DOWN);
 		if (rv != KERN_SUCCESS) {
 			if (gap_deleted) {
 				rv1 = vm_map_insert(map, NULL, 0, gap_start,

sys/vm/vm_map.h

@@ -97,6 +97,10 @@ union vm_map_object {
  *	a VM object (or sharing map) and offset into that object,
  *	and user-exported inheritance and protection information.
  *	Also included is control information for virtual copy operations.
+ *
+ *	For stack gap map entries (MAP_ENTRY_GUARD | MAP_ENTRY_GROWS_DOWN
+ *	or UP), the next_read member is reused as the stack_guard_page
+ *	storage, and offset is the stack protection.
  */
 struct vm_map_entry {
 	struct vm_map_entry *left;	/* left child or previous entry */
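
To make the overloading described in that comment concrete, here is a
minimal sketch with simplified stand-in types (illustrative only; the
real field definitions live in sys/vm/vm_map.h, and the helper names
below are hypothetical, not kernel API):

#include <stdint.h>

typedef uintptr_t vm_offset_t;	/* stand-ins for the kernel typedefs */
typedef int64_t vm_ooffset_t;
typedef uint8_t vm_prot_t;

/* Cut-down map entry with only the fields the overloading touches. */
struct map_entry {
	void *object;		/* NULL for MAP_ENTRY_GUARD entries */
	vm_ooffset_t offset;	/* normally: offset into the object */
	vm_offset_t next_read;	/* normally: read-ahead hint */
};

/*
 * A gap entry has no backing object, so object stays NULL and the
 * object-related fields carry no meaning of their own.  mmap(MAP_STACK)
 * can therefore stash the stack parameters there ...
 */
static void
gap_stash(struct map_entry *gap, vm_offset_t sgp, vm_prot_t prot)
{
	gap->next_read = sgp;	/* stack_guard_page for the grow path */
	gap->offset = prot;	/* original protection for grown pages */
}

/* ... and the grow path reads them back on a stack-grow fault. */
static vm_prot_t
gap_prot(const struct map_entry *gap)
{
	return ((vm_prot_t)gap->offset);
}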