Malloc p_stats instead of putting it in the U area. We should consider
simply embedding it in struct proc.

Reviewed by:	arch@
David Schultz 2004-11-20 02:28:48 +00:00
parent 7a62aa8a18
commit 8b059651ba
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=137909
3 changed files with 49 additions and 12 deletions
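For orientation before the diffs: after this change, each process's statistics live in a separately malloc'ed struct pstats owned by struct proc, with pstats_alloc()/pstats_fork()/pstats_free() managing the lifecycle, instead of being embedded in the pageable U area. Below is a minimal userland sketch of that lifecycle only; the struct, member names, and main() are hypothetical, and the real kernel code uses malloc(9) with M_SUBPROC and the real struct pstats, as the diffs show. (A second sketch after the last diff illustrates the offsetof() range trick that pstats_fork() uses.)

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for struct pstats. */
struct mystats {
	long	ms_runtime;	/* a statistic: reset for a new child */
	long	ms_interval;	/* a setting: inherited by a new child */
};

/* Allocate a zeroed stats block, as pstats_alloc() does with malloc(9). */
static struct mystats *
mystats_alloc(void)
{
	return (calloc(1, sizeof(struct mystats)));
}

/* On fork, zero the statistics and copy the inherited settings. */
static void
mystats_fork(struct mystats *src, struct mystats *dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->ms_interval = src->ms_interval;
}

static void
mystats_free(struct mystats *ps)
{
	free(ps);
}

int
main(void)
{
	struct mystats *parent, *child;

	parent = mystats_alloc();	/* cf. proc0_init() / proc_init() */
	parent->ms_runtime = 42;
	parent->ms_interval = 100;

	child = mystats_alloc();	/* each process owns its own block */
	mystats_fork(parent, child);	/* cf. fork1() calling pstats_fork() */
	printf("child: runtime=%ld interval=%ld\n",
	    child->ms_runtime, child->ms_interval);

	mystats_free(child);		/* cf. pstats_free(); its kernel call
					   site is not shown in this diff */
	mystats_free(parent);
	return (0);
}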

View File

@@ -422,6 +422,8 @@ proc0_init(void *dummy __unused)
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
p->p_cpulimit = RLIM_INFINITY;
p->p_stats = pstats_alloc();
/* Allocate a prototype map so we have something to fork. */
pmap_pinit0(vmspace_pmap(&vmspace0));
p->p_vmspace = &vmspace0;
@@ -430,12 +432,6 @@ proc0_init(void *dummy __unused)
p->p_sysent->sv_maxuser);
vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0);
/*
* We continue to place resource usage info
* in the user struct so that it's pageable.
*/
p->p_stats = &p->p_uarea->u_stats;
/*
* Charge root for one process.
*/

View File

@@ -489,7 +489,6 @@ fork1(td, flags, pages, procp)
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
* The p_stats substruct is set in vm_forkproc.
*/
p2->p_flag = 0;
if (p1->p_flag & P_PROFIL)
@@ -527,6 +526,9 @@ fork1(td, flags, pages, procp)
* p_limit is copy-on-write. Bump its refcount.
*/
p2->p_limit = lim_hold(p1->p_limit);
pstats_fork(p1->p_stats, p2->p_stats);
PROC_UNLOCK(p1);
PROC_UNLOCK(p2);

View File

@@ -96,9 +96,7 @@ uma_zone_t proc_zone;
uma_zone_t ithread_zone;
int kstack_pages = KSTACK_PAGES;
int uarea_pages = UAREA_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTLFLAG_RD, &uarea_pages, 0, "");
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
@@ -180,11 +178,11 @@ proc_init(void *mem, int size, int flags)
p = (struct proc *)mem;
p->p_sched = (struct p_sched *)&p[1];
vm_proc_new(p);
td = thread_alloc();
kg = ksegrp_alloc();
bzero(&p->p_mtx, sizeof(struct mtx));
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
p->p_stats = pstats_alloc();
proc_linkup(p, kg, td);
sched_newproc(p, kg, td);
return (0);
@@ -660,8 +658,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_size = vm->vm_map.size;
kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/
if (p->p_sflag & PS_INMEM)
kp->ki_rssize += UAREA_PAGES;
FOREACH_THREAD_IN_PROC(p, td0) {
if (!TD_IS_SWAPPED(td0))
kp->ki_rssize += td0->td_kstack_pages;
@@ -804,6 +800,49 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_ppid = p->p_pptr->p_pid;
}

/*
 * Fill a 'struct user' for backwards compatibility with a.out core dumps.
 * This is used by the aout, linux, and pecoff modules.
 */
void
fill_user(struct proc *p, struct user *u)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);
	bcopy(p->p_stats, &u->u_stats, sizeof(struct pstats));
	fill_kinfo_proc(p, &u->u_kproc);
}

struct pstats *
pstats_alloc(void)
{
	return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK));
}

/*
 * Copy parts of p_stats; zero the rest of p_stats (statistics).
 */
void
pstats_fork(struct pstats *src, struct pstats *dst)
{
#define	RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
	bzero(&dst->pstat_startzero,
	    (unsigned)RANGEOF(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&src->pstat_startcopy, &dst->pstat_startcopy,
	    (unsigned)RANGEOF(struct pstats, pstat_startcopy, pstat_endcopy));
#undef RANGEOF
}

void
pstats_free(struct pstats *ps)
{
	free(ps, M_SUBPROC);
}

/*
* Locate a zombie process by number
*/
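
A note on the RANGEOF() trick in pstats_fork() above: struct pstats delimits a "zeroed on fork" region and a "copied on fork" region with marker names (pstat_startzero/pstat_endzero, pstat_startcopy/pstat_endcopy) that alias real members, so differences of offsetof() give each region's byte length. The following is a small, self-contained sketch of the same technique with a hypothetical struct and hypothetical member names; it is an illustration, not the kernel code, and uses memset()/memcpy() where the kernel uses bzero()/bcopy().

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical record split into a "zeroed on fork" region and a
 * "copied on fork" region, delimited by marker aliases in the same
 * style struct pstats uses.
 */
struct demo_stats {
#define	ds_startzero	ds_runtime
	long	ds_runtime;	/* statistic: zeroed for the child */
	long	ds_faults;	/* statistic: zeroed for the child */
#define	ds_endzero	ds_startcopy
#define	ds_startcopy	ds_interval
	long	ds_interval;	/* setting: copied to the child */
	long	ds_profscale;	/* setting: copied to the child */
#define	ds_endcopy	ds_starttime
	long	ds_starttime;	/* outside both regions; left alone */
};

/* Byte length of the half-open region [start, end). */
#define	RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

static void
demo_stats_fork(struct demo_stats *src, struct demo_stats *dst)
{
	memset(&dst->ds_startzero, 0,
	    RANGEOF(struct demo_stats, ds_startzero, ds_endzero));
	memcpy(&dst->ds_startcopy, &src->ds_startcopy,
	    RANGEOF(struct demo_stats, ds_startcopy, ds_endcopy));
}

int
main(void)
{
	struct demo_stats parent = { 10, 3, 100, 4, 999 };
	struct demo_stats child = { -1, -1, -1, -1, -1 };

	demo_stats_fork(&parent, &child);
	/* Prints: runtime=0 faults=0 interval=100 scale=4 start=-1 */
	printf("child: runtime=%ld faults=%ld interval=%ld scale=%ld start=%ld\n",
	    child.ds_runtime, child.ds_faults, child.ds_interval,
	    child.ds_profscale, child.ds_starttime);
	return (0);
}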