- Move GDT/LDT locking into a separate spinlock, removing the global
  scheduler lock from this responsibility.

Contributed by:	Attilio Rao <attilio@FreeBSD.org>
Tested by:	jeff, kkenn
Jeff Roberson 2007-05-20 22:03:57 +00:00
parent 3b0e49fad9
commit 0ad5e7f326
6 changed files with 80 additions and 76 deletions
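
In outline, the change replaces the global sched_lock (and, in sysarch(), Giant) with a dedicated spin mutex, dt_lock, around every GDT/LDT update. The sketch below shows that pattern in isolation; dt_lock, its "descriptor tables" mtx_init() arguments, and the mtx(9) calls are taken from the hunks that follow, while the two wrapper functions are illustrative only and not part of the commit.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct mtx dt_lock;	/* lock for GDT and LDT */

static void
dt_lock_setup(void)
{

	/* A spin mutex: the tables are rewritten in contexts that cannot sleep. */
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
}

static void
dt_update_sketch(void)
{

	mtx_lock_spin(&dt_lock);
	/* ... rewrite GDT/LDT slots, reload them with lgdt()/lldt() ... */
	mtx_unlock_spin(&dt_lock);
}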


@@ -883,10 +883,10 @@ x86_oldldt(dummy)
 	struct gdt gtable;
 	uint16_t ltable;
-	mtx_lock_spin(&sched_lock);
 	t = curthread;
+	mtx_lock_spin(&dt_lock);
 	/* Grab location of existing GDT. */
 	x86_getldt(&gtable, &ltable);
@@ -904,7 +904,7 @@ x86_oldldt(dummy)
 	x86_setldt(&gtable, ltable);
-	mtx_unlock_spin(&sched_lock);
+	mtx_unlock_spin(&dt_lock);
 	return;
 }
@@ -918,10 +918,10 @@ x86_newldt(dummy)
 	struct x86desc *l;
 	struct thread *t;
-	mtx_lock_spin(&sched_lock);
 	t = curthread;
+	mtx_lock_spin(&dt_lock);
 	/* Grab location of existing GDT. */
 	x86_getldt(&gtable, &ltable);
@@ -952,7 +952,7 @@ x86_newldt(dummy)
 	x86_setldt(&gtable, ltable);
-	mtx_unlock_spin(&sched_lock);
+	mtx_unlock_spin(&dt_lock);
 	/* Whew. */


@@ -1171,8 +1171,10 @@ exec_setregs(td, entry, stack, ps_strings)
 	pcb->pcb_gs = _udatasel;
 	load_gs(_udatasel);
+	mtx_lock_spin(&dt_lock);
 	if (td->td_proc->p_md.md_ldt)
 		user_ldt_free(td);
+	mtx_unlock_spin(&dt_lock);
 	bzero((char *)regs, sizeof(struct trapframe));
 	regs->tf_eip = entry;
@@ -1278,6 +1280,7 @@ static struct gate_descriptor idt0[NIDT];
 struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
 union descriptor ldt[NLDT];	/* local descriptor table */
 struct region_descriptor r_gdt, r_idt;	/* table descriptors */
+struct mtx dt_lock;	/* lock for GDT and LDT */
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 extern int has_f00f_bug;
@@ -2101,6 +2104,7 @@ init386(first)
 	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
 	r_gdt.rd_base = (int) gdt;
+	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
 	lgdt(&r_gdt);
 	pcpu_init(pc, 0, sizeof(struct pcpu));


@@ -1388,11 +1388,9 @@ release_aps(void *dummy __unused)
 	if (mp_ncpus == 1)
 		return;
-	mtx_lock_spin(&sched_lock);
 	atomic_store_rel_int(&aps_ready, 1);
 	while (smp_started == 0)
 		ia32_pause();
-	mtx_unlock_spin(&sched_lock);
 }
 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);


@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/priv.h>
 #include <sys/proc.h>
+#include <sys/refcount.h>
 #include <sys/smp.h>
 #include <sys/sysproto.h>
@@ -115,7 +116,6 @@ sysarch(td, uap)
 		break;
 	}
-	mtx_lock(&Giant);
 	switch(uap->op) {
 	case I386_GET_LDT:
 		error = i386_get_ldt(td, &kargs.largs);
@@ -215,7 +215,6 @@ sysarch(td, uap)
 		error = EINVAL;
 		break;
 	}
-	mtx_unlock(&Giant);
 	return (error);
 }
@@ -351,16 +350,19 @@ done:
 /*
  * Update the GDT entry pointing to the LDT to point to the LDT of the
- * current process.
- *
- * This must be called with sched_lock held. Unfortunately, we can't use a
- * mtx_assert() here because cpu_switch() calls this function after changing
- * curproc but before sched_lock's owner is updated in mi_switch().
+ * current process. Manage dt_lock holding/unholding autonomously.
  */
 void
 set_user_ldt(struct mdproc *mdp)
 {
 	struct proc_ldt *pldt;
+	int dtlocked;
+	dtlocked = 0;
+	if (!mtx_owned(&dt_lock)) {
+		mtx_lock_spin(&dt_lock);
+		dtlocked = 1;
+	}
 	pldt = mdp->md_ldt;
 #ifdef SMP
@@ -370,6 +372,8 @@ set_user_ldt(struct mdproc *mdp)
 #endif
 	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
 	PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
+	if (dtlocked)
+		mtx_unlock_spin(&dt_lock);
 }
 #ifdef SMP
@@ -385,17 +389,15 @@ set_user_ldt_rv(struct thread *td)
 #endif
 /*
- * Must be called with either sched_lock free or held but not recursed.
- * If it does not return NULL, it will return with it owned.
+ * dt_lock must be held. Returns with dt_lock held.
  */
 struct proc_ldt *
 user_ldt_alloc(struct mdproc *mdp, int len)
 {
 	struct proc_ldt *pldt, *new_ldt;
-	if (mtx_owned(&sched_lock))
-		mtx_unlock_spin(&sched_lock);
-	mtx_assert(&sched_lock, MA_NOTOWNED);
+	mtx_assert(&dt_lock, MA_OWNED);
+	mtx_unlock_spin(&dt_lock);
 	MALLOC(new_ldt, struct proc_ldt *, sizeof(struct proc_ldt),
 	    M_SUBPROC, M_WAITOK);
@@ -406,54 +408,49 @@ user_ldt_alloc(struct mdproc *mdp, int len)
 		FREE(new_ldt, M_SUBPROC);
 		return NULL;
 	}
-	new_ldt->ldt_refcnt = 1;
+	refcount_init(&new_ldt->ldt_refcnt, 1);
 	new_ldt->ldt_active = 0;
-	mtx_lock_spin(&sched_lock);
+	mtx_lock_spin(&dt_lock);
 	gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
 	gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
 	ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);
-	if ((pldt = mdp->md_ldt)) {
+	if ((pldt = mdp->md_ldt) != NULL) {
 		if (len > pldt->ldt_len)
 			len = pldt->ldt_len;
 		bcopy(pldt->ldt_base, new_ldt->ldt_base,
 		    len * sizeof(union descriptor));
-	} else {
+	} else
 		bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));
-	}
-	return new_ldt;
+	return (new_ldt);
 }
 
 /*
- * Must be called either with sched_lock free or held but not recursed.
- * If md_ldt is not NULL, it will return with sched_lock released.
+ * Must be called with dt_lock held.
  */
 void
 user_ldt_free(struct thread *td)
 {
 	struct mdproc *mdp = &td->td_proc->p_md;
-	struct proc_ldt *pldt = mdp->md_ldt;
-	if (pldt == NULL)
+	struct proc_ldt *pldt;
+	mtx_assert(&dt_lock, MA_OWNED);
+	if ((pldt = mdp->md_ldt) == NULL)
 		return;
-	if (!mtx_owned(&sched_lock))
-		mtx_lock_spin(&sched_lock);
-	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
 	if (td == PCPU_GET(curthread)) {
 		lldt(_default_ldt);
 		PCPU_SET(currentldt, _default_ldt);
 	}
 	mdp->md_ldt = NULL;
-	if (--pldt->ldt_refcnt == 0) {
-		mtx_unlock_spin(&sched_lock);
+	if (refcount_release(&pldt->ldt_refcnt)) {
 		kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
 		    pldt->ldt_len * sizeof(union descriptor));
 		FREE(pldt, M_SUBPROC);
-	} else
-		mtx_unlock_spin(&sched_lock);
+	}
 }
 
 /*
@@ -468,7 +465,7 @@ i386_get_ldt(td, uap)
 	struct i386_ldt_args *uap;
 {
 	int error = 0;
-	struct proc_ldt *pldt = td->td_proc->p_md.md_ldt;
+	struct proc_ldt *pldt;
 	int nldt, num;
 	union descriptor *lp;
@@ -477,11 +474,14 @@ i386_get_ldt(td, uap)
 	    uap->start, uap->num, (void *)uap->descs);
 #endif
-	if (pldt) {
+	mtx_lock_spin(&dt_lock);
+	if ((pldt = td->td_proc->p_md.md_ldt) != NULL) {
 		nldt = pldt->ldt_len;
-		num = min(uap->num, nldt);
 		lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
+		mtx_unlock_spin(&dt_lock);
+		num = min(uap->num, nldt);
 	} else {
+		mtx_unlock_spin(&dt_lock);
 		nldt = sizeof(ldt)/sizeof(ldt[0]);
 		num = min(uap->num, nldt);
 		lp = &ldt[uap->start];
@@ -531,10 +531,10 @@ i386_set_ldt(td, uap, descs)
 	}
 	if (uap->num <= 0)
 		return (EINVAL);
-	mtx_lock_spin(&sched_lock);
-	pldt = mdp->md_ldt;
-	if (pldt == NULL || uap->start >= pldt->ldt_len) {
-		mtx_unlock_spin(&sched_lock);
+	mtx_lock_spin(&dt_lock);
+	if ((pldt = mdp->md_ldt) == NULL ||
+	    uap->start >= pldt->ldt_len) {
+		mtx_unlock_spin(&dt_lock);
 		return (0);
 	}
 	largest_ld = uap->start + uap->num;
@@ -543,7 +543,7 @@ i386_set_ldt(td, uap, descs)
 		i = largest_ld - uap->start;
 		bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
 		    sizeof(union descriptor) * i);
-		mtx_unlock_spin(&sched_lock);
+		mtx_unlock_spin(&dt_lock);
 		return (0);
 	}
@@ -626,15 +626,15 @@ i386_set_ldt(td, uap, descs)
 	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
 		/* Allocate a free slot */
-		pldt = mdp->md_ldt;
-		if (pldt == NULL) {
-			error = i386_ldt_grow(td, NLDT + 1);
-			if (error)
+		mtx_lock_spin(&dt_lock);
+		if ((pldt = mdp->md_ldt) == NULL) {
+			if ((error = i386_ldt_grow(td, NLDT + 1))) {
+				mtx_unlock_spin(&dt_lock);
 				return (error);
+			}
 			pldt = mdp->md_ldt;
 		}
 again:
-		mtx_lock_spin(&sched_lock);
 		/*
 		 * start scanning a bit up to leave room for NVidia and
 		 * Wine, which still user the "Blat" method of allocation.
@@ -646,24 +646,23 @@
 			dp++;
 		}
 		if (i >= pldt->ldt_len) {
-			mtx_unlock_spin(&sched_lock);
-			error = i386_ldt_grow(td, pldt->ldt_len+1);
-			if (error)
+			if ((error = i386_ldt_grow(td, pldt->ldt_len+1))) {
+				mtx_unlock_spin(&dt_lock);
 				return (error);
+			}
 			goto again;
 		}
 		uap->start = i;
 		error = i386_set_ldt_data(td, i, 1, descs);
-		mtx_unlock_spin(&sched_lock);
+		mtx_unlock_spin(&dt_lock);
 	} else {
 		largest_ld = uap->start + uap->num;
-		error = i386_ldt_grow(td, largest_ld);
-		if (error == 0) {
-			mtx_lock_spin(&sched_lock);
+		mtx_lock_spin(&dt_lock);
+		if (!(error = i386_ldt_grow(td, largest_ld))) {
 			error = i386_set_ldt_data(td, uap->start, uap->num,
 			    descs);
-			mtx_unlock_spin(&sched_lock);
 		}
+		mtx_unlock_spin(&dt_lock);
 	}
 	if (error == 0)
 		td->td_retval[0] = uap->start;
@@ -677,7 +676,7 @@ i386_set_ldt_data(struct thread *td, int start, int num,
 	struct mdproc *mdp = &td->td_proc->p_md;
 	struct proc_ldt *pldt = mdp->md_ldt;
-	mtx_assert(&sched_lock, MA_OWNED);
+	mtx_assert(&dt_lock, MA_OWNED);
 	/* Fill in range */
 	bcopy(descs,
@@ -694,14 +693,15 @@ i386_ldt_grow(struct thread *td, int len)
 	caddr_t old_ldt_base;
 	int old_ldt_len;
+	mtx_assert(&dt_lock, MA_OWNED);
 	if (len > MAX_LD)
 		return (ENOMEM);
 	if (len < NLDT + 1)
 		len = NLDT + 1;
 	/* Allocate a user ldt. */
-	pldt = mdp->md_ldt;
-	if (!pldt || len > pldt->ldt_len) {
+	if ((pldt = mdp->md_ldt) != NULL || len > pldt->ldt_len) {
 		struct proc_ldt *new_ldt;
 		new_ldt = user_ldt_alloc(mdp, len);
@@ -709,42 +709,35 @@ i386_ldt_grow(struct thread *td, int len)
 			return (ENOMEM);
 		pldt = mdp->md_ldt;
-		/* sched_lock was acquired by user_ldt_alloc. */
-		if (pldt) {
+		if (pldt != NULL) {
 			if (new_ldt->ldt_len > pldt->ldt_len) {
 				old_ldt_base = pldt->ldt_base;
 				old_ldt_len = pldt->ldt_len;
 				pldt->ldt_sd = new_ldt->ldt_sd;
 				pldt->ldt_base = new_ldt->ldt_base;
 				pldt->ldt_len = new_ldt->ldt_len;
-				mtx_unlock_spin(&sched_lock);
 				kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
 				    old_ldt_len * sizeof(union descriptor));
 				FREE(new_ldt, M_SUBPROC);
-				mtx_lock_spin(&sched_lock);
 			} else {
 				/*
 				 * If other threads already did the work,
 				 * do nothing.
 				 */
-				mtx_unlock_spin(&sched_lock);
 				kmem_free(kernel_map,
 				    (vm_offset_t)new_ldt->ldt_base,
 				    new_ldt->ldt_len * sizeof(union descriptor));
 				FREE(new_ldt, M_SUBPROC);
 				return (0);
 			}
-		} else {
+		} else
 			mdp->md_ldt = pldt = new_ldt;
-		}
 #ifdef SMP
-		mtx_unlock_spin(&sched_lock);
 		/* signal other cpus to reload ldt */
 		smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
 		    NULL, td);
 #else
 		set_user_ldt(mdp);
-		mtx_unlock_spin(&sched_lock);
 #endif
 	}
 	return (0);


@@ -62,6 +62,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/pioctl.h>
 #include <sys/proc.h>
+#include <sys/refcount.h>
 #include <sys/sf_buf.h>
 #include <sys/smp.h>
 #include <sys/sched.h>
@@ -158,8 +159,9 @@ cpu_fork(td1, p2, td2, flags)
 			struct mdproc *mdp1 = &p1->p_md;
 			struct proc_ldt *pldt;
-			pldt = mdp1->md_ldt;
-			if (pldt && pldt->ldt_refcnt > 1) {
+			mtx_lock_spin(&dt_lock);
+			if ((pldt = mdp1->md_ldt) != NULL &&
+			    pldt->ldt_refcnt > 1) {
 				pldt = user_ldt_alloc(mdp1, pldt->ldt_len);
 				if (pldt == NULL)
 					panic("could not copy LDT");
@@ -167,6 +169,7 @@ cpu_fork(td1, p2, td2, flags)
 				set_user_ldt(mdp1);
 				user_ldt_free(td1);
 			}
+			mtx_unlock_spin(&dt_lock);
 		}
 		return;
 	}
@@ -248,10 +251,10 @@ cpu_fork(td1, p2, td2, flags)
 	pcb2->pcb_ext = 0;
 	/* Copy the LDT, if necessary. */
-	mtx_lock_spin(&sched_lock);
+	mtx_lock_spin(&dt_lock);
 	if (mdp2->md_ldt != NULL) {
 		if (flags & RFMEM) {
-			mdp2->md_ldt->ldt_refcnt++;
+			refcount_acquire(&mdp2->md_ldt->ldt_refcnt);
 		} else {
 			mdp2->md_ldt = user_ldt_alloc(mdp2,
 			    mdp2->md_ldt->ldt_len);
@@ -259,7 +262,7 @@ cpu_fork(td1, p2, td2, flags)
 				panic("could not copy LDT");
 		}
 	}
-	mtx_unlock_spin(&sched_lock);
+	mtx_unlock_spin(&dt_lock);
 	/* Setup to release sched_lock in fork_exit(). */
 	td2->td_md.md_spinlock_count = 1;
@@ -304,11 +307,13 @@ cpu_exit(struct thread *td)
 	 * If this process has a custom LDT, release it. Reset pc->pcb_gs
 	 * and %gs before we free it in case they refer to an LDT entry.
 	 */
+	mtx_lock_spin(&dt_lock);
 	if (td->td_proc->p_md.md_ldt) {
 		td->td_pcb->pcb_gs = _udatasel;
 		load_gs(_udatasel);
 		user_ldt_free(td);
 	}
+	mtx_unlock_spin(&dt_lock);
 }
 
 void


@@ -45,6 +45,8 @@ struct proc_ldt {
 /*
  * Machine-dependent part of the proc structure for i386.
+ * Table of MD locks:
+ *	t - Descriptor tables lock
  */
 struct mdthread {
 	int md_spinlock_count;	/* (k) */
@@ -52,7 +54,7 @@ struct mdthread {
 };
 
 struct mdproc {
-	struct proc_ldt *md_ldt;	/* (j) per-process ldt */
+	struct proc_ldt *md_ldt;	/* (t) per-process ldt */
 };
 
 #ifdef _KERNEL
@@ -61,6 +63,8 @@ void set_user_ldt(struct mdproc *);
 struct proc_ldt *user_ldt_alloc(struct mdproc *, int);
 void user_ldt_free(struct thread *);
+extern struct mtx dt_lock;
 #endif /* _KERNEL */

 #endif /* !_MACHINE_PROC_H_ */
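
For reference, the caller-side contract that falls out of the change: user_ldt_free() and user_ldt_alloc() now expect dt_lock to be held on entry (user_ldt_alloc() drops it around the sleeping MALLOC() and reacquires it before returning), so callers take and release the lock themselves, as the exec_setregs() and cpu_exit() hunks above do. A minimal sketch of such a caller follows; the wrapper function name is illustrative and not part of the commit.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/*
 * Illustrative caller: release a process's private LDT, if any, following
 * the locking convention shown in the cpu_exit() hunk above.
 */
static void
drop_private_ldt(struct thread *td)
{

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt != NULL)
		user_ldt_free(td);	/* called and returns with dt_lock held */
	mtx_unlock_spin(&dt_lock);
}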