Update various places that store or manipulate CPU masks to use cpumask_t
instead of int or u_int. Since cpumask_t is currently u_int on all platforms, this should be a purely cosmetic change.
This commit is contained in:
parent
c031c93b5d
commit
60c7b36b7a
@ -1053,7 +1053,7 @@ smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_o
|
||||
int ncpu, othercpus;
|
||||
|
||||
othercpus = mp_ncpus - 1;
|
||||
if (mask == (u_int)-1) {
|
||||
if (mask == (cpumask_t)-1) {
|
||||
ncpu = othercpus;
|
||||
if (ncpu < 1)
|
||||
return;
|
||||
@ -1078,7 +1078,7 @@ smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_o
|
||||
smp_tlb_addr1 = addr1;
|
||||
smp_tlb_addr2 = addr2;
|
||||
atomic_store_rel_int(&smp_tlb_wait, 0);
|
||||
if (mask == (u_int)-1)
|
||||
if (mask == (cpumask_t)-1)
|
||||
ipi_all_but_self(vector);
|
||||
else
|
||||
ipi_selected(mask, vector);
|
||||
@ -1549,9 +1549,11 @@ mp_grab_cpu_hlt(void)
|
||||
mask = PCPU_GET(cpumask);
|
||||
#endif
|
||||
|
||||
retval = mask & hlt_cpus_mask;
|
||||
while (mask & hlt_cpus_mask)
|
||||
retval = 0;
|
||||
while (mask & hlt_cpus_mask) {
|
||||
retval = 1;
|
||||
__asm __volatile("sti; hlt" : : : "memory");
|
||||
}
|
||||
return (retval);
|
||||
}
|
||||
|
||||
|
@ -527,7 +527,8 @@ void
|
||||
cpu_reset()
|
||||
{
|
||||
#ifdef SMP
|
||||
u_int cnt, map;
|
||||
cpumask_t map;
|
||||
u_int cnt;
|
||||
|
||||
if (smp_active) {
|
||||
map = PCPU_GET(other_cpus) & ~stopped_cpus;
|
||||
|
@ -136,7 +136,7 @@ struct pmap {
|
||||
pd_entry_t *pm_pdir; /* KVA of page directory */
|
||||
uint32_t pm_gen_count; /* generation count (pmap lock dropped) */
|
||||
u_int pm_retries;
|
||||
int pm_active; /* active on cpus */
|
||||
cpumask_t pm_active; /* active on cpus */
|
||||
struct pmap_statistics pm_stats; /* pmap statictics */
|
||||
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
|
||||
};
|
||||
|
@ -1598,9 +1598,11 @@ mp_grab_cpu_hlt(void)
|
||||
mask = PCPU_GET(cpumask);
|
||||
#endif
|
||||
|
||||
retval = mask & hlt_cpus_mask;
|
||||
while (mask & hlt_cpus_mask)
|
||||
retval = 0;
|
||||
while (mask & hlt_cpus_mask) {
|
||||
retval = 1;
|
||||
__asm __volatile("sti; hlt" : : : "memory");
|
||||
}
|
||||
return (retval);
|
||||
}
|
||||
|
||||
|
@ -596,7 +596,8 @@ cpu_reset()
|
||||
#endif
|
||||
|
||||
#ifdef SMP
|
||||
u_int cnt, map;
|
||||
cpumask_t map;
|
||||
u_int cnt;
|
||||
|
||||
if (smp_active) {
|
||||
map = PCPU_GET(other_cpus) & ~stopped_cpus;
|
||||
|
@ -863,8 +863,7 @@ pmap_cache_bits(int mode, boolean_t is_pde)
|
||||
void
|
||||
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
|
||||
{
|
||||
u_int cpumask;
|
||||
u_int other_cpus;
|
||||
cpumask_t cpumask, other_cpus;
|
||||
|
||||
CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
|
||||
pmap, va);
|
||||
@ -888,8 +887,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
|
||||
void
|
||||
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
{
|
||||
u_int cpumask;
|
||||
u_int other_cpus;
|
||||
cpumask_t cpumask, other_cpus;
|
||||
vm_offset_t addr;
|
||||
|
||||
CTR3(KTR_PMAP, "pmap_invalidate_page: pmap=%p eva=0x%x sva=0x%x",
|
||||
@ -917,8 +915,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
void
|
||||
pmap_invalidate_all(pmap_t pmap)
|
||||
{
|
||||
u_int cpumask;
|
||||
u_int other_cpus;
|
||||
cpumask_t cpumask, other_cpus;
|
||||
|
||||
CTR1(KTR_PMAP, "pmap_invalidate_page: pmap=%p", pmap);
|
||||
|
||||
|
@ -81,7 +81,7 @@ struct pmap {
|
||||
* pmap */
|
||||
uint32_t pm_gen_count; /* generation count (pmap lock dropped) */
|
||||
u_int pm_retries;
|
||||
int pm_active; /* active on cpus */
|
||||
cpumask_t pm_active; /* active on cpus */
|
||||
struct {
|
||||
u_int32_t asid:ASID_BITS; /* TLB address space tag */
|
||||
u_int32_t gen:ASIDGEN_BITS; /* its generation number */
|
||||
|
@ -99,7 +99,7 @@ struct pmap {
|
||||
#else
|
||||
register_t pm_sr[16];
|
||||
#endif
|
||||
u_int pm_active;
|
||||
cpumask_t pm_active;
|
||||
uint32_t pm_gen_count; /* generation count (pmap lock dropped) */
|
||||
u_int pm_retries;
|
||||
|
||||
@ -153,7 +153,7 @@ void slb_free_user_cache(struct slb *);
|
||||
struct pmap {
|
||||
struct mtx pm_mtx; /* pmap mutex */
|
||||
tlbtid_t pm_tid[MAXCPU]; /* TID to identify this pmap entries in TLB */
|
||||
u_int pm_active; /* active on cpus */
|
||||
cpumask_t pm_active; /* active on cpus */
|
||||
int pm_refs; /* ref count */
|
||||
struct pmap_statistics pm_stats; /* pmap statistics */
|
||||
|
||||
|
@ -61,7 +61,7 @@ struct pmap {
|
||||
struct mtx pm_mtx;
|
||||
struct tte *pm_tsb;
|
||||
vm_object_t pm_tsb_obj;
|
||||
u_int pm_active;
|
||||
cpumask_t pm_active;
|
||||
uint32_t pm_gen_count; /* generation count (pmap lock dropped) */
|
||||
u_int pm_retries;
|
||||
u_int pm_context[MAXCPU];
|
||||
|
@ -77,17 +77,17 @@ struct cpu_start_args {
|
||||
};
|
||||
|
||||
struct ipi_cache_args {
|
||||
u_int ica_mask;
|
||||
cpumask_t ica_mask;
|
||||
vm_paddr_t ica_pa;
|
||||
};
|
||||
|
||||
struct ipi_rd_args {
|
||||
u_int ira_mask;
|
||||
cpumask_t ira_mask;
|
||||
register_t *ira_val;
|
||||
};
|
||||
|
||||
struct ipi_tlb_args {
|
||||
u_int ita_mask;
|
||||
cpumask_t ita_mask;
|
||||
struct pmap *ita_pmap;
|
||||
u_long ita_start;
|
||||
u_long ita_end;
|
||||
@ -208,7 +208,7 @@ static __inline void *
|
||||
ipi_tlb_context_demap(struct pmap *pm)
|
||||
{
|
||||
struct ipi_tlb_args *ita;
|
||||
u_int cpus;
|
||||
cpumask_t cpus;
|
||||
|
||||
if (smp_cpus == 1)
|
||||
return (NULL);
|
||||
@ -230,7 +230,7 @@ static __inline void *
|
||||
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
|
||||
{
|
||||
struct ipi_tlb_args *ita;
|
||||
u_int cpus;
|
||||
cpumask_t cpus;
|
||||
|
||||
if (smp_cpus == 1)
|
||||
return (NULL);
|
||||
@ -252,7 +252,7 @@ static __inline void *
|
||||
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
|
||||
{
|
||||
struct ipi_tlb_args *ita;
|
||||
u_int cpus;
|
||||
cpumask_t cpus;
|
||||
|
||||
if (smp_cpus == 1)
|
||||
return (NULL);
|
||||
@ -275,7 +275,7 @@ ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
|
||||
static __inline void
|
||||
ipi_wait(void *cookie)
|
||||
{
|
||||
volatile u_int *mask;
|
||||
volatile cpumask_t *mask;
|
||||
|
||||
if ((mask = cookie) != NULL) {
|
||||
atomic_clear_int(mask, PCPU_GET(cpumask));
|
||||
|
@ -121,7 +121,7 @@ cpu_ipi_single_t *cpu_ipi_single;
|
||||
static vm_offset_t mp_tramp;
|
||||
static u_int cpuid_to_mid[MAXCPU];
|
||||
static int isjbus;
|
||||
static volatile u_int shutdown_cpus;
|
||||
static volatile cpumask_t shutdown_cpus;
|
||||
|
||||
static void ap_count(phandle_t node, u_int mid, u_int cpu_impl);
|
||||
static void ap_start(phandle_t node, u_int mid, u_int cpu_impl);
|
||||
|
@ -59,12 +59,12 @@ struct cpu_start_args {
|
||||
};
|
||||
|
||||
struct ipi_cache_args {
|
||||
u_int ica_mask;
|
||||
cpumask_t ica_mask;
|
||||
vm_paddr_t ica_pa;
|
||||
};
|
||||
|
||||
struct ipi_tlb_args {
|
||||
u_int ita_mask;
|
||||
cpumask_t ita_mask;
|
||||
struct pmap *ita_pmap;
|
||||
u_long ita_start;
|
||||
u_long ita_end;
|
||||
|
@ -115,7 +115,7 @@ vm_offset_t mp_tramp;
|
||||
|
||||
u_int mp_boot_mid;
|
||||
|
||||
static volatile u_int shutdown_cpus;
|
||||
static volatile cpumask_t shutdown_cpus;
|
||||
|
||||
void cpu_mp_unleash(void *);
|
||||
SYSINIT(cpu_mp_unleash, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
|
||||
|
@ -1451,7 +1451,7 @@ pmap_ipi(pmap_t pmap, char *func, uint64_t arg1, uint64_t arg2)
|
||||
{
|
||||
|
||||
int i, cpu_count, retried;
|
||||
u_int cpus;
|
||||
cpumask_t cpus;
|
||||
cpumask_t cpumask, active, curactive;
|
||||
cpumask_t active_total, ackmask;
|
||||
uint16_t *cpulist;
|
||||
|
Loading…
Reference in New Issue
Block a user