Remove several write-only variables, all reported by the gcc 4.9 buildkernel run.

Some of them were write-only under certain kernel options only, e.g.
variables holding values that are consumed solely by CTR() macros.
Eliminating the warnings in those cases as well, by removing locally
cached values that are read only once, costs nothing in code
readability or correctness.

Review:	https://reviews.freebsd.org/D2665
Reviewed by:	rodrigc
Looked at by:	bjk
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Author:	Konstantin Belousov
Date:	2015-05-29 13:24:17 +00:00
Commit:	69baeadc31
Parent:	4e870f943f

16 changed files with 16 additions and 76 deletions
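
The pattern described above can be reproduced in a small standalone sketch. This is a hypothetical illustration, not code from the diff: TRACE() merely stands in for the kernel's CTR() macros, and the structure and function names are invented. When tracing is compiled out, the macro discards its arguments, so a local cached only to feed it becomes write-only and gcc's -Wunused-but-set-variable (part of -Wall) fires; folding the single access into the macro argument removes both the variable and the warning.

#include <stdio.h>

#ifdef TRACING
#define TRACE(fmt, ...) printf(fmt "\n", __VA_ARGS__)
#else
#define TRACE(fmt, ...) do { } while (0)        /* arguments are discarded */
#endif

struct proc { int p_pid; };
struct thread { struct proc *td_proc; };

/* Before: 'p' exists only to feed the tracepoint. */
static void
switch_before(struct thread *td)
{
        struct proc *p;

        p = td->td_proc;        /* write-only when TRACING is undefined */
        TRACE("old thread pid %d", p->p_pid);
}

/* After: the single access is folded into the macro argument. */
static void
switch_after(struct thread *td)
{

        TRACE("old thread pid %d", td->td_proc->p_pid);
}

int
main(void)
{
        struct proc p0 = { 1 };
        struct thread td0 = { &p0 };

        switch_before(&td0);
        switch_after(&td0);
        return (0);
}

Compiling the sketch with gcc -Wall and without -DTRACING reproduces the set-but-not-used warning for switch_before() only; defining TRACING, or rewriting it in the switch_after() form, makes the warning go away.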

View File

@@ -3935,7 +3935,6 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 pd_entry_t newpde;
 pt_entry_t *firstpte, oldpte, pa, *pte;
 pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V;
-vm_offset_t oldpteva;
 vm_page_t mpte;
 int PG_PTE_CACHE;
@@ -3995,10 +3994,9 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
 goto setpte;
 oldpte &= ~PG_RW;
-oldpteva = (oldpte & PG_FRAME & PDRMASK) |
-(va & ~PDRMASK);
 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
-" in pmap %p", oldpteva, pmap);
+" in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
+(va & ~PDRMASK), pmap);
 }
 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
 atomic_add_long(&pmap_pde_p_failures, 1);

View File

@@ -155,7 +155,6 @@ cpu_fork(td1, p2, td2, flags)
 struct pcb *pcb2;
 struct mdproc *mdp1, *mdp2;
 struct proc_ldt *pldt;
-pmap_t pmap2;
 p1 = td1->td_proc;
 if ((flags & RFPROC) == 0) {
@@ -218,7 +217,6 @@ cpu_fork(td1, p2, td2, flags)
 * Set registers for trampoline to user mode. Leave space for the
 * return address on stack. These are the kernel mode register values.
 */
-pmap2 = vmspace_pmap(p2->p_vmspace);
 pcb2->pcb_r12 = (register_t)fork_return; /* fork_trampoline argument */
 pcb2->pcb_rbp = 0;
 pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *);

View File

@@ -79,11 +79,9 @@ __FBSDID("$FreeBSD$");
 int
 fill_regs32(struct thread *td, struct reg32 *regs)
 {
-struct pcb *pcb;
 struct trapframe *tp;
 tp = td->td_frame;
-pcb = td->td_pcb;
 if (tp->tf_flags & TF_HASSEGS) {
 regs->r_gs = tp->tf_gs;
 regs->r_fs = tp->tf_fs;
@@ -113,18 +111,16 @@ fill_regs32(struct thread *td, struct reg32 *regs)
 int
 set_regs32(struct thread *td, struct reg32 *regs)
 {
-struct pcb *pcb;
 struct trapframe *tp;
 tp = td->td_frame;
 if (!EFL_SECURE(regs->r_eflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
 return (EINVAL);
-pcb = td->td_pcb;
 tp->tf_gs = regs->r_gs;
 tp->tf_fs = regs->r_fs;
 tp->tf_es = regs->r_es;
 tp->tf_ds = regs->r_ds;
-set_pcb_flags(pcb, PCB_FULL_IRET);
+set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
 tp->tf_flags = TF_HASSEGS;
 tp->tf_rdi = regs->r_edi;
 tp->tf_rsi = regs->r_esi;

View File

@@ -2389,7 +2389,7 @@ pci_set_powerstate_method(device_t dev, device_t child, int state)
 struct pci_devinfo *dinfo = device_get_ivars(child);
 pcicfgregs *cfg = &dinfo->cfg;
 uint16_t status;
-int result, oldstate, highest, delay;
+int oldstate, highest, delay;
 if (cfg->pp.pp_cap == 0)
 return (EOPNOTSUPP);
@@ -2424,7 +2424,6 @@ pci_set_powerstate_method(device_t dev, device_t child, int state)
 delay = 0;
 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
 & ~PCIM_PSTAT_DMASK;
-result = 0;
 switch (state) {
 case PCI_POWERSTATE_D0:
 status |= PCIM_PSTAT_D0;
@@ -2989,7 +2988,6 @@ static void
 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
 uint32_t prefetchmask)
 {
-struct resource *r;
 int rid, type, progif;
 #if 0
 /* if this device supports PCI native addressing use it */
@@ -3012,11 +3010,11 @@ pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
 } else {
 rid = PCIR_BAR(0);
 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
-r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
+(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
 0x1f7, 8, 0);
 rid = PCIR_BAR(1);
 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
-r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
+(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
 0x3f6, 1, 0);
 }
 if (progif & PCIP_STORAGE_IDE_MODESEC) {
@@ -3027,11 +3025,11 @@ pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
 } else {
 rid = PCIR_BAR(2);
 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
-r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
+(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
 0x177, 8, 0);
 rid = PCIR_BAR(3);
 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
-r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
+(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
 0x376, 1, 0);
 }
 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
@@ -3727,7 +3725,6 @@ pci_detach(device_t dev)
 static void
 pci_set_power_child(device_t dev, device_t child, int state)
 {
-struct pci_devinfo *dinfo;
 device_t pcib;
 int dstate;
@@ -3739,7 +3736,6 @@ pci_set_power_child(device_t dev, device_t child, int state)
 * are handled separately.
 */
 pcib = device_get_parent(dev);
-dinfo = device_get_ivars(child);
 dstate = state;
 if (device_is_attached(child) &&
 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)

View File

@@ -964,12 +964,10 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
 int *status, int options, struct __wrusage *wrusage, siginfo_t *siginfo,
 int check_only)
 {
-struct proc *q;
 struct rusage *rup;
 sx_assert(&proctree_lock, SA_XLOCKED);
-q = td->td_proc;
 PROC_LOCK(p);
 switch (idtype) {

View File

@@ -414,11 +414,9 @@ mi_switch(int flags, struct thread *newtd)
 {
 uint64_t runtime, new_switchtime;
 struct thread *td;
-struct proc *p;
 td = curthread; /* XXX */
 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
-p = td->td_proc; /* XXX */
 KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
 #ifdef INVARIANTS
 if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
@@ -458,7 +456,7 @@ mi_switch(int flags, struct thread *newtd)
 PCPU_INC(cnt.v_swtch);
 PCPU_SET(switchticks, ticks);
 CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
-td->td_tid, td->td_sched, p->p_pid, td->td_name);
+td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
 #if (KTR_COMPILE & KTR_SCHED) != 0
 if (TD_IS_IDLETHREAD(td))
 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
@@ -474,7 +472,7 @@ mi_switch(int flags, struct thread *newtd)
 "prio:%d", td->td_priority);
 CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
-td->td_tid, td->td_sched, p->p_pid, td->td_name);
+td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
 /*
 * If the last thread was exiting, finish cleaning it up.

View File

@@ -310,7 +310,6 @@ static struct buf *
 cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
 daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
 {
-struct bufobj *bo;
 struct buf *bp, *tbp;
 daddr_t bn;
 off_t off;
@@ -376,7 +375,6 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
 bp->b_npages = 0;
 inc = btodb(size);
-bo = &vp->v_bufobj;
 for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
 if (i == 0) {
 VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);

View File

@@ -311,9 +311,7 @@ static int
 vfs_unregister(struct vfsconf *vfc)
 {
 struct vfsconf *vfsp;
-int error, i, maxtypenum;
-i = vfc->vfc_typenum;
+int error, maxtypenum;
 vfsconf_lock();
 vfsp = vfs_byname_locked(vfc->vfc_name);

View File

@@ -4691,12 +4691,10 @@ softdep_setup_dotdot_link(dp, ip)
 struct inodedep *inodedep;
 struct jaddref *jaddref;
 struct vnode *dvp;
-struct vnode *vp;
 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
 ("softdep_setup_dotdot_link called on non-softdep filesystem"));
 dvp = ITOV(dp);
-vp = ITOV(ip);
 jaddref = NULL;
 /*
 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
@@ -7052,7 +7050,6 @@ trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
 struct bufobj *bo;
 struct vnode *vp;
 struct buf *bp;
-struct fs *fs;
 int blkoff;
 /*
@@ -7061,7 +7058,6 @@ trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
 * Once they are all there, walk the list and get rid of
 * any dependencies.
 */
-fs = ip->i_fs;
 vp = ITOV(ip);
 bo = &vp->v_bufobj;
 BO_LOCK(bo);
@@ -9493,12 +9489,10 @@ handle_written_sbdep(sbdep, bp)
 struct buf *bp;
 {
 struct inodedep *inodedep;
-struct mount *mp;
 struct fs *fs;
 LOCK_OWNED(sbdep->sb_ump);
 fs = sbdep->sb_fs;
-mp = UFSTOVFS(sbdep->sb_ump);
 /*
 * If the superblock doesn't match the in-memory list start over.
 */

View File

@@ -177,7 +177,6 @@ ffs_susp_rdwr(struct cdev *dev, struct uio *uio, int ioflag)
 static int
 ffs_susp_suspend(struct mount *mp)
 {
-struct fs *fs;
 struct ufsmount *ump;
 int error;
@@ -189,7 +188,6 @@ ffs_susp_suspend(struct mount *mp)
 return (EBUSY);
 ump = VFSTOUFS(mp);
-fs = ump->um_fs;
 /*
 * Make sure the calling thread is permitted to access the mounted

View File

@@ -1486,7 +1486,7 @@ ffs_sync(mp, waitfor)
 struct inode *ip;
 struct ufsmount *ump = VFSTOUFS(mp);
 struct fs *fs;
-int error, count, wait, lockreq, allerror = 0;
+int error, count, lockreq, allerror = 0;
 int suspend;
 int suspended;
 int secondary_writes;
@@ -1495,7 +1494,6 @@ ffs_sync(mp, waitfor)
 int softdep_accdeps;
 struct bufobj *bo;
-wait = 0;
 suspend = 0;
 suspended = 0;
 td = curthread;
@@ -1517,10 +1516,8 @@ ffs_sync(mp, waitfor)
 suspend = 1;
 waitfor = MNT_WAIT;
 }
-if (waitfor == MNT_WAIT) {
-wait = 1;
+if (waitfor == MNT_WAIT)
 lockreq = LK_EXCLUSIVE;
-}
 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
 loop:
 /* Grab snapshot of secondary write counts */
@@ -2024,7 +2021,6 @@ static int
 ffs_bufwrite(struct buf *bp)
 {
 struct buf *newbp;
-int oldflags;
 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 if (bp->b_flags & B_INVAL) {
@@ -2032,8 +2028,6 @@ ffs_bufwrite(struct buf *bp)
 return (0);
 }
-oldflags = bp->b_flags;
 if (!BUF_ISLOCKED(bp))
 panic("bufwrite: buffer is not busy???");
 /*

View File

@@ -1366,11 +1366,6 @@ struct vop_openextattr_args {
 };
 */
 {
-struct inode *ip;
-struct fs *fs;
-ip = VTOI(ap->a_vp);
-fs = ip->i_fs;
 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 return (EOPNOTSUPP);
@@ -1394,11 +1389,6 @@ struct vop_closeextattr_args {
 };
 */
 {
-struct inode *ip;
-struct fs *fs;
-ip = VTOI(ap->a_vp);
-fs = ip->i_fs;
 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 return (EOPNOTSUPP);
@@ -1512,13 +1502,11 @@ vop_getextattr {
 */
 {
 struct inode *ip;
-struct fs *fs;
 u_char *eae, *p;
 unsigned easize;
 int error, ealen;
 ip = VTOI(ap->a_vp);
-fs = ip->i_fs;
 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 return (EOPNOTSUPP);
@@ -1567,14 +1555,12 @@ vop_listextattr {
 */
 {
 struct inode *ip;
-struct fs *fs;
 u_char *eae, *p, *pe, *pn;
 unsigned easize;
 uint32_t ul;
 int error, ealen;
 ip = VTOI(ap->a_vp);
-fs = ip->i_fs;
 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 return (EOPNOTSUPP);

View File

@@ -114,7 +114,6 @@ ufs_bmaparray(vp, bn, bnp, nbp, runp, runb)
 struct buf *bp;
 struct ufsmount *ump;
 struct mount *mp;
-struct vnode *devvp;
 struct indir a[NIADDR+1], *ap;
 ufs2_daddr_t daddr;
 ufs_lbn_t metalbn;
@@ -125,7 +124,6 @@ ufs_bmaparray(vp, bn, bnp, nbp, runp, runb)
 ip = VTOI(vp);
 mp = vp->v_mount;
 ump = VFSTOUFS(mp);
-devvp = ump->um_devvp;
 if (runp) {
 maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;

View File

@@ -190,9 +190,7 @@ ufsdirhash_create(struct inode *ip)
 struct dirhash *ndh;
 struct dirhash *dh;
 struct vnode *vp;
-int error;
-error = 0;
 ndh = dh = NULL;
 vp = ip->i_vnode;
 for (;;) {
@@ -274,11 +272,9 @@ static struct dirhash *
 ufsdirhash_acquire(struct inode *ip)
 {
 struct dirhash *dh;
-struct vnode *vp;
 ASSERT_VOP_ELOCKED(ip->i_vnode, __FUNCTION__);
-vp = ip->i_vnode;
 dh = ip->i_dirhash;
 if (dh == NULL)
 return (NULL);

View File

@@ -818,7 +818,6 @@ dmar_bus_task_dmamap(void *arg, int pending)
 struct bus_dma_tag_dmar *tag;
 struct bus_dmamap_dmar *map;
 struct dmar_unit *unit;
-struct dmar_ctx *ctx;
 unit = arg;
 DMAR_LOCK(unit);
@@ -826,7 +825,6 @@ dmar_bus_task_dmamap(void *arg, int pending)
 TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
 DMAR_UNLOCK(unit);
 tag = map->tag;
-ctx = map->tag->ctx;
 map->cansleep = true;
 map->locked = false;
 bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map,
@@ -847,9 +845,7 @@ dmar_bus_task_dmamap(void *arg, int pending)
 static void
 dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map)
 {
-struct dmar_ctx *ctx;
-ctx = map->tag->ctx;
 map->locked = false;
 DMAR_LOCK(unit);
 TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);

View File

@@ -108,7 +108,7 @@ static void
 ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
 dmar_gaddr_t addr)
 {
-vm_page_t m, m1;
+vm_page_t m1;
 dmar_pte_t *pte;
 struct sf_buf *sf;
 dmar_gaddr_t f, pg_sz;
@@ -118,7 +118,7 @@ ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
 VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
 if (addr >= tbl->maxaddr)
 return;
-m = dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK |
+(void)dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK |
 DMAR_PGF_ZERO);
 base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
 pg_sz = pglvl_page_size(tbl->pglvl, lvl);
@@ -598,7 +598,7 @@ ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
 dmar_pte_t *pte;
 struct sf_buf *sf;
 vm_pindex_t idx;
-dmar_gaddr_t pg_sz, base1, size1;
+dmar_gaddr_t pg_sz;
 int lvl;
 DMAR_CTX_ASSERT_PGLOCKED(ctx);
@@ -625,8 +625,6 @@ ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
 KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
 pg_sz = 0; /* silence gcc */
-base1 = base;
-size1 = size;
 flags |= DMAR_PGF_OBJL;
 TD_PREP_PINNED_ASSERT;