Don't grab Giant around malloc(9) and free(9).

Don't grab Giant around wakeup(9).
Don't print verbose messages about each device found in geom_dev.
Various cleanups.

Sponsored by: DARPA & NAI Labs.
phk 2002-05-20 10:03:15 +00:00
parent 110842c75e
commit 0d0d0abc10
4 changed files with 44 additions and 59 deletions
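
For orientation, here is a minimal sketch, not taken from the tree, of how the GEOM allocation wrappers read once Giant is no longer taken around malloc(9) and free(9); it follows the first two hunks below. The g_sanity() prototype and the M_GEOM declaration are assumptions here and come from the GEOM headers in the real code; g_sanity() is GEOM's internal consistency check and is unrelated to the locking change.

#include <sys/param.h>
#include <sys/malloc.h>

MALLOC_DECLARE(M_GEOM);		/* malloc(9) type, defined in GEOM proper */
void g_sanity(void *ptr);	/* GEOM consistency check (prototype assumed) */

void *
g_malloc(int size, int flags)
{
	void *p;

	/* malloc(9) no longer requires Giant, so it is called directly. */
	p = malloc(size, M_GEOM, flags);
	g_sanity(p);
	return (p);
}

void
g_free(void *ptr)
{

	g_sanity(ptr);
	/* free(9) likewise needs no Giant. */
	free(ptr, M_GEOM);
}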

View File

@@ -251,9 +251,7 @@ g_malloc(int size, int flags)
{
void *p;
mtx_lock(&Giant);
p = malloc(size, M_GEOM, flags);
mtx_unlock(&Giant);
g_sanity(p);
/* printf("malloc(%d, %x) -> %p\n", size, flags, p); */
return (p);
@@ -264,15 +262,28 @@ g_free(void *ptr)
{
g_sanity(ptr);
/* printf("free(%p)\n", ptr); */
mtx_lock(&Giant);
free(ptr, M_GEOM);
mtx_unlock(&Giant);
}
extern struct sx topology_lock;
#define g_topology_lock() do { mtx_assert(&Giant, MA_NOTOWNED); sx_xlock(&topology_lock); } while (0)
#define g_topology_unlock() do { g_sanity(NULL); sx_xunlock(&topology_lock); } while (0)
#define g_topology_assert() do { g_sanity(NULL); sx_assert(&topology_lock, SX_XLOCKED); } while (0)
#define g_topology_lock() \
do { \
mtx_assert(&Giant, MA_NOTOWNED); \
sx_xlock(&topology_lock); \
} while (0)
#define g_topology_unlock() \
do { \
g_sanity(NULL); \
sx_xunlock(&topology_lock); \
} while (0)
#define g_topology_assert() \
do { \
g_sanity(NULL); \
sx_assert(&topology_lock, SX_XLOCKED); \
} while (0)
#define DECLARE_GEOM_CLASS(class, name) \
static void \
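
A brief usage note on the reworked topology-lock macros above: callers that change the topology take the sx(9)-based topology_lock and must not hold Giant, and the macros now assert both conditions. A small sketch, with a hypothetical function name and assuming this header is included:

static void
example_topology_change(void)
{

	g_topology_lock();	/* asserts Giant is not held, then sx_xlock()s topology_lock */
	g_topology_assert();	/* we hold topology_lock exclusively; also runs g_sanity(NULL) */
	/* ... create or destroy geoms, providers and consumers here ... */
	g_topology_unlock();	/* runs g_sanity(NULL) before sx_xunlock() */
}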

View File

@@ -114,9 +114,9 @@ g_dev_register_cloner(void *foo __unused)
{
static int once;
/* XXX: why would this happen more than once ?? */
if (!once) {
if (!once)
EVENTHANDLER_REGISTER(dev_clone, g_dev_clone, 0, 1000);
EVENTHANDLER_REGISTER(dev_clone, g_dev_clone, 0, 1000);
once++;
}
}
@@ -129,9 +129,7 @@ g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
struct g_geom *gp;
struct g_consumer *cp;
static int unit;
u_int secsize;
off_t mediasize;
int error, j;
int error;
dev_t dev;
g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
@@ -142,45 +140,23 @@ g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
gp = g_new_geomf(mp, pp->name);
gp->orphan = g_dev_orphan;
cp = g_new_consumer(gp);
g_attach(cp, pp);
error = g_access_rel(cp, 1, 0, 0);
error = g_attach(cp, pp);
KASSERT(error == 0,
("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
/*
* XXX: I'm not 100% sure we can call make_dev(9) without Giant
* yet. Once we can, we don't need to drop topology here either.
*/
g_topology_unlock();
if (!error) {
j = sizeof secsize;
error = g_io_getattr("GEOM::sectorsize", cp, &j, &secsize);
if (error) {
secsize = 512;
printf("g_dev_taste: error %d Sectors are %d bytes\n",
error, secsize);
}
j = sizeof mediasize;
error = g_io_getattr("GEOM::mediasize", cp, &j, &mediasize);
if (error) {
mediasize = 0;
printf("g_dev_taste: error %d Mediasize is %lld bytes\n",
error, (long long)mediasize);
}
g_topology_lock();
g_access_rel(cp, -1, 0, 0);
g_topology_unlock();
} else {
secsize = 512;
mediasize = 0;
}
mtx_lock(&Giant);
if (mediasize != 0)
printf("GEOM: \"%s\" %lld bytes in %lld sectors of %u bytes\n",
pp->name, (long long)mediasize,
(long long)mediasize / secsize, secsize);
else
printf("GEOM: \"%s\" (size unavailable)\n", pp->name);
dev = make_dev(&g_dev_cdevsw, unit++,
dev = make_dev(&g_dev_cdevsw, unit2minor(unit++),
UID_ROOT, GID_WHEEL, 0600, gp->name);
mtx_unlock(&Giant);
g_topology_lock();
gp->softc = dev;
dev->si_drv1 = gp;
dev->si_drv2 = cp;
mtx_unlock(&Giant);
g_topology_lock();
return (gp);
}
@@ -398,6 +374,16 @@ g_dev_strategy(struct bio *bp)
g_io_request(bp2, cp);
}
/*
* g_dev_orphan()
*
* Called from below when the provider orphaned us. It is our responsibility
* to get the access counts back to zero, until we do so the stack below will
* not unravel. We must clear the kernel-dump settings, if this is the
* current dumpdev. We call destroy_dev(9) to send our dev_t the way of
* punched cards and if we have non-zero access counts, we call down with
* them negated before we detach and selfdestruct.
*/
static void
g_dev_orphan(struct g_consumer *cp)
@@ -413,6 +399,7 @@ g_dev_orphan(struct g_consumer *cp)
dev = gp->softc;
if (dev->si_flags & SI_DUMPDEV)
set_dumper(NULL);
/* XXX: we may need Giant for now */
destroy_dev(dev);
if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
g_access_rel(cp, -cp->acr, -cp->acw, -cp->ace);
@@ -421,5 +408,4 @@ g_dev_orphan(struct g_consumer *cp)
g_destroy_geom(gp);
}
DECLARE_GEOM_CLASS(g_dev_class, g_dev)
DECLARE_GEOM_CLASS(g_dev_class, g_dev);

View File

@@ -98,9 +98,7 @@ g_orphan_provider(struct g_provider *pp, int error)
mtx_lock(&g_doorlock);
TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
mtx_unlock(&g_doorlock);
mtx_lock(&Giant);
wakeup(&g_wait_event);
mtx_unlock(&Giant);
}
/*
@@ -238,9 +236,7 @@ one_event(void)
g_do_event(ep);
g_pending_events--;
if (g_pending_events == 0) {
mtx_lock(&Giant);
wakeup(&g_pending_events);
mtx_unlock(&Giant);
}
g_topology_unlock();
g_destroy_event(ep);
@@ -287,9 +283,7 @@ g_post_event(enum g_events ev, struct g_class *mp, struct g_geom *gp, struct g_p
}
g_pending_events++;
TAILQ_INSERT_TAIL(&g_events, ep, events);
mtx_lock(&Giant);
wakeup(&g_wait_event);
mtx_unlock(&Giant);
}
void
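
The wakeup(9) calls in the hunks above no longer take Giant; the g_orphan_provider() hunk shows the typical producer side, where the queue is guarded by its own mutex (g_doorlock) and the wakeup is issued after that mutex is dropped. A minimal sketch of that producer pattern, with hypothetical queue, lock and channel names; the mutex is assumed to be initialized elsewhere with mtx_init(9):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

struct example_event {
	TAILQ_ENTRY(example_event)	link;
};

static TAILQ_HEAD(, example_event) example_queue =
    TAILQ_HEAD_INITIALIZER(example_queue);
static struct mtx example_lock;		/* initialized elsewhere with mtx_init(9) */
static int example_wchan;		/* sleep/wakeup channel */

static void
example_post(struct example_event *ep)
{

	/* The queue has its own lock; Giant plays no part in it. */
	mtx_lock(&example_lock);
	TAILQ_INSERT_TAIL(&example_queue, ep, link);
	mtx_unlock(&example_lock);

	/* wakeup(9) itself needs no Giant; the consumer sleeps on the channel. */
	wakeup(&example_wchan);
}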

View File

@@ -302,9 +302,7 @@ g_io_request(struct bio *bp, struct g_consumer *cp)
bp, bp->bio_from, bp->bio_from->geom->name,
bp->bio_to, bp->bio_to->name, bp->bio_cmd);
g_bioq_enqueue_tail(bp, &g_bio_run_down);
mtx_lock(&Giant);
wakeup(&g_wait_down);
mtx_unlock(&Giant);
}
void
@@ -319,9 +317,7 @@ g_io_deliver(struct bio *bp)
g_bioq_enqueue_tail(bp, &g_bio_run_up);
mtx_lock(&Giant);
wakeup(&g_wait_up);
mtx_unlock(&Giant);
}
void
@@ -355,9 +351,7 @@ g_io_schedule_up(struct thread *tp __unused)
if (bp->bio_done != NULL) {
bp->bio_done(bp);
} else {
mtx_lock(&Giant);
wakeup(bp);
mtx_unlock(&Giant);
}
}
}