Introduce and use delayed-destruction functionality from a pre-sync hook,
which means that devices will be destroyed on last close.

This fixes destruction order problems when, e.g., a RAID3 array is built on
top of RAID1 arrays.

Requested, reviewed and tested by:	ru
MFC after:	2 weeks
Pawel Jakub Dawidek 2006-04-10 10:32:22 +00:00
parent 281f5eff33
commit 712fe9bd7a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=157630
6 changed files with 174 additions and 111 deletions
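
Before the diff, a minimal, self-contained userland sketch of the delayed-destruction pattern this change implements; the names here (struct dev, dev_access(), dev_destroy_delayed()) are hypothetical stand-ins. The real code keeps the state in the G_MIRROR_DEVICE_FLAG_DESTROYING / G_RAID3_DEVICE_FLAG_DESTROYING bit, refuses new opens in g_mirror_access()/g_raid3_access(), stops any running synchronization and cancels pending events before marking the device, and posts a g_post_event() on last close instead of destroying directly.

/*
 * Sketch of delayed destruction: a destroy request on an open device only
 * marks it; the last close performs the real destruction.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	const char *name;
	int acr, acw, ace;	/* read/write/exclusive access counts */
	int destroying;		/* destroy requested while still open */
};

/* Tear the device down for real (stands in for the SOFT destroy path). */
static void
dev_destroy_now(struct dev *d)
{

	printf("Destroying %s (delayed).\n", d->name);
	free(d);
}

/* Destroy request: if the device is still open, only mark it. */
static int
dev_destroy_delayed(struct dev *d)
{

	if (d->acr != 0 || d->acw != 0 || d->ace != 0) {
		printf("Device %s will be destroyed on last close.\n", d->name);
		d->destroying = 1;
		return (EBUSY);
	}
	dev_destroy_now(d);
	return (0);
}

/* Access change: refuse new opens while destroying, destroy on last close. */
static int
dev_access(struct dev *d, int acr, int acw, int ace)
{

	if (d->destroying && (acr > 0 || acw > 0 || ace > 0))
		return (ENXIO);
	d->acr += acr;
	d->acw += acw;
	d->ace += ace;
	if (d->destroying && d->acr == 0 && d->acw == 0 && d->ace == 0)
		dev_destroy_now(d);	/* the real code posts an event here */
	return (0);
}

int
main(void)
{
	struct dev *d;

	d = calloc(1, sizeof(*d));
	if (d == NULL)
		return (1);
	d->name = "mirror/gm0";
	(void)dev_access(d, 1, 1, 0);	/* open for reading and writing */
	(void)dev_destroy_delayed(d);	/* returns EBUSY, device is marked */
	(void)dev_access(d, -1, -1, 0);	/* last close destroys the device */
	return (0);
}

Compiling this with cc and running it prints the "will be destroyed on last close" message followed by the delayed destruction, mirroring the EBUSY-then-destroy sequence in the diff below.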


@ -78,7 +78,7 @@ SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \
} while (0)
static eventhandler_tag g_mirror_pre_sync = NULL, g_mirror_post_sync = NULL;
static eventhandler_tag g_mirror_pre_sync = NULL;
static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
struct g_geom *gp);
@ -2697,11 +2697,37 @@ g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
return (0);
}
static void
g_mirror_destroy_delayed(void *arg, int flag)
{
struct g_mirror_softc *sc;
int error;
if (flag == EV_CANCEL) {
G_MIRROR_DEBUG(1, "Destroying canceled.");
return;
}
sc = arg;
g_topology_unlock();
sx_xlock(&sc->sc_lock);
KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0,
("DESTROY flag set on %s.", sc->sc_name));
KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0,
("DESTROYING flag not set on %s.", sc->sc_name));
G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name);
error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT);
if (error != 0) {
G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
sx_xunlock(&sc->sc_lock);
}
g_topology_lock();
}
static int
g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
{
struct g_mirror_softc *sc;
int dcr, dcw, dce;
int dcr, dcw, dce, error = 0;
g_topology_assert();
G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
@ -2712,21 +2738,32 @@ g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
dce = pp->ace + ace;
sc = pp->geom->softc;
if (sc == NULL || LIST_EMPTY(&sc->sc_disks) ||
(sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
if (acr <= 0 && acw <= 0 && ace <= 0)
return (0);
else
return (ENXIO);
}
if (dcw == 0 && !sc->sc_idle) {
KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
g_topology_unlock();
sx_xlock(&sc->sc_lock);
if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 ||
LIST_EMPTY(&sc->sc_disks)) {
if (acr > 0 || acw > 0 || ace > 0)
error = ENXIO;
goto end;
}
if (dcw == 0 && !sc->sc_idle)
g_mirror_idle(sc, dcw);
if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) {
if (acr > 0 || acw > 0 || ace > 0) {
error = ENXIO;
goto end;
}
if (dcr == 0 && dcw == 0 && dce == 0) {
g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK,
sc, NULL);
}
}
end:
sx_xunlock(&sc->sc_lock);
g_topology_lock();
}
return (0);
return (error);
}
static struct g_geom *
@ -2813,8 +2850,9 @@ g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
}
int
g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force)
g_mirror_destroy(struct g_mirror_softc *sc, int how)
{
struct g_mirror_disk *disk;
struct g_provider *pp;
g_topology_assert_not();
@ -2824,14 +2862,27 @@ g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force)
pp = sc->sc_provider;
if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
if (force) {
G_MIRROR_DEBUG(1, "Device %s is still open, so it "
"can't be definitely removed.", pp->name);
} else {
switch (how) {
case G_MIRROR_DESTROY_SOFT:
G_MIRROR_DEBUG(1,
"Device %s is still open (r%dw%de%d).", pp->name,
pp->acr, pp->acw, pp->ace);
return (EBUSY);
case G_MIRROR_DESTROY_DELAYED:
G_MIRROR_DEBUG(1,
"Device %s will be destroyed on last close.",
pp->name);
LIST_FOREACH(disk, &sc->sc_disks, d_next) {
if (disk->d_state ==
G_MIRROR_DISK_STATE_SYNCHRONIZING) {
g_mirror_sync_stop(disk, 1);
}
}
sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING;
return (EBUSY);
case G_MIRROR_DESTROY_HARD:
G_MIRROR_DEBUG(1, "Device %s is still open, so it "
"can't be definitely removed.", pp->name);
}
}
@ -2937,6 +2988,7 @@ g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
pp->name, gp->name, error);
if (LIST_EMPTY(&sc->sc_disks)) {
g_cancel_event(sc);
g_mirror_destroy(sc, 1);
g_topology_lock();
return (NULL);
@ -2958,6 +3010,7 @@ g_mirror_destroy_geom(struct gctl_req *req __unused,
g_topology_unlock();
sc = gp->softc;
sx_xlock(&sc->sc_lock);
g_cancel_event(sc);
error = g_mirror_destroy(gp->softc, 0);
if (error != 0)
sx_xunlock(&sc->sc_lock);
@ -3087,7 +3140,7 @@ g_mirror_shutdown_pre_sync(void *arg, int howto)
struct g_class *mp;
struct g_geom *gp, *gp2;
struct g_mirror_softc *sc;
struct g_mirror_disk *disk;
int error;
mp = arg;
DROP_GIANT();
@ -3095,12 +3148,14 @@ g_mirror_shutdown_pre_sync(void *arg, int howto)
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
if ((sc = gp->softc) == NULL)
continue;
/* Skip synchronization geom. */
if (gp == sc->sc_sync.ds_geom)
continue;
g_topology_unlock();
sx_xlock(&sc->sc_lock);
LIST_FOREACH(disk, &sc->sc_disks, d_next) {
if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
g_mirror_sync_stop(disk, 1);
}
g_cancel_event(sc);
error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
if (error != 0)
sx_xunlock(&sc->sc_lock);
g_topology_lock();
}
@ -3108,40 +3163,13 @@ g_mirror_shutdown_pre_sync(void *arg, int howto)
PICKUP_GIANT();
}
static void
g_mirror_shutdown_post_sync(void *arg, int howto)
{
struct g_class *mp;
struct g_geom *gp, *gp2;
struct g_mirror_softc *sc;
mp = arg;
DROP_GIANT();
g_topology_lock();
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
if ((sc = gp->softc) == NULL)
continue;
g_topology_unlock();
sx_xlock(&sc->sc_lock);
g_mirror_destroy(sc, 1);
g_topology_lock();
}
g_topology_unlock();
PICKUP_GIANT();
#if 0
tsleep(&gp, PRIBIO, "m:shutdown", hz * 20);
#endif
}
static void
g_mirror_init(struct g_class *mp)
{
g_mirror_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
g_mirror_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
if (g_mirror_pre_sync == NULL || g_mirror_post_sync == NULL)
if (g_mirror_pre_sync == NULL)
G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
}
@ -3151,8 +3179,6 @@ g_mirror_fini(struct g_class *mp)
if (g_mirror_pre_sync != NULL)
EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_mirror_pre_sync);
if (g_mirror_post_sync != NULL)
EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync);
}
DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);


@ -153,6 +153,7 @@ struct g_mirror_event {
#define G_MIRROR_DEVICE_FLAG_DESTROY 0x0100000000000000ULL
#define G_MIRROR_DEVICE_FLAG_WAIT 0x0200000000000000ULL
#define G_MIRROR_DEVICE_FLAG_DESTROYING 0x0400000000000000ULL
#define G_MIRROR_DEVICE_STATE_STARTING 0
#define G_MIRROR_DEVICE_STATE_RUNNING 1
@ -209,7 +210,10 @@ struct g_mirror_softc {
#define sc_name sc_geom->name
u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state);
int g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force);
#define G_MIRROR_DESTROY_SOFT 0
#define G_MIRROR_DESTROY_DELAYED 1
#define G_MIRROR_DESTROY_HARD 2
int g_mirror_destroy(struct g_mirror_softc *sc, int how);
int g_mirror_event_send(void *arg, int state, int flags);
struct g_mirror_metadata;
int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,


@ -628,6 +628,7 @@ g_mirror_ctl_stop(struct gctl_req *req, struct g_class *mp)
const char *name;
char param[16];
u_int i;
int how;
nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
if (nargs == NULL) {
@ -643,6 +644,10 @@ g_mirror_ctl_stop(struct gctl_req *req, struct g_class *mp)
gctl_error(req, "No '%s' argument.", "force");
return;
}
if (*force)
how = G_MIRROR_DESTROY_HARD;
else
how = G_MIRROR_DESTROY_SOFT;
for (i = 0; i < (u_int)*nargs; i++) {
snprintf(param, sizeof(param), "arg%u", i);
@ -656,7 +661,8 @@ g_mirror_ctl_stop(struct gctl_req *req, struct g_class *mp)
gctl_error(req, "No such device: %s.", name);
return;
}
error = g_mirror_destroy(sc, *force);
g_cancel_event(sc);
error = g_mirror_destroy(sc, how);
if (error != 0) {
gctl_error(req, "Cannot destroy device %s (error=%d).",
sc->sc_geom->name, error);


@ -97,7 +97,7 @@ SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \
} while (0)
static eventhandler_tag g_raid3_pre_sync = NULL, g_raid3_post_sync = NULL;
static eventhandler_tag g_raid3_pre_sync = NULL;
static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
struct g_geom *gp);
@ -2896,11 +2896,37 @@ g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
return (0);
}
static void
g_raid3_destroy_delayed(void *arg, int flag)
{
struct g_raid3_softc *sc;
int error;
if (flag == EV_CANCEL) {
G_RAID3_DEBUG(1, "Destroying canceled.");
return;
}
sc = arg;
g_topology_unlock();
sx_xlock(&sc->sc_lock);
KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0,
("DESTROY flag set on %s.", sc->sc_name));
KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0,
("DESTROYING flag not set on %s.", sc->sc_name));
G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name);
error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT);
if (error != 0) {
G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
sx_xunlock(&sc->sc_lock);
}
g_topology_lock();
}
static int
g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
{
struct g_raid3_softc *sc;
int dcr, dcw, dce, error;
int dcr, dcw, dce, error = 0;
g_topology_assert();
G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
@ -2910,17 +2936,12 @@ g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
dcw = pp->acw + acw;
dce = pp->ace + ace;
error = 0;
sc = pp->geom->softc;
if (sc != NULL) {
if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0)
sc = NULL;
else {
KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
g_topology_unlock();
sx_xlock(&sc->sc_lock);
}
}
if (sc == NULL ||
if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 ||
g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) {
if (acr > 0 || acw > 0 || ace > 0)
error = ENXIO;
@ -2928,11 +2949,19 @@ g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
}
if (dcw == 0 && !sc->sc_idle)
g_raid3_idle(sc, dcw);
if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) {
if (acr > 0 || acw > 0 || ace > 0) {
error = ENXIO;
goto end;
}
if (dcr == 0 && dcw == 0 && dce == 0) {
g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK,
sc, NULL);
}
}
end:
if (sc != NULL) {
sx_xunlock(&sc->sc_lock);
g_topology_lock();
}
return (error);
}
@ -3049,7 +3078,7 @@ g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
}
int
g_raid3_destroy(struct g_raid3_softc *sc, boolean_t force)
g_raid3_destroy(struct g_raid3_softc *sc, int how)
{
struct g_provider *pp;
@ -3060,14 +3089,24 @@ g_raid3_destroy(struct g_raid3_softc *sc, boolean_t force)
pp = sc->sc_provider;
if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
if (force) {
G_RAID3_DEBUG(1, "Device %s is still open, so it "
"can't be definitely removed.", pp->name);
} else {
switch (how) {
case G_RAID3_DESTROY_SOFT:
G_RAID3_DEBUG(1,
"Device %s is still open (r%dw%de%d).", pp->name,
pp->acr, pp->acw, pp->ace);
return (EBUSY);
case G_RAID3_DESTROY_DELAYED:
G_RAID3_DEBUG(1,
"Device %s will be destroyed on last close.",
pp->name);
if (sc->sc_syncdisk != NULL)
g_raid3_sync_stop(sc, 1);
sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
return (EBUSY);
case G_RAID3_DESTROY_HARD:
G_RAID3_DEBUG(1, "Device %s is still open, so it "
"can't be definitely removed.", pp->name);
break;
}
}
@ -3168,6 +3207,7 @@ g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
pp->name, gp->name, error);
if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
sc->sc_ndisks) {
g_cancel_event(sc);
g_raid3_destroy(sc, 1);
g_topology_lock();
return (NULL);
@ -3189,6 +3229,7 @@ g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
g_topology_unlock();
sc = gp->softc;
sx_xlock(&sc->sc_lock);
g_cancel_event(sc);
error = g_raid3_destroy(gp->softc, 0);
if (error != 0)
sx_xunlock(&sc->sc_lock);
@ -3325,6 +3366,7 @@ g_raid3_shutdown_pre_sync(void *arg, int howto)
struct g_class *mp;
struct g_geom *gp, *gp2;
struct g_raid3_softc *sc;
int error;
mp = arg;
DROP_GIANT();
@ -3332,10 +3374,14 @@ g_raid3_shutdown_pre_sync(void *arg, int howto)
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
if ((sc = gp->softc) == NULL)
continue;
/* Skip synchronization geom. */
if (gp == sc->sc_sync.ds_geom)
continue;
g_topology_unlock();
sx_xlock(&sc->sc_lock);
if (sc->sc_syncdisk != NULL)
g_raid3_sync_stop(sc, 1);
g_cancel_event(sc);
error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
if (error != 0)
sx_xunlock(&sc->sc_lock);
g_topology_lock();
}
@ -3343,40 +3389,13 @@ g_raid3_shutdown_pre_sync(void *arg, int howto)
PICKUP_GIANT();
}
static void
g_raid3_shutdown_post_sync(void *arg, int howto)
{
struct g_class *mp;
struct g_geom *gp, *gp2;
struct g_raid3_softc *sc;
mp = arg;
DROP_GIANT();
g_topology_lock();
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
if ((sc = gp->softc) == NULL)
continue;
g_topology_unlock();
sx_xlock(&sc->sc_lock);
g_raid3_destroy(sc, 1);
g_topology_lock();
}
g_topology_unlock();
PICKUP_GIANT();
#if 0
tsleep(&gp, PRIBIO, "r3:shutdown", hz * 20);
#endif
}
static void
g_raid3_init(struct g_class *mp)
{
g_raid3_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
g_raid3_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
if (g_raid3_pre_sync == NULL || g_raid3_post_sync == NULL)
if (g_raid3_pre_sync == NULL)
G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
}
@ -3386,8 +3405,6 @@ g_raid3_fini(struct g_class *mp)
if (g_raid3_pre_sync != NULL)
EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_raid3_pre_sync);
if (g_raid3_post_sync != NULL)
EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync);
}
DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);


@ -160,6 +160,7 @@ struct g_raid3_event {
#define G_RAID3_DEVICE_FLAG_DESTROY 0x0100000000000000ULL
#define G_RAID3_DEVICE_FLAG_WAIT 0x0200000000000000ULL
#define G_RAID3_DEVICE_FLAG_DESTROYING 0x0400000000000000ULL
#define G_RAID3_DEVICE_STATE_STARTING 0
#define G_RAID3_DEVICE_STATE_DEGRADED 1
@ -243,7 +244,10 @@ struct g_raid3_softc {
const char *g_raid3_get_diskname(struct g_raid3_disk *disk);
u_int g_raid3_ndisks(struct g_raid3_softc *sc, int state);
int g_raid3_destroy(struct g_raid3_softc *sc, boolean_t force);
#define G_RAID3_DESTROY_SOFT 0
#define G_RAID3_DESTROY_DELAYED 1
#define G_RAID3_DESTROY_HARD 2
int g_raid3_destroy(struct g_raid3_softc *sc, int how);
int g_raid3_event_send(void *arg, int state, int flags);
struct g_raid3_metadata;
int g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,


@ -310,6 +310,7 @@ g_raid3_ctl_stop(struct gctl_req *req, struct g_class *mp)
const char *name;
char param[16];
u_int i;
int how;
nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
if (nargs == NULL) {
@ -325,6 +326,10 @@ g_raid3_ctl_stop(struct gctl_req *req, struct g_class *mp)
gctl_error(req, "No '%s' argument.", "force");
return;
}
if (*force)
how = G_RAID3_DESTROY_HARD;
else
how = G_RAID3_DESTROY_SOFT;
for (i = 0; i < (u_int)*nargs; i++) {
snprintf(param, sizeof(param), "arg%u", i);
@ -338,7 +343,8 @@ g_raid3_ctl_stop(struct gctl_req *req, struct g_class *mp)
gctl_error(req, "No such device: %s.", name);
return;
}
error = g_raid3_destroy(sc, *force);
g_cancel_event(sc);
error = g_raid3_destroy(sc, how);
if (error != 0) {
gctl_error(req, "Cannot destroy device %s (error=%d).",
sc->sc_geom->name, error);