Reduce the number of opens by GEOM RAID during provider taste.
Instead of opening/closing the provider in each of the metadata classes, do it only once in the core code. Since for SCSI disks an open/close means sending some SCSI commands to the device, this change reduces taste time. MFC after: 2 weeks. Sponsored by: iXsystems, Inc.
This commit is contained in:
parent
12ae7f74a0
commit
b384f8775f
@ -2251,6 +2251,8 @@ g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
|
||||
return (NULL);
|
||||
G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);
|
||||
|
||||
geom = NULL;
|
||||
status = G_RAID_MD_TASTE_FAIL;
|
||||
gp = g_new_geomf(mp, "raid:taste");
|
||||
/*
|
||||
* This orphan function should be never called.
|
||||
@ -2259,8 +2261,9 @@ g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
|
||||
cp = g_new_consumer(gp);
|
||||
cp->flags |= G_CF_DIRECT_RECEIVE;
|
||||
g_attach(cp, pp);
|
||||
if (g_access(cp, 1, 0, 0) != 0)
|
||||
goto ofail;
|
||||
|
||||
geom = NULL;
|
||||
LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
|
||||
if (!class->mdc_enable)
|
||||
continue;
|
||||
@ -2276,6 +2279,9 @@ g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
|
||||
break;
|
||||
}
|
||||
|
||||
if (status == G_RAID_MD_TASTE_FAIL)
|
||||
(void)g_access(cp, -1, 0, 0);
|
||||
ofail:
|
||||
g_detach(cp);
|
||||
g_destroy_consumer(cp);
|
||||
g_destroy_geom(gp);
|
||||
|
@ -2120,13 +2120,10 @@ g_raid_md_taste_ddf(struct g_raid_md_object *md, struct g_class *mp,
|
||||
pp = cp->provider;
|
||||
|
||||
/* Read metadata from device. */
|
||||
if (g_access(cp, 1, 0, 0) != 0)
|
||||
return (G_RAID_MD_TASTE_FAIL);
|
||||
g_topology_unlock();
|
||||
bzero(&meta, sizeof(meta));
|
||||
error = ddf_meta_read(cp, &meta);
|
||||
g_topology_lock();
|
||||
g_access(cp, -1, 0, 0);
|
||||
if (error != 0)
|
||||
return (G_RAID_MD_TASTE_FAIL);
|
||||
be = meta.bigendian;
|
||||
@ -2164,6 +2161,9 @@ g_raid_md_taste_ddf(struct g_raid_md_object *md, struct g_class *mp,
|
||||
geom = sc->sc_geom;
|
||||
}
|
||||
|
||||
/* There is no return after this point, so we close passed consumer. */
|
||||
g_access(cp, -1, 0, 0);
|
||||
|
||||
rcp = g_new_consumer(geom);
|
||||
rcp->flags |= G_CF_DIRECT_RECEIVE;
|
||||
g_attach(rcp, pp);
|
||||
|
@ -1382,8 +1382,6 @@ g_raid_md_taste_intel(struct g_raid_md_object *md, struct g_class *mp,
|
||||
meta = NULL;
|
||||
vendor = 0xffff;
|
||||
disk_pos = 0;
|
||||
if (g_access(cp, 1, 0, 0) != 0)
|
||||
return (G_RAID_MD_TASTE_FAIL);
|
||||
g_topology_unlock();
|
||||
error = g_raid_md_get_label(cp, serial, sizeof(serial));
|
||||
if (error != 0) {
|
||||
@ -1396,7 +1394,6 @@ g_raid_md_taste_intel(struct g_raid_md_object *md, struct g_class *mp,
|
||||
g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
|
||||
meta = intel_meta_read(cp);
|
||||
g_topology_lock();
|
||||
g_access(cp, -1, 0, 0);
|
||||
if (meta == NULL) {
|
||||
if (g_raid_aggressive_spare) {
|
||||
if (vendor != 0x8086) {
|
||||
@ -1476,6 +1473,9 @@ search:
|
||||
G_RAID_DEBUG1(1, sc, "root_mount_hold %p", mdi->mdio_rootmount);
|
||||
}
|
||||
|
||||
/* There is no return after this point, so we close passed consumer. */
|
||||
g_access(cp, -1, 0, 0);
|
||||
|
||||
rcp = g_new_consumer(geom);
|
||||
rcp->flags |= G_CF_DIRECT_RECEIVE;
|
||||
g_attach(rcp, pp);
|
||||
@ -1512,7 +1512,6 @@ search:
|
||||
return (result);
|
||||
fail2:
|
||||
g_topology_lock();
|
||||
g_access(cp, -1, 0, 0);
|
||||
fail1:
|
||||
free(meta, M_MD_INTEL);
|
||||
return (G_RAID_MD_TASTE_FAIL);
|
||||
|
@ -837,15 +837,12 @@ g_raid_md_taste_jmicron(struct g_raid_md_object *md, struct g_class *mp,
|
||||
/* Read metadata from device. */
|
||||
meta = NULL;
|
||||
vendor = 0xffff;
|
||||
if (g_access(cp, 1, 0, 0) != 0)
|
||||
return (G_RAID_MD_TASTE_FAIL);
|
||||
g_topology_unlock();
|
||||
len = 2;
|
||||
if (pp->geom->rank == 1)
|
||||
g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
|
||||
meta = jmicron_meta_read(cp);
|
||||
g_topology_lock();
|
||||
g_access(cp, -1, 0, 0);
|
||||
if (meta == NULL) {
|
||||
if (g_raid_aggressive_spare) {
|
||||
if (vendor == 0x197b) {
|
||||
@ -922,6 +919,9 @@ search:
|
||||
G_RAID_DEBUG1(1, sc, "root_mount_hold %p", mdi->mdio_rootmount);
|
||||
}
|
||||
|
||||
/* There is no return after this point, so we close passed consumer. */
|
||||
g_access(cp, -1, 0, 0);
|
||||
|
||||
rcp = g_new_consumer(geom);
|
||||
rcp->flags |= G_CF_DIRECT_RECEIVE;
|
||||
g_attach(rcp, pp);
|
||||
|
@ -841,15 +841,12 @@ g_raid_md_taste_nvidia(struct g_raid_md_object *md, struct g_class *mp,
|
||||
/* Read metadata from device. */
|
||||
meta = NULL;
|
||||
vendor = 0xffff;
|
||||
if (g_access(cp, 1, 0, 0) != 0)
|
||||
return (G_RAID_MD_TASTE_FAIL);
|
||||
g_topology_unlock();
|
||||
len = 2;
|
||||
if (pp->geom->rank == 1)
|
||||
g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
|
||||
meta = nvidia_meta_read(cp);
|
||||
g_topology_lock();
|
||||
g_access(cp, -1, 0, 0);
|
||||
if (meta == NULL) {
|
||||
if (g_raid_aggressive_spare) {
|
||||
if (vendor == 0x10de) {
|
||||
@ -918,6 +915,9 @@ search:
|
||||
G_RAID_DEBUG1(1, sc, "root_mount_hold %p", mdi->mdio_rootmount);
|
||||
}
|
||||
|
||||
/* There is no return after this point, so we close passed consumer. */
|
||||
g_access(cp, -1, 0, 0);
|
||||
|
||||
rcp = g_new_consumer(geom);
|
||||
rcp->flags |= G_CF_DIRECT_RECEIVE;
|
||||
g_attach(rcp, pp);
|
||||
|
@ -1106,15 +1106,12 @@ g_raid_md_taste_promise(struct g_raid_md_object *md, struct g_class *mp,
|
||||
/* Read metadata from device. */
|
||||
meta = NULL;
|
||||
vendor = 0xffff;
|
||||
if (g_access(cp, 1, 0, 0) != 0)
|
||||
return (G_RAID_MD_TASTE_FAIL);
|
||||
g_topology_unlock();
|
||||
len = 2;
|
||||
if (pp->geom->rank == 1)
|
||||
g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
|
||||
subdisks = promise_meta_read(cp, metaarr);
|
||||
g_topology_lock();
|
||||
g_access(cp, -1, 0, 0);
|
||||
if (subdisks == 0) {
|
||||
if (g_raid_aggressive_spare) {
|
||||
if (vendor == 0x105a || vendor == 0x1002) {
|
||||
@ -1175,6 +1172,9 @@ search:
|
||||
geom = sc->sc_geom;
|
||||
}
|
||||
|
||||
/* There is no return after this point, so we close passed consumer. */
|
||||
g_access(cp, -1, 0, 0);
|
||||
|
||||
rcp = g_new_consumer(geom);
|
||||
rcp->flags |= G_CF_DIRECT_RECEIVE;
|
||||
g_attach(rcp, pp);
|
||||
|
@ -923,15 +923,12 @@ g_raid_md_taste_sii(struct g_raid_md_object *md, struct g_class *mp,
|
||||
/* Read metadata from device. */
|
||||
meta = NULL;
|
||||
vendor = 0xffff;
|
||||
if (g_access(cp, 1, 0, 0) != 0)
|
||||
return (G_RAID_MD_TASTE_FAIL);
|
||||
g_topology_unlock();
|
||||
len = 2;
|
||||
if (pp->geom->rank == 1)
|
||||
g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
|
||||
meta = sii_meta_read(cp);
|
||||
g_topology_lock();
|
||||
g_access(cp, -1, 0, 0);
|
||||
if (meta == NULL) {
|
||||
if (g_raid_aggressive_spare) {
|
||||
if (vendor == 0x1095) {
|
||||
@ -1011,6 +1008,9 @@ search:
|
||||
G_RAID_DEBUG1(1, sc, "root_mount_hold %p", mdi->mdio_rootmount);
|
||||
}
|
||||
|
||||
/* There is no return after this point, so we close passed consumer. */
|
||||
g_access(cp, -1, 0, 0);
|
||||
|
||||
rcp = g_new_consumer(geom);
|
||||
rcp->flags |= G_CF_DIRECT_RECEIVE;
|
||||
g_attach(rcp, pp);
|
||||
|
Loading…
x
Reference in New Issue
Block a user