/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devicestat.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef KDB
#include <sys/kdb.h>
#endif

struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes);
static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms);
char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim;
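
/*
 * Argument bundle for the class-load, retaste and resize events queued to
 * the GEOM event thread.  "post" is set when the event was posted without
 * waiting (during boot), in which case the event handler frees the bundle
 * itself and cannot report an error back.
 */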
struct g_hh00 {
	struct g_class *mp;
	struct g_provider *pp;
	off_t size;
	int error;
	int post;
};

/*
 * This event offers a new class a chance to taste all preexisting providers.
 */
static void
g_load_class(void *arg, int flag)
{
	struct g_hh00 *hh;
	struct g_class *mp2, *mp;
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();
	if (flag == EV_CANCEL)	/* XXX: can't happen ? */
		return;
	if (g_shutdown)
		return;

	hh = arg;
	mp = hh->mp;
	hh->error = 0;
	if (hh->post) {
		g_free(hh);
		hh = NULL;
	}
	g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name);
	KASSERT(mp->name != NULL && *mp->name != '\0',
	    ("GEOM class has no name"));
	LIST_FOREACH(mp2, &g_classes, class) {
		if (mp2 == mp) {
			printf("The GEOM class %s is already loaded.\n",
			    mp2->name);
			if (hh != NULL)
				hh->error = EEXIST;
			return;
		} else if (strcmp(mp2->name, mp->name) == 0) {
			printf("A GEOM class %s is already loaded.\n",
			    mp2->name);
			if (hh != NULL)
				hh->error = EEXIST;
			return;
		}
	}

	LIST_INIT(&mp->geom);
	LIST_INSERT_HEAD(&g_classes, mp, class);
	if (mp->init != NULL)
		mp->init(mp);
	if (mp->taste == NULL)
		return;
	LIST_FOREACH(mp2, &g_classes, class) {
		if (mp == mp2)
			continue;
		LIST_FOREACH(gp, &mp2->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				mp->taste(mp, pp, 0);
				g_topology_assert();
			}
		}
	}
}
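
/*
 * Tear down every geom in a class and remove the class from the class list.
 * Returns EBUSY while anything in the class is still open, and EOPNOTSUPP
 * if geoms remain but the class has no destroy_geom method to get rid of
 * them.
 */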
static int
g_unload_class(struct g_class *mp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	g_topology_lock();
	g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name);
retry:
	G_VALID_CLASS(mp);
	LIST_FOREACH(gp, &mp->geom, geom) {
		/* We refuse to unload if anything is open */
		LIST_FOREACH(pp, &gp->provider, provider)
			if (pp->acr || pp->acw || pp->ace) {
				g_topology_unlock();
				return (EBUSY);
			}
		LIST_FOREACH(cp, &gp->consumer, consumer)
			if (cp->acr || cp->acw || cp->ace) {
				g_topology_unlock();
				return (EBUSY);
			}
		/* If the geom is withering, wait for it to finish. */
		if (gp->flags & G_GEOM_WITHER) {
			g_topology_sleep(mp, 1);
			goto retry;
		}
	}

	/*
	 * We allow unloading if we have no geoms, or a class
	 * method we can use to get rid of them.
	 */
	if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) {
		g_topology_unlock();
		return (EOPNOTSUPP);
	}

	/* Bar new entries */
	mp->taste = NULL;
	mp->config = NULL;

	LIST_FOREACH(gp, &mp->geom, geom) {
		error = mp->destroy_geom(NULL, mp, gp);
		if (error != 0) {
			g_topology_unlock();
			return (error);
		}
	}
	/* Wait for withering to finish. */
	for (;;) {
		gp = LIST_FIRST(&mp->geom);
		if (gp == NULL)
			break;
		KASSERT(gp->flags & G_GEOM_WITHER,
		    ("Non-withering geom in class %s", mp->name));
		g_topology_sleep(mp, 1);
	}
	G_VALID_CLASS(mp);
	if (mp->fini != NULL)
		mp->fini(mp);
	LIST_REMOVE(mp, class);
	g_topology_unlock();

	return (0);
}
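
/*
 * Module event handler for GEOM classes (the handler registered by
 * DECLARE_GEOM_CLASS()).  Rejects classes built against a different
 * G_VERSION, fires up GEOM on the first load, and dispatches
 * MOD_LOAD/MOD_UNLOAD to the load/unload machinery above.
 */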
int
g_modevent(module_t mod, int type, void *data)
{
	struct g_hh00 *hh;
	int error;
	static int g_ignition;
	struct g_class *mp;

	mp = data;
	if (mp->version != G_VERSION) {
		printf("GEOM class %s has wrong version %x\n",
		    mp->name, mp->version);
		return (EINVAL);
	}
	if (!g_ignition) {
		g_ignition++;
		g_init();
	}
	error = EOPNOTSUPP;
	switch (type) {
	case MOD_LOAD:
		g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name);
		hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
		hh->mp = mp;
		/*
		 * Once the system is not cold, MOD_LOAD calls will come
		 * from userland and the g_event thread will be able
		 * to acknowledge their completion.
		 */
		if (cold) {
			hh->post = 1;
			error = g_post_event(g_load_class, hh, M_WAITOK, NULL);
		} else {
			error = g_waitfor_event(g_load_class, hh, M_WAITOK,
			    NULL);
			if (error == 0)
				error = hh->error;
			g_free(hh);
		}
		break;
	case MOD_UNLOAD:
		g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name);
		error = g_unload_class(mp);
		if (error == 0) {
			KASSERT(LIST_EMPTY(&mp->geom),
			    ("Unloaded class (%s) still has geom", mp->name));
		}
		break;
	}
	return (error);
}
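
/*
 * Re-run the taste loop for one class across every provider in the system.
 * Unopened providers that already have a non-orphaned consumer from this
 * class have that consumer's geom withered first, so the class can pick
 * the provider up afresh.  g_retaste() is the public entry point; it
 * queues g_retaste_event() on the GEOM event thread.
 */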
static void
g_retaste_event(void *arg, int flag)
{
	struct g_class *mp, *mp2;
	struct g_geom *gp;
	struct g_hh00 *hh;
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();
	if (flag == EV_CANCEL)	/* XXX: can't happen ? */
		return;
	if (g_shutdown || g_notaste)
		return;

	hh = arg;
	mp = hh->mp;
	hh->error = 0;
	if (hh->post) {
		g_free(hh);
		hh = NULL;
	}
	g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name);

	LIST_FOREACH(mp2, &g_classes, class) {
		LIST_FOREACH(gp, &mp2->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->acr || pp->acw || pp->ace)
					continue;
				LIST_FOREACH(cp, &pp->consumers, consumers) {
					if (cp->geom->class == mp &&
					    (cp->flags & G_CF_ORPHAN) == 0)
						break;
				}
				if (cp != NULL) {
					cp->flags |= G_CF_ORPHAN;
					g_wither_geom(cp->geom, ENXIO);
				}
				mp->taste(mp, pp, 0);
				g_topology_assert();
			}
		}
	}
}

int
g_retaste(struct g_class *mp)
{
	struct g_hh00 *hh;
	int error;

	if (mp->taste == NULL)
		return (EINVAL);

	hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
	hh->mp = mp;

	if (cold) {
		hh->post = 1;
		error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL);
	} else {
		error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL);
		if (error == 0)
			error = hh->error;
		g_free(hh);
	}

	return (error);
}
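
/*
 * Allocate a new geom in class "mp", naming it with the printf-style
 * format string and inheriting the class's default method vector.  A
 * minimal usage sketch, as it might appear in a hypothetical class's
 * taste method (the names are illustrative, not from this file):
 *
 *	gp = g_new_geomf(mp, "%s.example", pp->name);
 *	cp = g_new_consumer(gp);
 *	g_attach(cp, pp);
 */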
struct g_geom *
g_new_geomf(struct g_class *mp, const char *fmt, ...)
{
	struct g_geom *gp;
	va_list ap;
	struct sbuf *sb;

	g_topology_assert();
	G_VALID_CLASS(mp);
	sb = sbuf_new_auto();
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO);
	gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	gp->class = mp;
	gp->rank = 1;
	LIST_INIT(&gp->consumer);
	LIST_INIT(&gp->provider);
	LIST_INIT(&gp->aliases);
	LIST_INSERT_HEAD(&mp->geom, gp, geom);
	TAILQ_INSERT_HEAD(&geoms, gp, geoms);
	strcpy(gp->name, sbuf_data(sb));
	sbuf_delete(sb);
	/* Fill in defaults from class */
	gp->start = mp->start;
	gp->spoiled = mp->spoiled;
	gp->attrchanged = mp->attrchanged;
	gp->providergone = mp->providergone;
	gp->dumpconf = mp->dumpconf;
	gp->access = mp->access;
	gp->orphan = mp->orphan;
	gp->ioctl = mp->ioctl;
	gp->resize = mp->resize;
	return (gp);
}
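
/*
 * Free a geom that has no consumers or providers left; any device aliases
 * recorded on it are released along with its name.
 */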
void
g_destroy_geom(struct g_geom *gp)
{
	struct g_geom_alias *gap, *gaptmp;

	g_topology_assert();
	G_VALID_GEOM(gp);
	g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name);
	KASSERT(LIST_EMPTY(&gp->consumer),
	    ("g_destroy_geom(%s) with consumer(s) [%p]",
	    gp->name, LIST_FIRST(&gp->consumer)));
	KASSERT(LIST_EMPTY(&gp->provider),
	    ("g_destroy_geom(%s) with provider(s) [%p]",
	    gp->name, LIST_FIRST(&gp->provider)));
	g_cancel_event(gp);
	LIST_REMOVE(gp, geom);
	TAILQ_REMOVE(&geoms, gp, geoms);
	LIST_FOREACH_SAFE(gap, &gp->aliases, ga_next, gaptmp)
		g_free(gap);
	g_free(gp->name);
	g_free(gp);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 */
void
g_wither_geom(struct g_geom *gp, int error)
{
	struct g_provider *pp;

	g_topology_assert();
	G_VALID_GEOM(gp);
	g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name);
	if (!(gp->flags & G_GEOM_WITHER)) {
		gp->flags |= G_GEOM_WITHER;
		LIST_FOREACH(pp, &gp->provider, provider)
			if (!(pp->flags & G_PF_ORPHAN))
				g_orphan_provider(pp, error);
	}
	g_do_wither();
}

/*
 * Convenience function to destroy a particular provider.
 */
void
g_wither_provider(struct g_provider *pp, int error)
{

	pp->flags |= G_PF_WITHER;
	if (!(pp->flags & G_PF_ORPHAN))
		g_orphan_provider(pp, error);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 */
void
g_wither_geom_close(struct g_geom *gp, int error)
{
	struct g_consumer *cp;

	g_topology_assert();
	G_VALID_GEOM(gp);
	g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name);
	LIST_FOREACH(cp, &gp->consumer, consumer)
		if (cp->acr || cp->acw || cp->ace)
			g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_wither_geom(gp, error);
}

/*
 * This function is called (repeatedly) until we can't wash away more
 * withered bits at present.
 */
void
g_wither_washer()
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp, *pp2;
	struct g_consumer *cp, *cp2;

	g_topology_assert();
	LIST_FOREACH(mp, &g_classes, class) {
		LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
			LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) {
				if (!(pp->flags & G_PF_WITHER))
					continue;
				if (LIST_EMPTY(&pp->consumers))
					g_destroy_provider(pp);
			}
			if (!(gp->flags & G_GEOM_WITHER))
				continue;
			LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) {
				if (LIST_EMPTY(&pp->consumers))
					g_destroy_provider(pp);
			}
			LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) {
				if (cp->acr || cp->acw || cp->ace)
					continue;
				if (cp->provider != NULL)
					g_detach(cp);
				g_destroy_consumer(cp);
			}
			if (LIST_EMPTY(&gp->provider) &&
			    LIST_EMPTY(&gp->consumer))
				g_destroy_geom(gp);
		}
	}
}
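
/*
 * Allocate a consumer hanging off geom "gp".  The geom must have an orphan
 * method and must not be withering: the consumer will need a way to be
 * notified when its provider goes away, and a live geom to hang from.
 */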
struct g_consumer *
g_new_consumer(struct g_geom *gp)
{
	struct g_consumer *cp;

	g_topology_assert();
	G_VALID_GEOM(gp);
	KASSERT(!(gp->flags & G_GEOM_WITHER),
	    ("g_new_consumer on WITHERing geom(%s) (class %s)",
	    gp->name, gp->class->name));
	KASSERT(gp->orphan != NULL,
	    ("g_new_consumer on geom(%s) (class %s) without orphan",
	    gp->name, gp->class->name));

	cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO);
	cp->geom = gp;
	cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	LIST_INSERT_HEAD(&gp->consumer, cp, consumer);
	return (cp);
}

void
g_destroy_consumer(struct g_consumer *cp)
{
	struct g_geom *gp;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp);
	KASSERT(cp->provider == NULL, ("g_destroy_consumer but attached"));
	KASSERT(cp->acr == 0, ("g_destroy_consumer with acr"));
	KASSERT(cp->acw == 0, ("g_destroy_consumer with acw"));
	KASSERT(cp->ace == 0, ("g_destroy_consumer with ace"));
	g_cancel_event(cp);
	gp = cp->geom;
	LIST_REMOVE(cp, consumer);
	devstat_remove_entry(cp->stat);
	g_free(cp);
	if (gp->flags & G_GEOM_WITHER)
		g_do_wither();
}
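
/*
 * Event run after a new provider has been created: notify interested
 * consumers that media has arrived, then offer the provider to every class
 * for tasting, skipping classes that already have a non-orphaned consumer
 * attached to it.
 */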
static void
g_new_provider_event(void *arg, int flag)
{
	struct g_class *mp;
	struct g_provider *pp;
	struct g_consumer *cp, *next_cp;

	g_topology_assert();
	if (flag == EV_CANCEL)
		return;
	if (g_shutdown)
		return;
	pp = arg;
	G_VALID_PROVIDER(pp);
	KASSERT(!(pp->flags & G_PF_WITHER),
	    ("g_new_provider_event but withered"));
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
		if ((cp->flags & G_CF_ORPHAN) == 0 &&
		    cp->geom->attrchanged != NULL)
			cp->geom->attrchanged(cp, "GEOM::media");
	}
	if (g_notaste)
		return;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp->taste == NULL)
			continue;
		LIST_FOREACH(cp, &pp->consumers, consumers)
			if (cp->geom->class == mp &&
			    (cp->flags & G_CF_ORPHAN) == 0)
				break;
		if (cp != NULL)
			continue;
		mp->taste(mp, pp, 0);
		g_topology_assert();
	}
}
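
/*
 * Create a new provider on geom "gp", named by the printf-style format
 * string; the name is what eventually shows up under /dev once the dev
 * class tastes it.  The geom must supply access and start methods and
 * must not be withering.
 */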
struct g_provider *
g_new_providerf(struct g_geom *gp, const char *fmt, ...)
{
	struct g_provider *pp;
	struct sbuf *sb;
	va_list ap;

	g_topology_assert();
	G_VALID_GEOM(gp);
	KASSERT(gp->access != NULL,
	    ("new provider on geom(%s) without ->access (class %s)",
	    gp->name, gp->class->name));
	KASSERT(gp->start != NULL,
	    ("new provider on geom(%s) without ->start (class %s)",
	    gp->name, gp->class->name));
	KASSERT(!(gp->flags & G_GEOM_WITHER),
	    ("new provider on WITHERing geom(%s) (class %s)",
	    gp->name, gp->class->name));
	sb = sbuf_new_auto();
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	pp->name = (char *)(pp + 1);
	strcpy(pp->name, sbuf_data(sb));
	sbuf_delete(sb);
	LIST_INIT(&pp->consumers);
	pp->error = ENXIO;
	pp->geom = gp;
	pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	LIST_INSERT_HEAD(&gp->provider, pp, provider);
	g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL);
	return (pp);
}
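
/*
 * Set the error status of a provider; a non-zero error makes g_access()
 * refuse new opens.  Clearing the error (to zero) re-enables them.
 */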
void
g_error_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp);  We may not have g_topology */
	pp->error = error;
}
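
/*
 * Resize a provider from the GEOM event thread: orphan consumers whose
 * geoms cannot handle a shrinking provider, update the media size, call
 * the resize methods of the remaining consumers' geoms, and finally
 * retaste in case the new size makes previously invalid metadata valid.
 */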
static void
g_resize_provider_event(void *arg, int flag)
{
	struct g_hh00 *hh;
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp, *cp2;
	off_t size;

	g_topology_assert();
	if (g_shutdown)
		return;

	hh = arg;
	pp = hh->pp;
	size = hh->size;
	g_free(hh);

	G_VALID_PROVIDER(pp);
	KASSERT(!(pp->flags & G_PF_WITHER),
	    ("g_resize_provider_event but withered"));
	g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp);

	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		gp = cp->geom;
		if (gp->resize == NULL && size < pp->mediasize) {
			/*
			 * XXX: The g_dev_orphan method does deferred
			 * destruction and another event may already have
			 * called the orphan method.  Check the consumer's
			 * flags to avoid scheduling it twice.
			 */
			if (cp->flags & G_CF_ORPHAN)
				continue;
			cp->flags |= G_CF_ORPHAN;
			cp->geom->orphan(cp);
		}
	}

	pp->mediasize = size;

	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		gp = cp->geom;
		if ((gp->flags & G_GEOM_WITHER) == 0 && gp->resize != NULL)
			gp->resize(cp);
	}

	/*
	 * After resizing, the previously invalid GEOM class metadata
	 * might become valid.  This means we should retaste.
	 */
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp->taste == NULL)
			continue;
		LIST_FOREACH(cp, &pp->consumers, consumers)
			if (cp->geom->class == mp &&
			    (cp->flags & G_CF_ORPHAN) == 0)
				break;
		if (cp != NULL)
			continue;
		mp->taste(mp, pp, 0);
		g_topology_assert();
	}
}

void
g_resize_provider(struct g_provider *pp, off_t size)
{
	struct g_hh00 *hh;

	G_VALID_PROVIDER(pp);
	if (pp->flags & G_PF_WITHER)
		return;

	if (size == pp->mediasize)
		return;

	hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
	hh->pp = pp;
	hh->size = size;
	g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL);
}

#ifndef _PATH_DEV
#define _PATH_DEV "/dev/"
#endif
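
/*
 * Look a provider up by name, accepting an optional "/dev/" prefix.  A
 * withering provider is only returned if no healthy one matches.
 */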
struct g_provider *
g_provider_by_name(char const *arg)
{
	struct g_class *cp;
	struct g_geom *gp;
	struct g_provider *pp, *wpp;

	if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		arg += sizeof(_PATH_DEV) - 1;

	wpp = NULL;
	LIST_FOREACH(cp, &g_classes, class) {
		LIST_FOREACH(gp, &cp->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (strcmp(arg, pp->name) != 0)
					continue;
				if ((gp->flags & G_GEOM_WITHER) == 0 &&
				    (pp->flags & G_PF_WITHER) == 0)
					return (pp);
				else
					wpp = pp;
			}
		}
	}

	return (wpp);
}

void
g_destroy_provider(struct g_provider *pp)
{
	struct g_geom *gp;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	KASSERT(LIST_EMPTY(&pp->consumers),
	    ("g_destroy_provider but attached"));
	KASSERT(pp->acr == 0, ("g_destroy_provider with acr"));
	KASSERT(pp->acw == 0, ("g_destroy_provider with acw"));
	KASSERT(pp->ace == 0, ("g_destroy_provider with ace"));
	g_cancel_event(pp);
	LIST_REMOVE(pp, provider);
	gp = pp->geom;
	devstat_remove_entry(pp->stat);
	/*
	 * If a callback was provided, send notification that the provider
	 * is now gone.
	 */
	if (gp->providergone != NULL)
		gp->providergone(pp);

	g_free(pp);
	if ((gp->flags & G_GEOM_WITHER))
		g_do_wither();
}

/*
 * We keep the "geoms" list sorted by topological order (== increasing
 * numerical rank) at all times.
 * When an attach is done, the attaching geom's rank is invalidated
 * and it is moved to the tail of the list.
 * All geoms later in the sequence have their ranks reevaluated in
 * sequence.  If we cannot assign rank to a geom because its
 * prerequisites do not have rank, we move that element to the tail
 * of the sequence with invalid rank as well.
 * At some point we encounter our original geom and if we still fail
 * to assign it a rank, there must be a loop and we fail back to
 * g_attach() which detaches again and calls redo_rank again
 * to fix up the damage.
 * It would be much simpler code wise to do it recursively, but we
 * can't risk that on the kernel stack.
 */

static int
redo_rank(struct g_geom *gp)
{
	struct g_consumer *cp;
	struct g_geom *gp1, *gp2;
	int n, m;

	g_topology_assert();
	G_VALID_GEOM(gp);

	/* Invalidate this geom's rank and move it to the tail */
	gp1 = TAILQ_NEXT(gp, geoms);
	if (gp1 != NULL) {
		gp->rank = 0;
		TAILQ_REMOVE(&geoms, gp, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp, geoms);
	} else {
		gp1 = gp;
	}

	/* re-rank the rest of the sequence */
	for (; gp1 != NULL; gp1 = gp2) {
		gp1->rank = 0;
		m = 1;
		LIST_FOREACH(cp, &gp1->consumer, consumer) {
			if (cp->provider == NULL)
				continue;
			n = cp->provider->geom->rank;
			if (n == 0) {
				m = 0;
				break;
			} else if (n >= m)
				m = n + 1;
		}
		gp1->rank = m;
		gp2 = TAILQ_NEXT(gp1, geoms);

		/* got a rank, moving on */
		if (m != 0)
			continue;

		/* no rank to original geom means loop */
		if (gp == gp1)
			return (ELOOP);

		/* no rank, put it at the end and move on */
		TAILQ_REMOVE(&geoms, gp1, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp1, geoms);
	}
	return (0);
}
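
/*
 * Attach consumer "cp" to provider "pp" and re-rank the consumer's geom.
 * Fails with ELOOP (and undoes the attach) if the attachment would create
 * a cycle in the topology.
 */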
int
g_attach(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp);
	KASSERT(cp->provider == NULL, ("attach but attached"));
	cp->provider = pp;
	cp->flags &= ~G_CF_ORPHAN;
	LIST_INSERT_HEAD(&pp->consumers, cp, consumers);
	error = redo_rank(cp->geom);
	if (error) {
		LIST_REMOVE(cp, consumers);
		cp->provider = NULL;
		redo_rank(cp->geom);
	}
	return (error);
}
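
/*
 * Detach an idle consumer (no open counts, no in-flight requests) from
 * its provider and re-rank its geom.
 */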
void
g_detach(struct g_consumer *cp)
{
	struct g_provider *pp;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp);
	KASSERT(cp->provider != NULL, ("detach but not attached"));
	KASSERT(cp->acr == 0, ("detach but nonzero acr"));
	KASSERT(cp->acw == 0, ("detach but nonzero acw"));
	KASSERT(cp->ace == 0, ("detach but nonzero ace"));
	KASSERT(cp->nstart == cp->nend,
	    ("detach with active requests"));
	pp = cp->provider;
	LIST_REMOVE(cp, consumers);
	cp->provider = NULL;
	if ((cp->geom->flags & G_GEOM_WITHER) ||
	    (pp->geom->flags & G_GEOM_WITHER) ||
	    (pp->flags & G_PF_WITHER))
		g_do_wither();
	redo_rank(cp->geom);
}
|
|
|
|
|
|
|
|
/*
|
2004-02-12 22:42:11 +00:00
|
|
|
* g_access()
|
2002-03-11 21:42:35 +00:00
|
|
|
*
|
|
|
|
* Access-check with delta values. The question asked is "can provider
|
|
|
|
* "cp" change the access counters by the relative amounts dc[rwe] ?"
|
|
|
|
*/
|
|
|
|
|
|
|
|
int
|
2004-02-12 22:42:11 +00:00
|
|
|
g_access(struct g_consumer *cp, int dcr, int dcw, int dce)
|
2002-03-11 21:42:35 +00:00
|
|
|
{
|
|
|
|
struct g_provider *pp;
|
g_access: deal with races created by geoms that drop the topology lock
The problem is that g_access() must be called with the GEOM topology
lock held. And that gives a false impression that the lock is indeed
held across the call. But this isn't always true because many classes,
ZVOL being one of the many, need to drop the lock. It's either to
perform an I/O on the first open or to acquire a different lock (like in
g_mirror_access).
That, of course, can break many assumptions. For example,
g_slice_access() adds an extra exclusive count on the first open. As
described above, an underlying geom may drop the topology lock and that
would open a race with another thread that would also request another
extra exclusive count. In general, two consumers may be granted
incompatible accesses.
To avoid this problem the code is changed to mark a geom with special
flag before calling its access method and clear the flag afterwards. If
another thread sees that flag, then it means that the topology lock has
been dropped (either by the geom in question or downstream from it), so
it is not safe to make another access call. So, the second thread would
use g_topology_sleep() to wait until the flag is cleared and only then
would it proceed with the access.
Also see http://docs.freebsd.org/cgi/mid.cgi?809d9254-ee56-59d8-69a4-08838e985cea
PR: 225960
Reported by: asomers
Reviewed by: markj, mav
MFC after: 3 weeks
Differential Revision: https://reviews.freebsd.org/D14533
2018-03-15 09:16:10 +00:00
|
|
|
struct g_geom *gp;
|
2017-12-25 04:48:39 +00:00
|
|
|
int pw, pe;
|
g_access: deal with races created by geoms that drop the topology lock
The problem is that g_access() must be called with the GEOM topology
lock held. And that gives a false impression that the lock is indeed
held across the call. But this isn't always true because many classes,
ZVOL being one of the many, need to drop the lock. It's either to
perform an I/O on the first open or to acquire a different lock (like in
g_mirror_access).
That, of course, can break many assumptions. For example,
g_slice_access() adds an extra exclusive count on the first open. As
described above, an underlying geom may drop the topology lock and that
would open a race with another thread that would also request another
extra exclusive count. In general, two consumers may be granted
incompatible accesses.
To avoid this problem the code is changed to mark a geom with special
flag before calling its access method and clear the flag afterwards. If
another thread sees that flag, then it means that the topology lock has
been dropped (either by the geom in question or downstream from it), so
it is not safe to make another access call. So, the second thread would
use g_topology_sleep() to wait until the flag is cleared and only then
would it proceed with the access.
Also see http://docs.freebsd.org/cgi/mid.cgi?809d9254-ee56-59d8-69a4-08838e985cea
PR: 225960
Reported by: asomers
Reviewed by: markj, mav
MFC after: 3 weeks
Differential Revision: https://reviews.freebsd.org/D14533
2018-03-15 09:16:10 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
int sr, sw, se;
|
|
|
|
#endif
|
2002-03-11 21:42:35 +00:00
|
|
|
int error;
|
|
|
|
|
2004-03-10 08:49:08 +00:00
|
|
|
g_topology_assert();
|
|
|
|
G_VALID_CONSUMER(cp);
|
2002-03-11 21:42:35 +00:00
|
|
|
pp = cp->provider;
|
2004-03-18 07:17:10 +00:00
|
|
|
KASSERT(pp != NULL, ("access but not attached"));
|
2004-03-10 08:49:08 +00:00
|
|
|
G_VALID_PROVIDER(pp);
|
g_access: deal with races created by geoms that drop the topology lock
The problem is that g_access() must be called with the GEOM topology
lock held. And that gives a false impression that the lock is indeed
held across the call. But this isn't always true because many classes,
ZVOL being one of the many, need to drop the lock. It's either to
perform an I/O on the first open or to acquire a different lock (like in
g_mirror_access).
That, of course, can break many assumptions. For example,
g_slice_access() adds an extra exclusive count on the first open. As
described above, an underlying geom may drop the topology lock and that
would open a race with another thread that would also request another
extra exclusive count. In general, two consumers may be granted
incompatible accesses.
To avoid this problem the code is changed to mark a geom with special
flag before calling its access method and clear the flag afterwards. If
another thread sees that flag, then it means that the topology lock has
been dropped (either by the geom in question or downstream from it), so
it is not safe to make another access call. So, the second thread would
use g_topology_sleep() to wait until the flag is cleared and only then
would it proceed with the access.
Also see http://docs.freebsd.org/cgi/mid.cgi?809d9254-ee56-59d8-69a4-08838e985cea
PR: 225960
Reported by: asomers
Reviewed by: markj, mav
MFC after: 3 weeks
Differential Revision: https://reviews.freebsd.org/D14533
2018-03-15 09:16:10 +00:00
|
|
|
gp = pp->geom;
|
2002-03-11 21:42:35 +00:00
|
|
|
|
2004-02-12 22:42:11 +00:00
|
|
|
g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)",
|
2002-03-11 21:42:35 +00:00
|
|
|
cp, pp->name, dcr, dcw, dce);
|
|
|
|
|
|
|
|
KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr"));
|
|
|
|
KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw"));
|
|
|
|
KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace"));
|
2004-01-09 16:10:32 +00:00
|
|
|
KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request"));
|
g_access: deal with races created by geoms that drop the topology lock
The problem is that g_access() must be called with the GEOM topology
lock held. And that gives a false impression that the lock is indeed
held across the call. But this isn't always true because many classes,
ZVOL being one of the many, need to drop the lock. It's either to
perform an I/O on the first open or to acquire a different lock (like in
g_mirror_access).
That, of course, can break many assumptions. For example,
g_slice_access() adds an extra exclusive count on the first open. As
described above, an underlying geom may drop the topology lock and that
would open a race with another thread that would also request another
extra exclusive count. In general, two consumers may be granted
incompatible accesses.
To avoid this problem the code is changed to mark a geom with special
flag before calling its access method and clear the flag afterwards. If
another thread sees that flag, then it means that the topology lock has
been dropped (either by the geom in question or downstream from it), so
it is not safe to make another access call. So, the second thread would
use g_topology_sleep() to wait until the flag is cleared and only then
would it proceed with the access.
Also see http://docs.freebsd.org/cgi/mid.cgi?809d9254-ee56-59d8-69a4-08838e985cea
PR: 225960
Reported by: asomers
Reviewed by: markj, mav
MFC after: 3 weeks
Differential Revision: https://reviews.freebsd.org/D14533
2018-03-15 09:16:10 +00:00
|
|
|
KASSERT(gp->access != NULL, ("NULL geom->access"));
|
2002-03-11 21:42:35 +00:00
|
|
|
|
|
|
|
/*
|
2002-03-26 21:40:06 +00:00
|
|
|
* If our class cares about being spoiled, and we have been, we
|
2002-03-11 21:42:35 +00:00
|
|
|
* are probably just ahead of the event telling us that. Fail
|
|
|
|
* now rather than having to unravel this later.
|
|
|
|
*/
|
2012-07-29 11:51:48 +00:00
|
|
|
if (cp->geom->spoiled != NULL && (cp->flags & G_CF_SPOILED) &&
|
2004-07-08 10:34:09 +00:00
|
|
|
(dcr > 0 || dcw > 0 || dce > 0))
|
|
|
|
return (ENXIO);
|
2002-03-11 21:42:35 +00:00
|
|
|
|
g_access: deal with races created by geoms that drop the topology lock
The problem is that g_access() must be called with the GEOM topology
lock held. And that gives a false impression that the lock is indeed
held across the call. But this isn't always true because many classes,
ZVOL being one of the many, need to drop the lock. It's either to
perform an I/O on the first open or to acquire a different lock (like in
g_mirror_access).
That, of course, can break many assumptions. For example,
g_slice_access() adds an extra exclusive count on the first open. As
described above, an underlying geom may drop the topology lock and that
would open a race with another thread that would also request another
extra exclusive count. In general, two consumers may be granted
incompatible accesses.
To avoid this problem the code is changed to mark a geom with special
flag before calling its access method and clear the flag afterwards. If
another thread sees that flag, then it means that the topology lock has
been dropped (either by the geom in question or downstream from it), so
it is not safe to make another access call. So, the second thread would
use g_topology_sleep() to wait until the flag is cleared and only then
would it proceed with the access.
Also see http://docs.freebsd.org/cgi/mid.cgi?809d9254-ee56-59d8-69a4-08838e985cea
PR: 225960
Reported by: asomers
Reviewed by: markj, mav
MFC after: 3 weeks
Differential Revision: https://reviews.freebsd.org/D14533
2018-03-15 09:16:10 +00:00
|
|
|
/*
|
|
|
|
* A number of GEOM classes either need to perform an I/O on the first
|
|
|
|
* open or to acquire a different subsystem's lock. To do that they
|
|
|
|
* may have to drop the topology lock.
|
|
|
|
* Other GEOM classes perform special actions when opening a lower rank
|
|
|
|
* geom for the first time. As a result, more than one thread may
|
|
|
|
* end up performing the special actions.
|
|
|
|
* So, we prevent concurrent "first" opens by marking the consumer with
|
|
|
|
* special flag.
|
|
|
|
*
|
|
|
|
* Note that if the geom's access method never drops the topology lock,
|
|
|
|
* then we will never see G_GEOM_IN_ACCESS here.
|
|
|
|
*/
|
|
|
|
while ((gp->flags & G_GEOM_IN_ACCESS) != 0) {
|
|
|
|
g_trace(G_T_ACCESS,
|
|
|
|
"%s: race on geom %s via provider %s and consumer of %s",
|
|
|
|
__func__, gp->name, pp->name, cp->geom->name);
|
|
|
|
gp->flags |= G_GEOM_ACCESS_WAIT;
|
|
|
|
g_topology_sleep(gp, 0);
|
|
|
|
}
|
|
|
|
|
2002-03-11 21:42:35 +00:00
|
|
|
/*
|
|
|
|
* Figure out what counts the provider would have had, if this
|
|
|
|
* consumer had (r0w0e0) at this time.
|
|
|
|
*/
|
|
|
|
pw = pp->acw - cp->acw;
|
|
|
|
pe = pp->ace - cp->ace;
|
|
|
|
|
|
|
|
g_trace(G_T_ACCESS,
|
|
|
|
"open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)",
|
|
|
|
dcr, dcw, dce,
|
|
|
|
cp->acr, cp->acw, cp->ace,
|
|
|
|
pp->acr, pp->acw, pp->ace,
|
|
|
|
pp, pp->name);
|
|
|
|
|
2003-02-12 09:48:27 +00:00
|
|
|
/* If foot-shooting is enabled, any open on rank#1 is OK */
|
g_access: deal with races created by geoms that drop the topology lock
The problem is that g_access() must be called with the GEOM topology
lock held. And that gives a false impression that the lock is indeed
held across the call. But this isn't always true because many classes,
ZVOL being one of the many, need to drop the lock. It's either to
perform an I/O on the first open or to acquire a different lock (like in
g_mirror_access).
That, of course, can break many assumptions. For example,
g_slice_access() adds an extra exclusive count on the first open. As
described above, an underlying geom may drop the topology lock and that
would open a race with another thread that would also request another
extra exclusive count. In general, two consumers may be granted
incompatible accesses.
To avoid this problem the code is changed to mark a geom with special
flag before calling its access method and clear the flag afterwards. If
another thread sees that flag, then it means that the topology lock has
been dropped (either by the geom in question or downstream from it), so
it is not safe to make another access call. So, the second thread would
use g_topology_sleep() to wait until the flag is cleared and only then
would it proceed with the access.
Also see http://docs.freebsd.org/cgi/mid.cgi?809d9254-ee56-59d8-69a4-08838e985cea
PR: 225960
Reported by: asomers
Reviewed by: markj, mav
MFC after: 3 weeks
Differential Revision: https://reviews.freebsd.org/D14533
2018-03-15 09:16:10 +00:00
|
|
|
if ((g_debugflags & 16) && gp->rank == 1)
|
2003-02-12 09:48:27 +00:00
|
|
|
;
|
2002-03-11 21:42:35 +00:00
|
|
|
/* If we try exclusive but already write: fail */
|
2003-02-12 09:48:27 +00:00
|
|
|
else if (dce > 0 && pw > 0)
|
2002-03-11 21:42:35 +00:00
|
|
|
return (EPERM);
|
|
|
|
/* If we try write but already exclusive: fail */
|
2003-02-12 09:48:27 +00:00
|
|
|
else if (dcw > 0 && pe > 0)
|
2002-03-11 21:42:35 +00:00
|
|
|
return (EPERM);
|
|
|
|
/* If we try to open more but provider is error'ed: fail */
|
2016-06-22 14:39:13 +00:00
|
|
|
else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) {
|
2017-08-27 12:24:25 +00:00
|
|
|
printf("%s(%d): provider %s has error %d set\n",
|
|
|
|
__func__, __LINE__, pp->name, pp->error);
|
2002-03-11 21:42:35 +00:00
|
|
|
return (pp->error);
|
2016-06-22 14:39:13 +00:00
|
|
|
}
|
2002-03-11 21:42:35 +00:00
|
|
|
|
|
|
|
/* Ok then... */
|
|
|
|
|
g_access: deal with races created by geoms that drop the topology lock
The problem is that g_access() must be called with the GEOM topology
lock held. And that gives a false impression that the lock is indeed
held across the call. But this isn't always true because many classes,
ZVOL being one of the many, need to drop the lock. It's either to
perform an I/O on the first open or to acquire a different lock (like in
g_mirror_access).
That, of course, can break many assumptions. For example,
g_slice_access() adds an extra exclusive count on the first open. As
described above, an underlying geom may drop the topology lock and that
would open a race with another thread that would also request another
extra exclusive count. In general, two consumers may be granted
incompatible accesses.
To avoid this problem the code is changed to mark a geom with special
flag before calling its access method and clear the flag afterwards. If
another thread sees that flag, then it means that the topology lock has
been dropped (either by the geom in question or downstream from it), so
it is not safe to make another access call. So, the second thread would
use g_topology_sleep() to wait until the flag is cleared and only then
would it proceed with the access.
Also see http://docs.freebsd.org/cgi/mid.cgi?809d9254-ee56-59d8-69a4-08838e985cea
PR: 225960
Reported by: asomers
Reviewed by: markj, mav
MFC after: 3 weeks
Differential Revision: https://reviews.freebsd.org/D14533
2018-03-15 09:16:10 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
sr = cp->acr;
|
|
|
|
sw = cp->acw;
|
|
|
|
se = cp->ace;
|
|
|
|
#endif
|
|
|
|
gp->flags |= G_GEOM_IN_ACCESS;
|
|
|
|
error = gp->access(pp, dcr, dcw, dce);
|
2004-02-14 17:58:57 +00:00
|
|
|
KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0,
|
2014-04-15 14:41:41 +00:00
|
|
|
("Geom provider %s::%s dcr=%d dcw=%d dce=%d error=%d failed "
|
2018-03-15 09:16:10 +00:00
|
|
|
"closing ->access()", gp->class->name, pp->name, dcr, dcw,
|
2014-04-15 14:41:41 +00:00
|
|
|
dce, error));
|
2018-03-15 09:16:10 +00:00
|
|
|
|
|
|
|
g_topology_assert();
|
|
|
|
gp->flags &= ~G_GEOM_IN_ACCESS;
|
|
|
|
KASSERT(cp->acr == sr && cp->acw == sw && cp->ace == se,
|
|
|
|
("Access counts changed during geom->access"));
|
|
|
|
if ((gp->flags & G_GEOM_ACCESS_WAIT) != 0) {
|
|
|
|
gp->flags &= ~G_GEOM_ACCESS_WAIT;
|
|
|
|
wakeup(gp);
|
|
|
|
}
|
|
|
|
|
2002-03-11 21:42:35 +00:00
|
|
|
if (!error) {
|
2003-04-02 13:10:40 +00:00
|
|
|
/*
|
|
|
|
* If we open first write, spoil any partner consumers.
|
Fix a bug that caused some /dev entries to continue to exist after
the underlying drive had been hot-unplugged from the system. Here
is a specific example. Filesystem code had opened /dev/da1s1e.
Subsequently, the drive was hot-unplugged. This (correctly) caused
all of the associated /dev/da1* entries to be deleted. When the
filesystem later realized that the drive was gone it closed the
device, reducing the write-access counts to 0 on the geom providers
for da1s1e, da1s1, and da1. This caused geom to re-taste the
providers, resulting in the devices being created again. When the
drive was hot-plugged back in, it resulted in duplicate /dev entries
for da1s1e, da1s1, and da1.
This fix adds a new disk_gone() function which is called by CAM when a
drive goes away. It orphans all of the providers associated with the
drive, setting an error condition of ENXIO in each one. In addition,
we prevent a re-taste on last close for writing if an error condition
has been set in the provider.
Sponsored by: Isilon Systems
Reviewed by: phk
MFC after: 1 week
2005-11-18 02:43:49 +00:00
|
|
|
* If we close last write and provider is not errored,
|
|
|
|
* trigger re-taste.
|
2003-04-02 13:10:40 +00:00
|
|
|
*/
|
|
|
|
if (pp->acw == 0 && dcw != 0)
|
|
|
|
g_spoil(pp, cp);
|
2005-11-18 02:43:49 +00:00
|
|
|
else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 &&
|
2018-03-15 09:16:10 +00:00
|
|
|
!(gp->flags & G_GEOM_WITHER))
|
2003-04-23 20:46:12 +00:00
|
|
|
g_post_event(g_new_provider_event, pp, M_WAITOK,
|
|
|
|
pp, NULL);
|
2003-04-02 13:10:40 +00:00
|
|
|
|
2002-03-11 21:42:35 +00:00
|
|
|
pp->acr += dcr;
|
|
|
|
pp->acw += dcw;
|
|
|
|
pp->ace += dce;
|
|
|
|
cp->acr += dcr;
|
|
|
|
cp->acw += dcw;
|
|
|
|
cp->ace += dce;
|
2004-09-05 21:15:58 +00:00
|
|
|
if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)
|
|
|
|
KASSERT(pp->sectorsize > 0,
|
|
|
|
("Provider %s lacks sectorsize", pp->name));
|
2013-03-24 03:15:20 +00:00
|
|
|
if ((cp->geom->flags & G_GEOM_WITHER) &&
|
|
|
|
cp->acr == 0 && cp->acw == 0 && cp->ace == 0)
|
|
|
|
g_do_wither();
|
2002-03-11 21:42:35 +00:00
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
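A hedged sketch of the disk_gone() idea from the hot-unplug fix noted above:
orphan every provider hanging off the disk's geom with ENXIO. Names follow
the commit note; the committed version lives in geom_disk.c and may differ.

void
disk_gone(struct disk *dp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL)
		LIST_FOREACH(pp, &gp->provider, provider)
			g_orphan_provider(pp, ENXIO);	/* sets pp->error */
}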
|
|
|
|
|
|
|
|
int
|
2002-12-16 22:33:27 +00:00
|
|
|
g_handleattr_int(struct bio *bp, const char *attribute, int val)
|
2002-03-11 21:42:35 +00:00
|
|
|
{
|
|
|
|
|
2002-06-09 10:57:34 +00:00
|
|
|
return (g_handleattr(bp, attribute, &val, sizeof val));
|
2002-03-11 21:42:35 +00:00
|
|
|
}
|
|
|
|
|
Improve ZFS N-way mirror read performance by using load and locality
information.
The existing algorithm selects a preferred leaf vdev based on the offset of
the zio request modulo the number of members in the mirror. It assumes the
devices are of equal performance and that spreading the requests randomly
over them will be sufficient to saturate them. In practice this results in
the leaf vdevs being underutilized.
The new algorithm takes into account the following additional factors:
* Load of the vdevs (number of outstanding I/O requests)
* The locality of the last queued I/O vs the new I/O request.
Within the locality calculation additional knowledge about the underlying
vdev is considered, such as whether the device backing the vdev is a
rotating media device.
This results in performance increases across the board as well as significant
increases for predominantly streaming loads and for configurations which don't
have evenly performing devices.
The following are results from a setup with a 3-way mirror with 2 x HDs and
1 x SSD from a basic test running multiple parallel dd's.
With pre-fetch disabled (vfs.zfs.prefetch_disable=1):
== Stripe Balanced (default) ==
Read 15360MB using bs: 1048576, readers: 3, took 161 seconds @ 95 MB/s
== Load Balanced (zfslinux) ==
Read 15360MB using bs: 1048576, readers: 3, took 297 seconds @ 51 MB/s
== Load Balanced (locality freebsd) ==
Read 15360MB using bs: 1048576, readers: 3, took 54 seconds @ 284 MB/s
With pre-fetch enabled (vfs.zfs.prefetch_disable=0):
== Stripe Balanced (default) ==
Read 15360MB using bs: 1048576, readers: 3, took 91 seconds @ 168 MB/s
== Load Balanced (zfslinux) ==
Read 15360MB using bs: 1048576, readers: 3, took 108 seconds @ 142 MB/s
== Load Balanced (locality freebsd) ==
Read 15360MB using bs: 1048576, readers: 3, took 48 seconds @ 320 MB/s
In addition to the performance changes the code was also restructured, with
the help of Justin Gibbs, to provide a more logical flow which also ensures
vdev loads are only calculated from the set of valid candidates.
The following additional sysctls were added to allow the administrator
to tune the behaviour of the load algorithm:
* vfs.zfs.vdev.mirror.rotating_inc
* vfs.zfs.vdev.mirror.rotating_seek_inc
* vfs.zfs.vdev.mirror.rotating_seek_offset
* vfs.zfs.vdev.mirror.non_rotating_inc
* vfs.zfs.vdev.mirror.non_rotating_seek_inc
These changes were based on work started by the zfsonlinux developers:
https://github.com/zfsonlinux/zfs/pull/1487
Reviewed by: gibbs, mav, will
MFC after: 2 weeks
Sponsored by: Multiplay
2013-10-23 09:54:58 +00:00
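A hedged sketch of the load metric described above; the child with the
lowest score is preferred. The tunable names mirror the sysctls, but this
is illustrative C, not the committed vdev_mirror.c logic.

static int rotating_inc, rotating_seek_inc, rotating_seek_offset;
static int non_rotating_inc, non_rotating_seek_inc;

static int
mirror_child_load(int outstanding, int is_rotating, long seek_distance)
{
	int load;

	load = outstanding;		/* queued I/O dominates the score */
	if (is_rotating) {
		load += rotating_inc;
		if (seek_distance > rotating_seek_offset)
			load += rotating_seek_inc;	/* long seek penalty */
	} else {
		load += non_rotating_inc;
		if (seek_distance != 0)
			load += non_rotating_seek_inc;	/* non-local I/O */
	}
	return (load);
}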
|
|
|
int
|
|
|
|
g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (g_handleattr(bp, attribute, &val, sizeof val));
|
|
|
|
}
|
|
|
|
|
2002-03-11 21:42:35 +00:00
|
|
|
int
|
2002-12-16 22:33:27 +00:00
|
|
|
g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val)
|
2002-03-11 21:42:35 +00:00
|
|
|
{
|
|
|
|
|
2002-06-09 10:57:34 +00:00
|
|
|
return (g_handleattr(bp, attribute, &val, sizeof val));
|
2002-03-11 21:42:35 +00:00
|
|
|
}
|
|
|
|
|
2007-05-05 16:33:44 +00:00
|
|
|
int
|
2009-02-01 01:50:09 +00:00
|
|
|
g_handleattr_str(struct bio *bp, const char *attribute, const char *str)
|
2007-05-05 16:33:44 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
return (g_handleattr(bp, attribute, str, 0));
|
|
|
|
}
|
|
|
|
|
2002-03-11 21:42:35 +00:00
|
|
|
int
|
2009-02-01 01:50:09 +00:00
|
|
|
g_handleattr(struct bio *bp, const char *attribute, const void *val, int len)
|
2002-03-11 21:42:35 +00:00
|
|
|
{
|
2007-05-05 16:33:44 +00:00
|
|
|
int error = 0;
|
2002-03-11 21:42:35 +00:00
|
|
|
|
|
|
|
if (strcmp(bp->bio_attribute, attribute))
|
|
|
|
return (0);
|
2007-05-05 16:33:44 +00:00
|
|
|
if (len == 0) {
|
|
|
|
bzero(bp->bio_data, bp->bio_length);
|
|
|
|
if (strlcpy(bp->bio_data, val, bp->bio_length) >=
|
|
|
|
bp->bio_length) {
|
2018-04-05 13:56:40 +00:00
|
|
|
printf("%s: %s %s bio_length %jd strlen %zu -> EFAULT\n",
|
|
|
|
__func__, bp->bio_to->name, attribute,
|
2007-05-05 16:33:44 +00:00
|
|
|
(intmax_t)bp->bio_length, strlen(val));
|
|
|
|
error = EFAULT;
|
|
|
|
}
|
|
|
|
} else if (bp->bio_length == len) {
|
2002-03-11 21:42:35 +00:00
|
|
|
bcopy(val, bp->bio_data, len);
|
2007-05-05 16:33:44 +00:00
|
|
|
} else {
|
2018-04-05 13:56:40 +00:00
|
|
|
printf("%s: %s %s bio_length %jd len %d -> EFAULT\n", __func__,
|
|
|
|
bp->bio_to->name, attribute, (intmax_t)bp->bio_length, len);
|
2007-05-05 16:33:44 +00:00
|
|
|
error = EFAULT;
|
2002-03-11 21:42:35 +00:00
|
|
|
}
|
2009-02-03 07:07:13 +00:00
|
|
|
if (error == 0)
|
|
|
|
bp->bio_completed = bp->bio_length;
|
2002-09-30 08:54:46 +00:00
|
|
|
g_io_deliver(bp, error);
|
2002-03-11 21:42:35 +00:00
|
|
|
return (1);
|
|
|
|
}
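A hedged usage sketch: a class's start method can answer BIO_GETATTR
requests with these helpers, which deliver the bio and return non-zero on a
name match. The attribute names are established GEOM conventions; the
surrounding function is illustrative.

static void
g_example_start(struct bio *bp)
{

	if (bp->bio_cmd == BIO_GETATTR) {
		if (g_handleattr_int(bp, "GEOM::candelete", 0))
			return;		/* already delivered by g_handleattr() */
		if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0))
			return;
	}
	/* ... handle or forward the remaining commands ... */
}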
|
|
|
|
|
|
|
|
int
|
2004-03-10 08:49:08 +00:00
|
|
|
g_std_access(struct g_provider *pp,
|
2002-03-11 21:42:35 +00:00
|
|
|
int dr __unused, int dw __unused, int de __unused)
|
|
|
|
{
|
|
|
|
|
2004-03-10 08:49:08 +00:00
|
|
|
g_topology_assert();
|
|
|
|
G_VALID_PROVIDER(pp);
|
2002-03-11 21:42:35 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
g_std_done(struct bio *bp)
|
|
|
|
{
|
|
|
|
struct bio *bp2;
|
|
|
|
|
2003-02-07 21:09:51 +00:00
|
|
|
bp2 = bp->bio_parent;
|
2002-10-09 07:11:59 +00:00
|
|
|
if (bp2->bio_error == 0)
|
|
|
|
bp2->bio_error = bp->bio_error;
|
|
|
|
bp2->bio_completed += bp->bio_completed;
|
2002-03-11 21:42:35 +00:00
|
|
|
g_destroy_bio(bp);
|
2003-02-07 23:08:24 +00:00
|
|
|
bp2->bio_inbed++;
|
|
|
|
if (bp2->bio_children == bp2->bio_inbed)
|
2002-10-09 07:11:59 +00:00
|
|
|
g_io_deliver(bp2, bp2->bio_error);
|
2002-03-11 21:42:35 +00:00
|
|
|
}
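A hedged sketch of the cloning pattern g_std_done() completes: a
pass-through start method clones the parent bio, points the clone's done
method here, and g_std_done() delivers the parent once every child has come
inbed. cp is the class's consumer; the error handling shown is the
customary minimum.

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;	/* aggregate completion into bp */
	g_io_request(bp2, cp);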
|
|
|
|
|
|
|
|
/* XXX: maybe this is only g_slice_spoiled */
|
|
|
|
|
|
|
|
void
|
|
|
|
g_std_spoiled(struct g_consumer *cp)
|
|
|
|
{
|
2003-05-02 06:42:59 +00:00
|
|
|
struct g_geom *gp;
|
|
|
|
struct g_provider *pp;
|
2002-03-11 21:42:35 +00:00
|
|
|
|
|
|
|
g_topology_assert();
|
2004-03-10 08:49:08 +00:00
|
|
|
G_VALID_CONSUMER(cp);
|
|
|
|
g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp);
|
2012-07-29 11:51:48 +00:00
|
|
|
cp->flags |= G_CF_ORPHAN;
|
2003-05-02 06:42:59 +00:00
|
|
|
g_detach(cp);
|
|
|
|
gp = cp->geom;
|
|
|
|
LIST_FOREACH(pp, &gp->provider, provider)
|
|
|
|
g_orphan_provider(pp, ENXIO);
|
|
|
|
g_destroy_consumer(cp);
|
|
|
|
if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer))
|
|
|
|
g_destroy_geom(gp);
|
|
|
|
else
|
|
|
|
gp->flags |= G_GEOM_WITHER;
|
2002-03-11 21:42:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Spoiling happens when a provider is opened for writing while consumers
|
|
|
|
* configured by in-band data (slicers, for instance) are attached.
|
|
|
|
* Since the write might potentially change the in-band data, such consumers
|
|
|
|
* need to re-evaluate their existence after the writing session closes.
|
|
|
|
* We do this by (offering to) tear them down when the open for write happens
|
|
|
|
* in return for a re-taste when it closes again.
|
|
|
|
* Together with the fact that such consumers grab an 'e' bit whenever they
|
|
|
|
* are open, regardless of mode, this ends up DTRT.
|
|
|
|
*/
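A hedged sketch of how a class opts into this protocol: install a spoiled
method on its geom when the geom is created, to be invoked from
g_spoil_event() below once the consumer has been marked G_CF_SPOILED.

	gp->spoiled = g_std_spoiled;	/* or a class-specific handler */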
|
|
|
|
|
2003-04-23 20:06:38 +00:00
|
|
|
static void
|
|
|
|
g_spoil_event(void *arg, int flag)
|
|
|
|
{
|
|
|
|
struct g_provider *pp;
|
|
|
|
struct g_consumer *cp, *cp2;
|
|
|
|
|
|
|
|
g_topology_assert();
|
|
|
|
if (flag == EV_CANCEL)
|
|
|
|
return;
|
|
|
|
pp = arg;
|
2004-03-10 08:49:08 +00:00
|
|
|
G_VALID_PROVIDER(pp);
|
2014-05-19 16:08:15 +00:00
|
|
|
g_trace(G_T_TOPOLOGY, "%s %p(%s:%s:%s)", __func__, pp,
|
|
|
|
pp->geom->class->name, pp->geom->name, pp->name);
|
2003-04-23 20:06:38 +00:00
|
|
|
for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) {
|
|
|
|
cp2 = LIST_NEXT(cp, consumers);
|
2012-07-29 11:51:48 +00:00
|
|
|
if ((cp->flags & G_CF_SPOILED) == 0)
|
2003-04-23 20:06:38 +00:00
|
|
|
continue;
|
2012-07-29 11:51:48 +00:00
|
|
|
cp->flags &= ~G_CF_SPOILED;
|
2003-04-23 20:06:38 +00:00
|
|
|
if (cp->geom->spoiled == NULL)
|
|
|
|
continue;
|
|
|
|
cp->geom->spoiled(cp);
|
|
|
|
g_topology_assert();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2002-03-11 21:42:35 +00:00
|
|
|
void
|
|
|
|
g_spoil(struct g_provider *pp, struct g_consumer *cp)
|
|
|
|
{
|
|
|
|
struct g_consumer *cp2;
|
|
|
|
|
|
|
|
g_topology_assert();
|
2004-03-10 08:49:08 +00:00
|
|
|
G_VALID_PROVIDER(pp);
|
|
|
|
G_VALID_CONSUMER(cp);
|
2002-03-11 21:42:35 +00:00
|
|
|
|
|
|
|
LIST_FOREACH(cp2, &pp->consumers, consumers) {
|
|
|
|
if (cp2 == cp)
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr));
|
|
|
|
KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw));
|
|
|
|
*/
|
|
|
|
KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace));
|
2012-07-29 11:51:48 +00:00
|
|
|
cp2->flags |= G_CF_SPOILED;
|
2002-03-11 21:42:35 +00:00
|
|
|
}
|
2003-04-23 20:46:12 +00:00
|
|
|
g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL);
|
2002-03-11 21:42:35 +00:00
|
|
|
}
|
|
|
|
|
2012-07-29 11:51:48 +00:00
|
|
|
static void
|
|
|
|
g_media_changed_event(void *arg, int flag)
|
|
|
|
{
|
|
|
|
struct g_provider *pp;
|
|
|
|
int retaste;
|
|
|
|
|
|
|
|
g_topology_assert();
|
|
|
|
if (flag == EV_CANCEL)
|
|
|
|
return;
|
|
|
|
pp = arg;
|
|
|
|
G_VALID_PROVIDER(pp);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If provider was not open for writing, queue retaste after spoiling.
|
|
|
|
* If it was, retaste will happen automatically on close.
|
|
|
|
*/
|
|
|
|
retaste = (pp->acw == 0 && pp->error == 0 &&
|
|
|
|
!(pp->geom->flags & G_GEOM_WITHER));
|
|
|
|
g_spoil_event(arg, flag);
|
|
|
|
if (retaste)
|
|
|
|
g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
g_media_changed(struct g_provider *pp, int flag)
|
|
|
|
{
|
|
|
|
struct g_consumer *cp;
|
|
|
|
|
|
|
|
LIST_FOREACH(cp, &pp->consumers, consumers)
|
|
|
|
cp->flags |= G_CF_SPOILED;
|
|
|
|
return (g_post_event(g_media_changed_event, pp, flag, pp, NULL));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
g_media_gone(struct g_provider *pp, int flag)
|
|
|
|
{
|
|
|
|
struct g_consumer *cp;
|
|
|
|
|
|
|
|
LIST_FOREACH(cp, &pp->consumers, consumers)
|
|
|
|
cp->flags |= G_CF_SPOILED;
|
|
|
|
return (g_post_event(g_spoil_event, pp, flag, pp, NULL));
|
|
|
|
}
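A hedged usage sketch for the two entry points above; flag is passed
through to g_post_event(), so it is M_WAITOK or M_NOWAIT. A
removable-media driver would call these from its status handling.

	(void)g_media_changed(pp, M_NOWAIT);	/* new media inserted */
	(void)g_media_gone(pp, M_NOWAIT);	/* media removed */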
|
|
|
|
|
2002-04-09 15:13:42 +00:00
|
|
|
int
|
|
|
|
g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len)
|
|
|
|
{
|
|
|
|
int error, i;
|
|
|
|
|
|
|
|
i = len;
|
|
|
|
error = g_io_getattr(attr, cp, &i, var);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if (i != len)
|
|
|
|
return (EINVAL);
|
|
|
|
return (0);
|
|
|
|
}
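A hedged usage sketch: callers normally reach g_getattr__() through a
g_getattr() wrapper macro in geom.h that supplies sizeof *(v) as the
length, so the attribute payload must exactly fill the variable.

	int fwsectors;

	if (g_getattr("GEOM::fwsectors", cp, &fwsectors) == 0)
		printf("%d sectors per track\n", fwsectors);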
|
2002-04-23 11:48:45 +00:00
|
|
|
|
2011-04-27 00:10:26 +00:00
|
|
|
static int
|
|
|
|
g_get_device_prefix_len(const char *name)
|
|
|
|
{
|
|
|
|
int len;
|
|
|
|
|
|
|
|
if (strncmp(name, "ada", 3) == 0)
|
|
|
|
len = 3;
|
|
|
|
else if (strncmp(name, "ad", 2) == 0)
|
|
|
|
len = 2;
|
|
|
|
else
|
|
|
|
return (0);
|
|
|
|
if (name[len] < '0' || name[len] > '9')
|
|
|
|
return (0);
|
|
|
|
do {
|
|
|
|
len++;
|
|
|
|
} while (name[len] >= '0' && name[len] <= '9');
|
|
|
|
return (len);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
g_compare_names(const char *namea, const char *nameb)
|
|
|
|
{
|
|
|
|
int deva, devb;
|
|
|
|
|
|
|
|
if (strcmp(namea, nameb) == 0)
|
|
|
|
return (1);
|
|
|
|
deva = g_get_device_prefix_len(namea);
|
|
|
|
if (deva == 0)
|
|
|
|
return (0);
|
|
|
|
devb = g_get_device_prefix_len(nameb);
|
|
|
|
if (devb == 0)
|
|
|
|
return (0);
|
|
|
|
if (strcmp(namea + deva, nameb + devb) == 0)
|
|
|
|
return (1);
|
|
|
|
return (0);
|
|
|
|
}
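A hedged sketch of what the prefix logic accepts; note that the unit digits
are part of the stripped prefix, so legacy ad(4) names match their ada(4)
successors even when the unit number changed across the transition.

	g_compare_names("ada0s1a", "ada0s1a");	/* 1: exact match */
	g_compare_names("ad4s1a", "ada2s1a");	/* 1: renamed device class */
	g_compare_names("da0s1a", "ada0s1a");	/* 0: "da" is not handled here */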
|
|
|
|
|
2017-08-07 21:12:28 +00:00
|
|
|
void
|
|
|
|
g_geom_add_alias(struct g_geom *gp, const char *alias)
|
|
|
|
{
|
|
|
|
struct g_geom_alias *gap;
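	/* One allocation: the alias string lives right after the header. */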
|
|
|
|
|
|
|
|
gap = (struct g_geom_alias *)g_malloc(
|
|
|
|
sizeof(struct g_geom_alias) + strlen(alias) + 1, M_WAITOK);
|
|
|
|
strcpy((char *)(gap + 1), alias);
|
|
|
|
gap->ga_alias = (const char *)(gap + 1);
|
|
|
|
LIST_INSERT_HEAD(&gp->aliases, gap, ga_next);
|
|
|
|
}
|
|
|
|
|
2006-09-15 16:36:45 +00:00
|
|
|
#if defined(DIAGNOSTIC) || defined(DDB)
|
2003-11-15 18:44:43 +00:00
|
|
|
/*
|
2010-04-19 20:07:35 +00:00
|
|
|
* This function walks the mesh and returns a non-zero integer if it
|
|
|
|
* finds that the argument pointer is an object. The return value indicates
|
|
|
|
* which type of object it is believed to be. If topology is not locked,
|
|
|
|
* this function is potentially dangerous, but we don't assert that the
|
|
|
|
* topology lock is held when called from the debugger.
|
2003-11-15 18:44:43 +00:00
|
|
|
*/
|
2004-03-10 08:49:08 +00:00
|
|
|
int
|
2003-11-15 18:44:43 +00:00
|
|
|
g_valid_obj(void const *ptr)
|
|
|
|
{
|
|
|
|
struct g_class *mp;
|
|
|
|
struct g_geom *gp;
|
|
|
|
struct g_consumer *cp;
|
|
|
|
struct g_provider *pp;
|
|
|
|
|
2010-04-19 20:07:35 +00:00
|
|
|
#ifdef KDB
|
|
|
|
if (kdb_active == 0)
|
|
|
|
#endif
|
|
|
|
g_topology_assert();
|
2009-07-01 20:16:29 +00:00
|
|
|
|
2003-11-15 18:44:43 +00:00
|
|
|
LIST_FOREACH(mp, &g_classes, class) {
|
|
|
|
if (ptr == mp)
|
|
|
|
return (1);
|
|
|
|
LIST_FOREACH(gp, &mp->geom, geom) {
|
|
|
|
if (ptr == gp)
|
2004-03-10 08:49:08 +00:00
|
|
|
return (2);
|
2003-11-15 18:44:43 +00:00
|
|
|
LIST_FOREACH(cp, &gp->consumer, consumer)
|
|
|
|
if (ptr == cp)
|
2004-03-10 08:49:08 +00:00
|
|
|
return (3);
|
2003-11-15 18:44:43 +00:00
|
|
|
LIST_FOREACH(pp, &gp->provider, provider)
|
|
|
|
if (ptr == pp)
|
2004-03-10 08:49:08 +00:00
|
|
|
return (4);
|
2003-11-15 18:44:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
	return (0);
|
|
|
|
}
|
2004-03-10 08:49:08 +00:00
|
|
|
#endif
|
2006-09-15 16:36:45 +00:00
|
|
|
|
|
|
|
#ifdef DDB
|
|
|
|
|
|
|
|
#define gprintf(...) do { \
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("%*s", indent, ""); \
|
|
|
|
db_printf(__VA_ARGS__); \
|
2006-09-15 16:36:45 +00:00
|
|
|
} while (0)
|
|
|
|
#define gprintln(...) do { \
|
|
|
|
gprintf(__VA_ARGS__); \
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("\n"); \
|
2006-09-15 16:36:45 +00:00
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define ADDFLAG(obj, flag, sflag) do { \
|
|
|
|
if ((obj)->flags & (flag)) { \
|
|
|
|
if (comma) \
|
|
|
|
strlcat(str, ",", size); \
|
|
|
|
strlcat(str, (sflag), size); \
|
|
|
|
comma = 1; \
|
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
static char *
|
|
|
|
provider_flags_to_string(struct g_provider *pp, char *str, size_t size)
|
|
|
|
{
|
|
|
|
int comma = 0;
|
|
|
|
|
|
|
|
bzero(str, size);
|
|
|
|
if (pp->flags == 0) {
|
|
|
|
strlcpy(str, "NONE", size);
|
|
|
|
return (str);
|
|
|
|
}
|
|
|
|
ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER");
|
|
|
|
ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN");
|
|
|
|
return (str);
|
|
|
|
}
|
|
|
|
|
|
|
|
static char *
|
|
|
|
geom_flags_to_string(struct g_geom *gp, char *str, size_t size)
|
|
|
|
{
|
|
|
|
int comma = 0;
|
|
|
|
|
|
|
|
bzero(str, size);
|
|
|
|
if (gp->flags == 0) {
|
|
|
|
strlcpy(str, "NONE", size);
|
|
|
|
return (str);
|
|
|
|
}
|
|
|
|
ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER");
|
|
|
|
return (str);
|
|
|
|
}
|
|
|
|
static void
|
|
|
|
db_show_geom_consumer(int indent, struct g_consumer *cp)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (indent == 0) {
|
|
|
|
gprintln("consumer: %p", cp);
|
|
|
|
gprintln(" class: %s (%p)", cp->geom->class->name,
|
|
|
|
cp->geom->class);
|
|
|
|
gprintln(" geom: %s (%p)", cp->geom->name, cp->geom);
|
|
|
|
if (cp->provider == NULL)
|
|
|
|
gprintln(" provider: none");
|
|
|
|
else {
|
|
|
|
gprintln(" provider: %s (%p)", cp->provider->name,
|
|
|
|
cp->provider);
|
|
|
|
}
|
|
|
|
gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace);
|
2012-07-29 11:51:48 +00:00
|
|
|
gprintln(" flags: 0x%04x", cp->flags);
|
2006-09-15 16:36:45 +00:00
|
|
|
gprintln(" nstart: %u", cp->nstart);
|
|
|
|
gprintln(" nend: %u", cp->nend);
|
|
|
|
} else {
|
|
|
|
gprintf("consumer: %p (%s), access=r%dw%de%d", cp,
|
|
|
|
cp->provider != NULL ? cp->provider->name : "none",
|
|
|
|
cp->acr, cp->acw, cp->ace);
|
2012-07-29 11:51:48 +00:00
|
|
|
if (cp->flags)
|
|
|
|
db_printf(", flags=0x%04x", cp->flags);
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("\n");
|
2006-09-15 16:36:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
db_show_geom_provider(int indent, struct g_provider *pp)
|
|
|
|
{
|
|
|
|
struct g_consumer *cp;
|
|
|
|
char flags[64];
|
|
|
|
|
|
|
|
if (indent == 0) {
|
|
|
|
gprintln("provider: %s (%p)", pp->name, pp);
|
|
|
|
gprintln(" class: %s (%p)", pp->geom->class->name,
|
|
|
|
pp->geom->class);
|
|
|
|
gprintln(" geom: %s (%p)", pp->geom->name, pp->geom);
|
|
|
|
gprintln(" mediasize: %jd", (intmax_t)pp->mediasize);
|
|
|
|
gprintln(" sectorsize: %u", pp->sectorsize);
|
2018-10-27 16:14:42 +00:00
|
|
|
gprintln(" stripesize: %ju", (uintmax_t)pp->stripesize);
|
|
|
|
gprintln(" stripeoffset: %ju", (uintmax_t)pp->stripeoffset);
|
2006-09-15 16:36:45 +00:00
|
|
|
gprintln(" access: r%dw%de%d", pp->acr, pp->acw,
|
|
|
|
pp->ace);
|
|
|
|
gprintln(" flags: %s (0x%04x)",
|
|
|
|
provider_flags_to_string(pp, flags, sizeof(flags)),
|
|
|
|
pp->flags);
|
|
|
|
gprintln(" error: %d", pp->error);
|
|
|
|
gprintln(" nstart: %u", pp->nstart);
|
|
|
|
gprintln(" nend: %u", pp->nend);
|
|
|
|
if (LIST_EMPTY(&pp->consumers))
|
|
|
|
gprintln(" consumers: none");
|
|
|
|
} else {
|
|
|
|
gprintf("provider: %s (%p), access=r%dw%de%d",
|
|
|
|
pp->name, pp, pp->acr, pp->acw, pp->ace);
|
|
|
|
if (pp->flags != 0) {
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf(", flags=%s (0x%04x)",
|
2006-09-15 16:36:45 +00:00
|
|
|
provider_flags_to_string(pp, flags, sizeof(flags)),
|
|
|
|
pp->flags);
|
|
|
|
}
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("\n");
|
2006-09-15 16:36:45 +00:00
|
|
|
}
|
|
|
|
if (!LIST_EMPTY(&pp->consumers)) {
|
2008-05-18 21:13:10 +00:00
|
|
|
LIST_FOREACH(cp, &pp->consumers, consumers) {
|
2006-09-15 16:36:45 +00:00
|
|
|
db_show_geom_consumer(indent + 2, cp);
|
2008-05-18 21:13:10 +00:00
|
|
|
if (db_pager_quit)
|
|
|
|
break;
|
|
|
|
}
|
2006-09-15 16:36:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
db_show_geom_geom(int indent, struct g_geom *gp)
|
|
|
|
{
|
|
|
|
struct g_provider *pp;
|
|
|
|
struct g_consumer *cp;
|
|
|
|
char flags[64];
|
|
|
|
|
|
|
|
if (indent == 0) {
|
|
|
|
gprintln("geom: %s (%p)", gp->name, gp);
|
|
|
|
gprintln(" class: %s (%p)", gp->class->name, gp->class);
|
|
|
|
gprintln(" flags: %s (0x%04x)",
|
|
|
|
geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags);
|
|
|
|
gprintln(" rank: %d", gp->rank);
|
|
|
|
if (LIST_EMPTY(&gp->provider))
|
|
|
|
gprintln(" providers: none");
|
|
|
|
if (LIST_EMPTY(&gp->consumer))
|
|
|
|
gprintln(" consumers: none");
|
|
|
|
} else {
|
|
|
|
gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank);
|
|
|
|
if (gp->flags != 0) {
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf(", flags=%s (0x%04x)",
|
2006-09-15 16:36:45 +00:00
|
|
|
geom_flags_to_string(gp, flags, sizeof(flags)),
|
|
|
|
gp->flags);
|
|
|
|
}
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("\n");
|
2006-09-15 16:36:45 +00:00
|
|
|
}
|
|
|
|
if (!LIST_EMPTY(&gp->provider)) {
|
2008-05-18 21:13:10 +00:00
|
|
|
LIST_FOREACH(pp, &gp->provider, provider) {
|
2006-09-15 16:36:45 +00:00
|
|
|
db_show_geom_provider(indent + 2, pp);
|
2008-05-18 21:13:10 +00:00
|
|
|
if (db_pager_quit)
|
|
|
|
break;
|
|
|
|
}
|
2006-09-15 16:36:45 +00:00
|
|
|
}
|
|
|
|
if (!LIST_EMPTY(&gp->consumer)) {
|
2008-05-18 21:13:10 +00:00
|
|
|
LIST_FOREACH(cp, &gp->consumer, consumer) {
|
2006-09-15 16:36:45 +00:00
|
|
|
db_show_geom_consumer(indent + 2, cp);
|
2008-05-18 21:13:10 +00:00
|
|
|
if (db_pager_quit)
|
|
|
|
break;
|
|
|
|
}
|
2006-09-15 16:36:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
db_show_geom_class(struct g_class *mp)
|
|
|
|
{
|
|
|
|
struct g_geom *gp;
|
|
|
|
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("class: %s (%p)\n", mp->name, mp);
|
2008-05-18 21:13:10 +00:00
|
|
|
LIST_FOREACH(gp, &mp->geom, geom) {
|
2006-09-15 16:36:45 +00:00
|
|
|
db_show_geom_geom(2, gp);
|
2008-05-18 21:13:10 +00:00
|
|
|
if (db_pager_quit)
|
|
|
|
break;
|
|
|
|
}
|
2006-09-15 16:36:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Print the GEOM topology or the given object.
|
|
|
|
*/
|
|
|
|
DB_SHOW_COMMAND(geom, db_show_geom)
|
|
|
|
{
|
|
|
|
struct g_class *mp;
|
|
|
|
|
|
|
|
if (!have_addr) {
|
|
|
|
/* No address given, print the entire topology. */
|
|
|
|
LIST_FOREACH(mp, &g_classes, class) {
|
|
|
|
db_show_geom_class(mp);
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("\n");
|
2008-05-18 21:13:10 +00:00
|
|
|
if (db_pager_quit)
|
|
|
|
break;
|
2006-09-15 16:36:45 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
switch (g_valid_obj((void *)addr)) {
|
|
|
|
case 1:
|
|
|
|
db_show_geom_class((struct g_class *)addr);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
db_show_geom_geom(0, (struct g_geom *)addr);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
db_show_geom_consumer(0, (struct g_consumer *)addr);
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
db_show_geom_provider(0, (struct g_provider *)addr);
|
|
|
|
break;
|
|
|
|
default:
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("Not a GEOM object.\n");
|
2006-09-15 16:36:45 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
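/*
 * A hedged usage sketch from the ddb(4) prompt (the address is
 * illustrative; any pointer g_valid_obj() recognizes works):
 *
 *	db> show geom
 *	db> show geom 0xfffff80003a7c000
 */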
|
|
|
|
|
2009-05-26 07:29:17 +00:00
|
|
|
static void
|
|
|
|
db_print_bio_cmd(struct bio *bp)
|
|
|
|
{
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf(" cmd: ");
|
2009-05-26 07:29:17 +00:00
|
|
|
switch (bp->bio_cmd) {
|
2009-05-26 10:03:44 +00:00
|
|
|
case BIO_READ: db_printf("BIO_READ"); break;
|
|
|
|
case BIO_WRITE: db_printf("BIO_WRITE"); break;
|
|
|
|
case BIO_DELETE: db_printf("BIO_DELETE"); break;
|
|
|
|
case BIO_GETATTR: db_printf("BIO_GETATTR"); break;
|
|
|
|
case BIO_FLUSH: db_printf("BIO_FLUSH"); break;
|
|
|
|
case BIO_CMD0: db_printf("BIO_CMD0"); break;
|
|
|
|
case BIO_CMD1: db_printf("BIO_CMD1"); break;
|
|
|
|
case BIO_CMD2: db_printf("BIO_CMD2"); break;
|
Add support for managing Shingled Magnetic Recording (SMR) drives.
This change includes support for SCSI SMR drives (which conform to the
Zoned Block Commands or ZBC spec) and ATA SMR drives (which conform to
the Zoned ATA Command Set or ZAC spec) behind SAS expanders.
This includes full management support through the GEOM BIO interface, and
through a new userland utility, zonectl(8), and through camcontrol(8).
This is now ready for filesystems to use to detect and manage zoned drives.
(There is no work in progress that I know of to use this for ZFS or UFS; if
anyone is interested, let me know and I may have some suggestions.)
Also, improve ATA command passthrough and dispatch support, both via ATA
and ATA passthrough over SCSI.
Also, add support to camcontrol(8) for the ATA Extended Power Conditions
feature set. You can now manage ATA device power states, and set various
idle time thresholds for a drive to enter lower power states.
Note that this change cannot be MFCed in full, because it depends on
changes to the struct bio API that break compatibility. In order to
avoid breaking the stable API, only changes that don't touch or depend on
the struct bio changes can be merged. For example, the camcontrol(8)
changes don't depend on the new bio API, but zonectl(8) and the probe
changes to the da(4) and ada(4) drivers do depend on it.
Also note that the SMR changes have not yet been tested with an actual
SCSI ZBC device, or a SCSI to ATA translation layer (SAT) that supports
ZBC to ZAC translation. I have not yet gotten a suitable drive or SAT
layer, so any testing help would be appreciated. These changes have been
tested with Seagate Host Aware SATA drives attached to both SAS and SATA
controllers. Also, I do not have any SATA Host Managed devices, and I
suspect that it may take additional (hopefully minor) changes to support
them.
Thanks to Seagate for supplying the test hardware and answering questions.
sbin/camcontrol/Makefile:
Add epc.c and zone.c.
sbin/camcontrol/camcontrol.8:
Document the zone and epc subcommands.
sbin/camcontrol/camcontrol.c:
Add the zone and epc subcommands.
Add auxiliary register support to build_ata_cmd(). Make sure to
set the CAM_ATAIO_NEEDRESULT, CAM_ATAIO_DMA, and CAM_ATAIO_FPDMA
flags as appropriate for ATA commands.
Add a new get_ata_status() function to parse ATA result from SCSI
sense descriptors (for ATA passthrough over SCSI) and ATA I/O
requests.
sbin/camcontrol/camcontrol.h:
Update the build_ata_cmd() prototype.
Add get_ata_status(), zone(), and epc().
sbin/camcontrol/epc.c:
Support for ATA Extended Power Conditions features. This includes
support for all features documented in the ACS-4 Revision 12
specification from t13.org (dated February 18, 2016).
The EPC feature set allows putting a drive into a low power mode
immediately, or setting timeouts so that the drive will
automatically enter progressively lower power states after various
idle times.
sbin/camcontrol/fwdownload.c:
Update the firmware download code for the new build_ata_cmd()
arguments.
sbin/camcontrol/zone.c:
Implement support for Shingled Magnetic Recording (SMR) drives
via SCSI Zoned Block Commands (ZBC) and ATA Zoned Device ATA
Command Set (ZAC).
These specs were developed in concert, and are functionally
identical. The primary differences are due to SCSI and ATA
differences. (SCSI is big endian, ATA is little endian, for
example.)
This includes support for all commands defined in the ZBC and
ZAC specs.
sys/cam/ata/ata_all.c:
Decode a number of additional ATA command names in ata_op_string().
Add a new CCB building function, ata_read_log().
Add ata_zac_mgmt_in() and ata_zac_mgmt_out() CCB building
functions. These support both DMA and NCQ encapsulation.
sys/cam/ata/ata_all.h:
Add prototypes for ata_read_log(), ata_zac_mgmt_out(), and
ata_zac_mgmt_in().
sys/cam/ata/ata_da.c:
Revamp the ada(4) driver to support zoned devices.
Add four new probe states to gather information needed for zone
support.
Add a new adasetflags() function to avoid duplication of large
blocks of flag setting between the async handler and register
functions.
Add new sysctl variables that describe zone support and parameters.
Add support for the new BIO_ZONE bio, and all of its subcommands:
DISK_ZONE_OPEN, DISK_ZONE_CLOSE, DISK_ZONE_FINISH, DISK_ZONE_RWP,
DISK_ZONE_REPORT_ZONES, and DISK_ZONE_GET_PARAMS.
sys/cam/scsi/scsi_all.c:
Add command descriptions for the ZBC IN/OUT commands.
Add descriptions for ZBC Host Managed devices.
Add a new function, scsi_ata_pass() to do ATA passthrough over
SCSI. This will eventually replace scsi_ata_pass_16() -- it
can create the 12, 16, and 32-byte variants of the ATA
PASS-THROUGH command, and supports setting all of the
registers defined as of SAT-4, Revision 5 (March 11, 2016).
Change scsi_ata_identify() to use scsi_ata_pass() instead of
scsi_ata_pass_16().
Add a new scsi_ata_read_log() function to facilitate reading
ATA logs via SCSI.
sys/cam/scsi/scsi_all.h:
Add the new ATA PASS-THROUGH(32) command CDB. Add extended and
variable CDB opcodes.
Add Zoned Block Device Characteristics VPD page.
Add ATA Return SCSI sense descriptor.
Add prototypes for scsi_ata_read_log() and scsi_ata_pass().
sys/cam/scsi/scsi_da.c:
Revamp the da(4) driver to support zoned devices.
Add five new probe states, four of which are needed for ATA
devices.
Add five new sysctl variables that describe zone support and
parameters.
The da(4) driver supports SCSI ZBC devices, as well as ATA ZAC
devices when they are attached via a SCSI to ATA Translation (SAT)
layer. Since ZBC -> ZAC translation is a new feature in the T10
SAT-4 spec, most SATA drives will be supported via ATA commands
sent via the SCSI ATA PASS-THROUGH command. The da(4) driver will
prefer the ZBC interface, if it is available, for performance
reasons, but will use the ATA PASS-THROUGH interface to the ZAC
command set if the SAT layer doesn't support translation yet.
As I mentioned above, ZBC command support is untested.
Add support for the new BIO_ZONE bio, and all of its subcommands:
DISK_ZONE_OPEN, DISK_ZONE_CLOSE, DISK_ZONE_FINISH, DISK_ZONE_RWP,
DISK_ZONE_REPORT_ZONES, and DISK_ZONE_GET_PARAMS.
Add scsi_zbc_in() and scsi_zbc_out() CCB building functions.
Add scsi_ata_zac_mgmt_out() and scsi_ata_zac_mgmt_in() CCB/CDB
building functions. Note that these have return values, unlike
almost all other CCB building functions in CAM. The reason is
that they can fail, depending upon the particular combination
of input parameters. The primary failure case is if the user
wants NCQ, but fails to specify additional CDB storage. NCQ
requires using the 32-byte version of the SCSI ATA PASS-THROUGH
command, and the current CAM CDB size is 16 bytes.
sys/cam/scsi/scsi_da.h:
Add ZBC IN and ZBC OUT CDBs and opcodes.
Add SCSI Report Zones data structures.
Add scsi_zbc_in(), scsi_zbc_out(), scsi_ata_zac_mgmt_out(), and
scsi_ata_zac_mgmt_in() prototypes.
sys/dev/ahci/ahci.c:
Fix SEND / RECEIVE FPDMA QUEUED in the ahci(4) driver.
ahci_setup_fis() previously set the top bits of the sector count
register in the FIS to 0 for FPDMA commands. This is okay for
read and write, because the PRIO field is the only thing in
those bits, and we don't implement that further up the stack.
But, for SEND and RECEIVE FPDMA QUEUED, the subcommand is in that
byte, so it needs to be transmitted to the drive.
In ahci_setup_fis(), always set the top 8 bits of the
sector count register. We need it in both the standard
and NCQ / FPDMA cases.
sys/geom/eli/g_eli.c:
Pass BIO_ZONE commands through the GELI class.
sys/geom/geom.h:
Add g_io_zonecmd() prototype.
sys/geom/geom_dev.c:
Add new DIOCZONECMD ioctl, which allows sending zone commands to
disks.
sys/geom/geom_disk.c:
Add support for BIO_ZONE commands.
sys/geom/geom_disk.h:
Add a new flag, DISKFLAG_CANZONE, that indicates that a given
GEOM disk client can handle BIO_ZONE commands.
sys/geom/geom_io.c:
Add a new function, g_io_zonecmd(), that handles execution of
BIO_ZONE commands.
Add permissions check for BIO_ZONE commands.
Add command decoding for BIO_ZONE commands.
sys/geom/geom_subr.c:
Add DDB command decoding for BIO_ZONE commands.
sys/kern/subr_devstat.c:
Record statistics for REPORT ZONES commands. Note that the
number of bytes transferred for REPORT ZONES won't quite match
what is received from the hardware. This is because we're
necessarily counting bytes coming from the da(4) / ada(4) drivers,
which are using the disk_zone.h interface to communicate up
the stack. The structure sizes it uses are slightly different
than the SCSI and ATA structure sizes.
sys/sys/ata.h:
Add many bit and structure definitions for ZAC, NCQ, and EPC
command support.
sys/sys/bio.h:
Convert the bio_cmd field to a straight enumeration. This will
yield more space for additional commands in the future. After
change r297955 and other related changes, this is now possible.
Converting to an enumeration will also prevent use as a bitmask
in the future.
sys/sys/disk.h:
Define the DIOCZONECMD ioctl.
sys/sys/disk_zone.h:
Add a new API for managing zoned disks. This is very close to
the SCSI ZBC and ATA ZAC standards, but uses integers in native
byte order instead of big endian (SCSI) or little endian (ATA)
byte arrays.
This is intended to offer the complete feature set of the ZBC
and ZAC disk management without requiring the application developer
to include SCSI or ATA headers. We also use one set of headers
for ioctl consumers and kernel bio-level consumers.
sys/sys/param.h:
Bump __FreeBSD_version for sys/bio.h command changes, and inclusion
of SMR support.
usr.sbin/Makefile:
Add the zonectl utility.
usr.sbin/diskinfo/diskinfo.c
Add disk zoning capability to the 'diskinfo -v' output.
usr.sbin/zonectl/Makefile:
Add zonectl makefile.
usr.sbin/zonectl/zonectl.8
zonectl(8) man page.
usr.sbin/zonectl/zonectl.c
The zonectl(8) utility. This allows managing SCSI or ATA zoned
disks via the disk_zone.h API. You can report zones, reset write
pointers, get parameters, etc.
Sponsored by: Spectra Logic
Differential Revision: https://reviews.freebsd.org/D6147
Reviewed by: wblock (documentation)
2016-05-19 14:08:36 +00:00
|
|
|
case BIO_ZONE: db_printf("BIO_ZONE"); break;
|
2009-05-26 10:03:44 +00:00
|
|
|
default: db_printf("UNKNOWN"); break;
|
2009-05-26 07:29:17 +00:00
|
|
|
}
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("\n");
|
2009-05-26 07:29:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
db_print_bio_flags(struct bio *bp)
|
|
|
|
{
|
|
|
|
int comma;
|
|
|
|
|
|
|
|
comma = 0;
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf(" flags: ");
|
2009-05-26 07:29:17 +00:00
|
|
|
if (bp->bio_flags & BIO_ERROR) {
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("BIO_ERROR");
|
2009-05-26 07:29:17 +00:00
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (bp->bio_flags & BIO_DONE) {
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("%sBIO_DONE", (comma ? ", " : ""));
|
2009-05-26 07:29:17 +00:00
|
|
|
comma = 1;
|
|
|
|
}
|
|
|
|
if (bp->bio_flags & BIO_ONQUEUE)
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("%sBIO_ONQUEUE", (comma ? ", " : ""));
|
|
|
|
db_printf("\n");
|
2009-05-26 07:29:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Print useful information in a BIO
|
|
|
|
*/
|
|
|
|
DB_SHOW_COMMAND(bio, db_show_bio)
|
|
|
|
{
|
|
|
|
struct bio *bp;
|
|
|
|
|
|
|
|
if (have_addr) {
|
|
|
|
bp = (struct bio *)addr;
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf("BIO %p\n", bp);
|
2009-05-26 07:29:17 +00:00
|
|
|
db_print_bio_cmd(bp);
|
|
|
|
db_print_bio_flags(bp);
|
2016-04-14 05:10:41 +00:00
|
|
|
db_printf(" cflags: 0x%hx\n", bp->bio_cflags);
|
|
|
|
db_printf(" pflags: 0x%hx\n", bp->bio_pflags);
|
2009-05-26 14:15:06 +00:00
|
|
|
db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset);
|
|
|
|
db_printf(" length: %jd\n", (intmax_t)bp->bio_length);
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf(" bcount: %ld\n", bp->bio_bcount);
|
|
|
|
db_printf(" resid: %ld\n", bp->bio_resid);
|
2009-05-26 14:15:06 +00:00
|
|
|
db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed);
|
2009-05-26 10:03:44 +00:00
|
|
|
db_printf(" children: %u\n", bp->bio_children);
|
|
|
|
db_printf(" inbed: %u\n", bp->bio_inbed);
|
|
|
|
db_printf(" error: %d\n", bp->bio_error);
|
|
|
|
db_printf(" parent: %p\n", bp->bio_parent);
|
|
|
|
db_printf(" driver1: %p\n", bp->bio_driver1);
|
|
|
|
db_printf(" driver2: %p\n", bp->bio_driver2);
|
|
|
|
db_printf(" caller1: %p\n", bp->bio_caller1);
|
|
|
|
db_printf(" caller2: %p\n", bp->bio_caller2);
|
|
|
|
db_printf(" bio_from: %p\n", bp->bio_from);
|
|
|
|
db_printf(" bio_to: %p\n", bp->bio_to);
|
2016-10-31 23:09:52 +00:00
|
|
|
|
|
|
|
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
|
|
|
|
db_printf(" bio_track_bp: %p\n", bp->bio_track_bp);
|
|
|
|
#endif
|
2009-05-26 07:29:17 +00:00
|
|
|
}
|
|
|
|
}
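/*
 * A hedged usage sketch from the ddb(4) prompt (address illustrative):
 *
 *	db> show bio 0xfffff80002cd7400
 */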
|
|
|
|
|
2006-09-15 16:36:45 +00:00
|
|
|
#undef gprintf
|
|
|
|
#undef gprintln
|
|
|
|
#undef ADDFLAG
|
|
|
|
|
|
|
|
#endif /* DDB */
|