Implement read-only support for volumes in optimal state (without using
redundancy) for the following RAID levels: RAID4/5E/5EE/6/MDF.
Alexander Motin 2012-05-04 07:32:57 +00:00
parent 6632cb429f
commit 4b97ff6137
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=234993
3 changed files with 78 additions and 40 deletions

View File

@@ -24,7 +24,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd April 30, 2012
+.Dd May 3, 2012
 .Dt GRAID 8
 .Os
 .Sh NAME
@@ -261,9 +261,11 @@ own risk: RAID1 (3+ disks), RAID10 (6+ disks).
 .Sh SUPPORTED RAID LEVELS
 The GEOM RAID class follows a modular design, allowing different RAID levels
 to be used.
-Support for the following RAID levels is currently implemented: RAID0, RAID1,
-RAID1E, RAID5, RAID10, SINGLE, CONCAT.
-RAID5 support is read-only and only for volumes in optimal state.
+Full support for the following RAID levels is currently implemented:
+RAID0, RAID1, RAID1E, RAID10, SINGLE, CONCAT.
+The following RAID levels are supported as read-only for volumes in optimal
+state (without using redundancy): RAID4, RAID5, RAID5E, RAID5EE, RAID6,
+RAIDMDF.
 .Sh RAID LEVEL MIGRATION
 The GEOM RAID class has no support for RAID level migration, allowed by some
 metadata formats.
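
The "without using redundancy" wording is the key restriction: reads are
satisfied purely by mapping request offsets onto data disks, and parity is
never read back to rebuild anything, which is why only volumes in optimal
state qualify. For illustration, degraded-read support would need roughly
the following XOR reconstruction; this is a hypothetical sketch, not code
from this commit:

#include <stddef.h>
#include <stdint.h>

/*
 * Rebuild a lost RAID5 data strip by XOR-ing the parity strip with all
 * surviving data strips.  The read-only support added here never does
 * this; xor_reconstruct is a hypothetical helper, not graid code.
 */
static void
xor_reconstruct(uint8_t *missing, const uint8_t *const *strips,
    int nstrips, size_t strip_size)
{
    size_t i;
    int s;

    for (i = 0; i < strip_size; i++) {
        uint8_t b = 0;

        for (s = 0; s < nstrips; s++)   /* parity plus surviving data */
            b ^= strips[s][i];
        missing[i] = b;                 /* recovered strip byte */
    }
}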

View File

@@ -376,17 +376,17 @@ g_raid_volume_str2level(const char *str, int *level, int *qual)
     else if (strcasecmp(str, "RAID3-P0") == 0) {
         *level = G_RAID_VOLUME_RL_RAID3;
         *qual = G_RAID_VOLUME_RLQ_R3P0;
-    } else if (strcasecmp(str, "RAID3-PN") == 0 &&
+    } else if (strcasecmp(str, "RAID3-PN") == 0 ||
         strcasecmp(str, "RAID3") == 0) {
         *level = G_RAID_VOLUME_RL_RAID3;
-        *qual = G_RAID_VOLUME_RLQ_R3P0;
+        *qual = G_RAID_VOLUME_RLQ_R3PN;
     } else if (strcasecmp(str, "RAID4-P0") == 0) {
         *level = G_RAID_VOLUME_RL_RAID4;
         *qual = G_RAID_VOLUME_RLQ_R4P0;
-    } else if (strcasecmp(str, "RAID4-PN") == 0 &&
+    } else if (strcasecmp(str, "RAID4-PN") == 0 ||
         strcasecmp(str, "RAID4") == 0) {
         *level = G_RAID_VOLUME_RL_RAID4;
-        *qual = G_RAID_VOLUME_RLQ_R4P0;
+        *qual = G_RAID_VOLUME_RLQ_R4PN;
     } else if (strcasecmp(str, "RAID5-RA") == 0) {
         *level = G_RAID_VOLUME_RL_RAID5;
         *qual = G_RAID_VOLUME_RLQ_R5RA;
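
The condition fix matters because the old "&&" test could never be true:
str cannot compare equal to both "RAID3-PN" and "RAID3" at once, so the
unsuffixed spellings never matched; the old code also stored the P0
qualifier where PN was intended. A minimal standalone demonstration
(plain POSIX C, not graid code):

#include <stdio.h>
#include <strings.h>

int
main(void)
{
    const char *str = "RAID3";

    /* Old test: never true, str cannot equal both strings at once. */
    if (strcasecmp(str, "RAID3-PN") == 0 && strcasecmp(str, "RAID3") == 0)
        printf("old condition matched\n");      /* unreachable */

    /* New test: accepts either spelling, as intended. */
    if (strcasecmp(str, "RAID3-PN") == 0 || strcasecmp(str, "RAID3") == 0)
        printf("new condition matched\n");
    return (0);
}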

View File

@@ -106,9 +106,16 @@ g_raid_tr_taste_raid5(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
 
     trs = (struct g_raid_tr_raid5_object *)tr;
     qual = tr->tro_volume->v_raid_level_qualifier;
-    if (tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5 &&
+    if (tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID4 &&
+        qual >= 0 && qual <= 1) {
+        /* RAID4 */
+    } else if ((tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5 ||
+        tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5E ||
+        tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5EE ||
+        tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID6 ||
+        tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAIDMDF) &&
         qual >= 0 && qual <= 3) {
-        /* RAID5 */
+        /* RAID5/5E/5EE/6/MDF */
     } else
         return (G_RAID_TR_TASTE_FAIL);
     trs->trso_starting = 1;
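
The qualifier range checks decide what this module will taste: 0..1 for
RAID4 (parity fixed on the first or last disk, P0/PN) and 0..3 for the
RAID5 family. As the mapping code below reads them, bit 1 selects the
parity rotation direction (Right for qual < 2, Left otherwise) and bit 0
selects the data placement (Restart/Asymmetric vs. Continuation/Symmetric).
A small sketch of that decoding, under the assumption that the
G_RAID_VOLUME_RLQ_R5RA..R5LS constants in g_raid.h are simply 0 through 3:

#include <stdio.h>

/* Assumes RLQ_R5RA..R5LS == 0..3; verify against g_raid.h. */
static const char *
raid5_layout_name(unsigned qual)
{
    switch (qual & 3) {
    case 0:
        return ("RAID5-RA (Right Asymmetric, restart)");
    case 1:
        return ("RAID5-RS (Right Symmetric, continuation)");
    case 2:
        return ("RAID5-LA (Left Asymmetric)");
    default:
        return ("RAID5-LS (Left Symmetric)");
    }
}

int
main(void)
{
    unsigned q;

    for (q = 0; q <= 3; q++)
        printf("qual %u: %s\n", q, raid5_layout_name(q));
    return (0);
}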
@@ -203,30 +210,55 @@ g_raid_tr_iostart_raid5_read(struct g_raid_tr_object *tr, struct bio *bp)
     struct bio *cbp;
     char *addr;
     off_t offset, start, length, nstripe, remain;
-    int no, pno;
-    u_int strip_size, qual;
+    int no, pno, ddisks, pdisks;
+    u_int strip_size, lvl, qual;
 
     vol = tr->tro_volume;
     addr = bp->bio_data;
     strip_size = vol->v_strip_size;
+    lvl = tr->tro_volume->v_raid_level;
     qual = tr->tro_volume->v_raid_level_qualifier;
 
     /* Stripe number. */
     nstripe = bp->bio_offset / strip_size;
     /* Start position in stripe. */
     start = bp->bio_offset % strip_size;
 
+    /* Number of data and parity disks. */
+    if (lvl == G_RAID_VOLUME_RL_RAIDMDF)
+        pdisks = 3;
+    else if (lvl == G_RAID_VOLUME_RL_RAID5EE ||
+        lvl == G_RAID_VOLUME_RL_RAID6)
+        pdisks = 2;
+    else
+        pdisks = 1;
+    ddisks = vol->v_disks_count - pdisks;
     /* Parity disk number. */
-    pno = nstripe / (vol->v_disks_count - 1) % vol->v_disks_count;
-    if (qual >= 2)
-        pno = (vol->v_disks_count - 1) - pno;
-    /* Disk number. */
-    no = nstripe % (vol->v_disks_count - 1);
-    if (qual & 1) {
-        no = (pno + no + 1) % vol->v_disks_count;
-    } else if (no >= pno)
-        no++;
+    if (lvl == G_RAID_VOLUME_RL_RAID4) {
+        if (qual == 0)          /* P0 */
+            pno = 0;
+        else                    /* PN */
+            pno = ddisks;
+    } else {
+        pno = (nstripe / ddisks) % vol->v_disks_count;
+        if (qual >= 2) {        /* PN/Left */
+            pno = ddisks - pno;
+            if (pno < 0)
+                pno += vol->v_disks_count;
+        }
+    }
+    /* Data disk number. */
+    no = nstripe % ddisks;
+    if (lvl == G_RAID_VOLUME_RL_RAID4) {
+        if (qual == 0)
+            no += pdisks;
+    } else if (qual & 1) {      /* Continuation/Symmetric */
+        no = (pno + pdisks + no) % vol->v_disks_count;
+    } else if (no >= pno)       /* Restart/Asymmetric */
+        no += pdisks;
+    else
+        no += imax(0, pno + pdisks - vol->v_disks_count);
     /* Stripe start position in disk. */
-    offset = (nstripe / (vol->v_disks_count - 1)) * strip_size;
+    offset = (nstripe / ddisks) * strip_size;
     /* Length of data to operate. */
     remain = bp->bio_length;
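
Taken together, the parity-disk and data-disk computations place one parity
strip per stripe (two for RAID5EE/RAID6, three for RAIDMDF) and rotate the
parity position by one disk per stripe, rightward for qual < 2 and leftward
otherwise; for RAID4 it stays fixed. The following compilable sketch mirrors
the plain-RAID5 case (pdisks == 1) and prints the resulting layout for all
four qualifiers; it is an illustration, not graid code. The original's
"no += imax(...)" arm is a no-op when pdisks == 1 and is left out.

#include <stdio.h>

#define DISKS   4               /* vol->v_disks_count */

int
main(void)
{
    int nstripe, pno, no, qual;
    int ddisks = DISKS - 1;     /* data strips per stripe */

    for (qual = 0; qual <= 3; qual++) {
        printf("qual %d:\n", qual);
        for (nstripe = 0; nstripe < 9; nstripe++) {
            /* Parity disk number. */
            pno = (nstripe / ddisks) % DISKS;
            if (qual >= 2) {            /* PN/Left */
                pno = ddisks - pno;
                if (pno < 0)
                    pno += DISKS;
            }
            /* Data disk number. */
            no = nstripe % ddisks;
            if (qual & 1)               /* Continuation/Symmetric */
                no = (pno + 1 + no) % DISKS;
            else if (no >= pno)         /* Restart/Asymmetric */
                no++;
            printf("  strip %d -> data disk %d, parity disk %d\n",
                nstripe, no, pno);
        }
    }
    return (0);
}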
@@ -242,33 +274,37 @@ g_raid_tr_iostart_raid5_read(struct g_raid_tr_object *tr, struct bio *bp)
         cbp->bio_caller1 = &vol->v_subdisks[no];
         bioq_insert_tail(&queue, cbp);
         no++;
-        if (qual & 1) {
+        if (lvl == G_RAID_VOLUME_RL_RAID4) {
+            no %= vol->v_disks_count;
+            if (no == pno)
+                no = (no + pdisks) % vol->v_disks_count;
+        } else if (qual & 1) {          /* Continuation/Symmetric */
             no %= vol->v_disks_count;
             if (no == pno) {
-                if (qual < 2) {
-                    pno = (pno + 1) % vol->v_disks_count;
-                    no = (no + 2) % vol->v_disks_count;
-                } else if (pno == 0)
-                    pno = vol->v_disks_count - 1;
-                else
-                    pno--;
+                if (qual < 2)           /* P0/Right */
+                    pno++;
+                else                    /* PN/Left */
+                    pno += vol->v_disks_count - 1;
+                pno %= vol->v_disks_count;
+                no = (pno + pdisks) % vol->v_disks_count;
                 offset += strip_size;
             }
-        } else {
+        } else {                        /* Restart/Asymmetric */
             if (no == pno)
-                no++;
+                no += pdisks;
             if (no >= vol->v_disks_count) {
-                no %= vol->v_disks_count;
-                if (qual < 2)
-                    pno = (pno + 1) % vol->v_disks_count;
-                else if (pno == 0)
-                    pno = vol->v_disks_count - 1;
+                no -= vol->v_disks_count;
+                if (qual < 2)           /* P0/Right */
+                    pno++;
+                else                    /* PN/Left */
+                    pno += vol->v_disks_count - 1;
+                pno %= vol->v_disks_count;
+                if (no == pno)
+                    no += pdisks;
                 else
-                    pno--;
+                    no += imax(0, pno + pdisks - vol->v_disks_count);
                 offset += strip_size;
             }
-            if (no == pno)
-                no++;
         }
         remain -= length;
         addr += length;
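
Inside the I/O loop the same layout is maintained incrementally: after each
strip the data disk number advances, skips over the parity disk(s), and on
wrap-around the parity position rotates while the per-disk offset moves to
the next stripe. A compilable walk-through for plain RAID5 with the Right
Asymmetric layout (qual == 0); a hypothetical illustration, not graid code,
again omitting the "no += imax(...)" arm that is a no-op for pdisks == 1:

#include <stdio.h>

#define DISKS   4
#define PDISKS  1

int
main(void)
{
    int no = 1, pno = 0;        /* strip 0: parity on disk 0, data on 1 */
    int i;

    for (i = 0; i < 12; i++) {  /* walk 12 consecutive data strips */
        printf("strip %2d -> disk %d (parity on disk %d)\n", i, no, pno);
        no++;
        /* Restart/Asymmetric advance, mirroring the loop above. */
        if (no == pno)
            no += PDISKS;
        if (no >= DISKS) {
            no -= DISKS;
            pno = (pno + 1) % DISKS;    /* P0/Right rotation */
            if (no == pno)
                no += PDISKS;
            /* offset += strip_size: next stripe on each disk */
        }
    }
    return (0);
}

Running it shows the parity disk advancing from 0 through 3 across four
stripes while the data strips fill the remaining disks of each stripe.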