2000-10-13 13:04:45 +00:00
|
|
|
|
/*-
|
2003-02-20 20:02:32 +00:00
|
|
|
|
 * Copyright (c) 2000 - 2003 Søren Schmidt <sos@FreeBSD.org>
|
2000-10-13 13:04:45 +00:00
|
|
|
|
* All rights reserved.
|
|
|
|
|
*
|
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
|
* are met:
|
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
|
* notice, this list of conditions and the following disclaimer,
|
|
|
|
|
* without modification, immediately at the beginning of the file.
|
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
|
* 3. The name of the author may not be used to endorse or promote products
|
|
|
|
|
* derived from this software without specific prior written permission.
|
|
|
|
|
*
|
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
*/
|
|
|
|
|
|
2003-08-24 17:55:58 +00:00
|
|
|
|
#include <sys/cdefs.h>
|
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
|
2000-10-13 13:04:45 +00:00
|
|
|
|
#include "opt_ata.h"
|
|
|
|
|
#include <sys/param.h>
|
|
|
|
|
#include <sys/systm.h>
|
2001-03-15 15:36:25 +00:00
|
|
|
|
#include <sys/ata.h>
|
2000-10-13 13:04:45 +00:00
|
|
|
|
#include <sys/kernel.h>
|
2002-03-15 15:39:54 +00:00
|
|
|
|
#include <sys/proc.h>
|
2000-10-13 13:04:45 +00:00
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
|
#include <sys/bio.h>
|
|
|
|
|
#include <sys/bus.h>
|
|
|
|
|
#include <sys/conf.h>
|
|
|
|
|
#include <sys/disk.h>
|
|
|
|
|
#include <sys/cons.h>
|
2002-03-15 15:39:54 +00:00
|
|
|
|
#include <sys/unistd.h>
|
|
|
|
|
#include <sys/kthread.h>
|
2003-08-24 09:22:26 +00:00
|
|
|
|
#include <sys/taskqueue.h>
|
2000-10-13 13:04:45 +00:00
|
|
|
|
#include <machine/bus.h>
|
2001-02-06 16:44:25 +00:00
|
|
|
|
#include <sys/rman.h>
|
2003-04-01 15:06:26 +00:00
|
|
|
|
#include <geom/geom_disk.h>
|
2003-08-24 09:22:26 +00:00
|
|
|
|
#include <dev/pci/pcivar.h>
|
|
|
|
|
#include <dev/pci/pcireg.h>
|
2000-10-13 13:04:45 +00:00
|
|
|
|
#include <dev/ata/ata-all.h>
|
2003-02-20 20:02:32 +00:00
|
|
|
|
#include <dev/ata/ata-pci.h>
|
2000-10-13 13:04:45 +00:00
|
|
|
|
#include <dev/ata/ata-disk.h>
|
|
|
|
|
#include <dev/ata/ata-raid.h>
|
|
|
|
|
|
|
|
|
|
/* device structures */
|
2003-02-25 15:33:36 +00:00
|
|
|
|
static disk_strategy_t arstrategy;
|
2003-05-04 12:16:47 +00:00
|
|
|
|
static dumper_t ardump;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
|
|
|
|
/* prototypes */
|
2002-03-27 10:58:59 +00:00
|
|
|
|
static void ar_attach_raid(struct ar_softc *, int);
|
2000-10-13 13:04:45 +00:00
|
|
|
|
static void ar_done(struct bio *);
|
2002-02-12 11:35:15 +00:00
|
|
|
|
static void ar_config_changed(struct ar_softc *, int);
|
2002-03-15 15:39:54 +00:00
|
|
|
|
static void ar_rebuild(void *);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
static int ar_highpoint_read_conf(struct ad_softc *, struct ar_softc **);
|
2002-02-12 11:35:15 +00:00
|
|
|
|
static int ar_highpoint_write_conf(struct ar_softc *);
|
2002-03-27 10:58:59 +00:00
|
|
|
|
static int ar_promise_read_conf(struct ad_softc *, struct ar_softc **, int);
|
2002-02-12 11:35:15 +00:00
|
|
|
|
static int ar_promise_write_conf(struct ar_softc *);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
static int ar_rw(struct ad_softc *, u_int32_t, int, caddr_t, int);
|
2002-03-27 10:58:59 +00:00
|
|
|
|
static struct ata_device *ar_locate_disk(int);
|
2003-04-08 07:48:52 +00:00
|
|
|
|
static void ar_print_conf(struct ar_softc *);
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
|
|
|
|
/* internal vars */
|
2002-02-12 11:35:15 +00:00
|
|
|
|
static struct ar_softc **ar_table = NULL;
|
2000-12-08 20:09:00 +00:00
|
|
|
|
static MALLOC_DEFINE(M_AR, "AR driver", "ATA RAID driver");
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
|
2000-10-13 13:04:45 +00:00
|
|
|
|
int
|
2002-03-03 15:36:21 +00:00
|
|
|
|
ata_raiddisk_attach(struct ad_softc *adp)
|
|
|
|
|
{
|
|
|
|
|
struct ar_softc *rdp;
|
|
|
|
|
int array, disk;
|
|
|
|
|
|
|
|
|
|
if (ar_table) {
|
|
|
|
|
for (array = 0; array < MAX_ARRAYS; array++) {
|
|
|
|
|
if (!(rdp = ar_table[array]) || !rdp->flags)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++) {
|
2002-04-10 11:18:07 +00:00
|
|
|
|
if ((rdp->disks[disk].flags & AR_DF_ASSIGNED) &&
|
|
|
|
|
rdp->disks[disk].device == adp->device) {
|
2002-03-03 15:36:21 +00:00
|
|
|
|
ata_prtdev(rdp->disks[disk].device,
|
|
|
|
|
"inserted into ar%d disk%d as spare\n",
|
|
|
|
|
array, disk);
|
2002-04-10 11:18:07 +00:00
|
|
|
|
rdp->disks[disk].flags |= (AR_DF_PRESENT | AR_DF_SPARE);
|
2003-04-07 14:14:29 +00:00
|
|
|
|
AD_SOFTC(rdp->disks[disk])->flags |= AD_F_RAID_SUBDISK;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
ar_config_changed(rdp, 1);
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if (!ar_table)
|
|
|
|
|
ar_table = malloc(sizeof(struct ar_soft *) * MAX_ARRAYS,
|
|
|
|
|
M_AR, M_NOWAIT | M_ZERO);
|
|
|
|
|
if (!ar_table) {
|
|
|
|
|
ata_prtdev(adp->device, "no memory for ATA raid array\n");
|
2002-03-03 15:36:21 +00:00
|
|
|
|
return 0;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2003-08-24 09:22:26 +00:00
|
|
|
|
switch(pci_get_vendor(device_get_parent(adp->device->channel->dev))) {
|
2003-02-20 20:02:32 +00:00
|
|
|
|
case ATA_PROMISE_ID:
|
2002-02-12 11:35:15 +00:00
|
|
|
|
/* test RAID bit in PCI reg XXX */
|
2002-03-27 10:58:59 +00:00
|
|
|
|
return (ar_promise_read_conf(adp, ar_table, 0));
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
2003-02-20 20:02:32 +00:00
|
|
|
|
case ATA_HIGHPOINT_ID:
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
return (ar_highpoint_read_conf(adp, ar_table));
|
2002-03-27 10:58:59 +00:00
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
return (ar_promise_read_conf(adp, ar_table, 1));
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
2002-03-03 15:36:21 +00:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
|
ata_raiddisk_detach(struct ad_softc *adp)
|
|
|
|
|
{
|
|
|
|
|
struct ar_softc *rdp;
|
|
|
|
|
int array, disk;
|
|
|
|
|
|
|
|
|
|
if (ar_table) {
|
|
|
|
|
for (array = 0; array < MAX_ARRAYS; array++) {
|
|
|
|
|
if (!(rdp = ar_table[array]) || !rdp->flags)
|
|
|
|
|
continue;
|
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++) {
|
|
|
|
|
if (rdp->disks[disk].device == adp->device) {
|
|
|
|
|
ata_prtdev(rdp->disks[disk].device,
|
|
|
|
|
"deleted from ar%d disk%d\n", array, disk);
|
|
|
|
|
rdp->disks[disk].flags &= ~(AR_DF_PRESENT | AR_DF_ONLINE);
|
2002-03-27 10:58:59 +00:00
|
|
|
|
AD_SOFTC(rdp->disks[disk])->flags &= ~AD_F_RAID_SUBDISK;
|
2003-04-07 14:14:29 +00:00
|
|
|
|
rdp->disks[disk].device = NULL;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
ar_config_changed(rdp, 1);
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
void
|
2002-02-12 11:35:15 +00:00
|
|
|
|
ata_raid_attach()
|
2000-10-13 13:04:45 +00:00
|
|
|
|
{
|
2002-03-03 15:36:21 +00:00
|
|
|
|
struct ar_softc *rdp;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
int array;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
|
2002-02-16 08:10:24 +00:00
|
|
|
|
if (!ar_table)
|
|
|
|
|
return;
|
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
for (array = 0; array < MAX_ARRAYS; array++) {
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (!(rdp = ar_table[array]) || !rdp->flags)
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
continue;
|
2003-04-08 07:48:52 +00:00
|
|
|
|
if (bootverbose)
|
|
|
|
|
ar_print_conf(rdp);
|
2002-03-27 10:58:59 +00:00
|
|
|
|
ar_attach_raid(rdp, 0);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Register one array with the disk subsystem and print a summary of its
 * geometry, RAID level, overall status, and per-subdisk state to the
 * console.  `update' is passed through to ar_config_changed() and
 * controls whether the on-disk metadata is rewritten.
 */
static void
ar_attach_raid(struct ar_softc *rdp, int update)
{
    int disk;

    /* Recompute array status (and optionally write config) first. */
    ar_config_changed(rdp, update);

    /* Fill in the GEOM disk structure and create the device. */
    rdp->disk.d_strategy = arstrategy;
    rdp->disk.d_dump = ardump;
    rdp->disk.d_name = "ar";
    rdp->disk.d_sectorsize = DEV_BSIZE;
    rdp->disk.d_mediasize = (off_t)rdp->total_sectors * DEV_BSIZE;
    rdp->disk.d_fwsectors = rdp->sectors;
    rdp->disk.d_fwheads = rdp->heads;
    /* Cap transfers at 128 sectors (64KB with 512-byte sectors). */
    rdp->disk.d_maxsize = 128 * DEV_BSIZE;
    rdp->disk.d_drv1 = rdp;
    disk_create(rdp->lun, &rdp->disk, 0, NULL, NULL);

    /* Console banner: size in MB, then the RAID level. */
    printf("ar%d: %lluMB <ATA ", rdp->lun, (unsigned long long)
	   (rdp->total_sectors / ((1024L * 1024L) / DEV_BSIZE)));
    switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
    case AR_F_RAID0:
	printf("RAID0 "); break;
    case AR_F_RAID1:
	printf("RAID1 "); break;
    case AR_F_SPAN:
	printf("SPAN "); break;
    case (AR_F_RAID0 | AR_F_RAID1):
	printf("RAID0+1 "); break;
    default:
	/* Unknown level: report raw flags and bail out of the summary. */
	printf("unknown 0x%x> ", rdp->flags);
	return;
    }
    /* Geometry [cylinders/heads/sectors] and overall array status. */
    printf("array> [%d/%d/%d] status: ",
	   rdp->cylinders, rdp->heads, rdp->sectors);
    switch (rdp->flags & (AR_F_DEGRADED | AR_F_READY)) {
    case AR_F_READY:
	printf("READY");
	break;
    case (AR_F_DEGRADED | AR_F_READY):
	printf("DEGRADED");
	break;
    default:
	printf("BROKEN");
	break;
    }
    printf(" subdisks:\n");

    /* Per-subdisk state: READY/SPARE/FREE/DOWN/INVALID. */
    for (disk = 0; disk < rdp->total_disks; disk++) {
	if (rdp->disks[disk].device &&
	    AD_SOFTC(rdp->disks[disk])->flags & AD_F_RAID_SUBDISK) {
	    if (rdp->disks[disk].flags & AR_DF_PRESENT) {
		if (rdp->disks[disk].flags & AR_DF_ONLINE)
		    printf(" disk%d READY ", disk);
		else if (rdp->disks[disk].flags & AR_DF_SPARE)
		    printf(" disk%d SPARE ", disk);
		else
		    printf(" disk%d FREE ", disk);
		/* Identify the channel and master/slave position. */
		printf("on %s at ata%d-%s\n", rdp->disks[disk].device->name,
		       device_get_unit(rdp->disks[disk].device->channel->dev),
		       (rdp->disks[disk].device->unit == ATA_MASTER) ?
		       "master" : "slave");
	    }
	    else if (rdp->disks[disk].flags & AR_DF_ASSIGNED)
		printf(" disk%d DOWN\n", disk);
	    else
		printf(" disk%d INVALID no RAID config on this disk\n", disk);
	}
	else
	    printf(" disk%d DOWN no device found for this disk\n", disk);
    }
}
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
|
ata_raid_addspare(int array, int disk)
|
|
|
|
|
{
|
|
|
|
|
struct ar_softc *rdp;
|
|
|
|
|
struct ata_device *atadev;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
if (!ar_table || !(rdp = ar_table[array]))
|
|
|
|
|
return ENXIO;
|
|
|
|
|
if (!(rdp->flags & AR_F_RAID1))
|
|
|
|
|
return EPERM;
|
|
|
|
|
if (rdp->flags & AR_F_REBUILDING)
|
|
|
|
|
return EBUSY;
|
|
|
|
|
if (!(rdp->flags & AR_F_DEGRADED) || !(rdp->flags & AR_F_READY))
|
|
|
|
|
return ENXIO;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < rdp->total_disks; i++ ) {
|
|
|
|
|
if (((rdp->disks[i].flags & (AR_DF_PRESENT | AR_DF_ONLINE)) ==
|
|
|
|
|
(AR_DF_PRESENT | AR_DF_ONLINE)) && rdp->disks[i].device)
|
|
|
|
|
continue;
|
|
|
|
|
if ((atadev = ar_locate_disk(disk))) {
|
2003-08-24 09:22:26 +00:00
|
|
|
|
if (((struct ad_softc*)(atadev->softc))->flags & AD_F_RAID_SUBDISK)
|
2003-05-02 12:41:44 +00:00
|
|
|
|
return EBUSY;
|
|
|
|
|
rdp->disks[i].device = atadev;
|
|
|
|
|
rdp->disks[i].flags |= (AR_DF_PRESENT|AR_DF_ASSIGNED|AR_DF_SPARE);
|
|
|
|
|
AD_SOFTC(rdp->disks[i])->flags |= AD_F_RAID_SUBDISK;
|
|
|
|
|
ata_prtdev(rdp->disks[i].device,
|
|
|
|
|
"inserted into ar%d disk%d as spare\n", array, i);
|
|
|
|
|
ar_config_changed(rdp, 1);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2002-03-27 10:58:59 +00:00
|
|
|
|
}
|
2003-05-02 12:41:44 +00:00
|
|
|
|
return ENXIO;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
|
ata_raid_create(struct raid_setup *setup)
|
|
|
|
|
{
|
|
|
|
|
struct ata_device *atadev;
|
|
|
|
|
struct ar_softc *rdp;
|
|
|
|
|
int array, disk;
|
|
|
|
|
int ctlr = 0, disk_size = 0, total_disks = 0;
|
|
|
|
|
|
|
|
|
|
if (!ar_table)
|
|
|
|
|
ar_table = malloc(sizeof(struct ar_soft *) * MAX_ARRAYS,
|
|
|
|
|
M_AR, M_NOWAIT | M_ZERO);
|
|
|
|
|
if (!ar_table) {
|
|
|
|
|
printf("ar: no memory for ATA raid array\n");
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
for (array = 0; array < MAX_ARRAYS; array++) {
|
|
|
|
|
if (!ar_table[array])
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
break;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
}
|
|
|
|
|
if (array >= MAX_ARRAYS)
|
|
|
|
|
return ENOSPC;
|
|
|
|
|
|
|
|
|
|
if (!(rdp = (struct ar_softc*)malloc(sizeof(struct ar_softc), M_AR,
|
|
|
|
|
M_NOWAIT | M_ZERO))) {
|
|
|
|
|
printf("ar%d: failed to allocate raid config storage\n", array);
|
|
|
|
|
return ENOMEM;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (disk = 0; disk < setup->total_disks; disk++) {
|
|
|
|
|
if ((atadev = ar_locate_disk(setup->disks[disk]))) {
|
|
|
|
|
rdp->disks[disk].device = atadev;
|
|
|
|
|
if (AD_SOFTC(rdp->disks[disk])->flags & AD_F_RAID_SUBDISK) {
|
|
|
|
|
setup->disks[disk] = -1;
|
|
|
|
|
free(rdp, M_AR);
|
|
|
|
|
return EBUSY;
|
|
|
|
|
}
|
|
|
|
|
|
2003-08-24 09:22:26 +00:00
|
|
|
|
switch(pci_get_vendor(device_get_parent(
|
|
|
|
|
rdp->disks[disk].device->channel->dev))) {
|
2003-02-20 20:02:32 +00:00
|
|
|
|
case ATA_HIGHPOINT_ID:
|
2002-03-27 10:58:59 +00:00
|
|
|
|
ctlr |= AR_F_HIGHPOINT_RAID;
|
|
|
|
|
rdp->disks[disk].disk_sectors =
|
|
|
|
|
AD_SOFTC(rdp->disks[disk])->total_secs;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
ctlr |= AR_F_FREEBSD_RAID;
|
|
|
|
|
/* FALLTHROUGH */
|
|
|
|
|
|
2003-02-20 20:02:32 +00:00
|
|
|
|
case ATA_PROMISE_ID:
|
2002-03-27 10:58:59 +00:00
|
|
|
|
ctlr |= AR_F_PROMISE_RAID;
|
|
|
|
|
rdp->disks[disk].disk_sectors =
|
|
|
|
|
PR_LBA(AD_SOFTC(rdp->disks[disk]));
|
|
|
|
|
break;
|
|
|
|
|
}
|
2003-08-24 09:22:26 +00:00
|
|
|
|
|
2002-03-27 10:58:59 +00:00
|
|
|
|
if (rdp->flags & (AR_F_PROMISE_RAID|AR_F_HIGHPOINT_RAID) &&
|
2002-04-10 11:18:07 +00:00
|
|
|
|
(rdp->flags & (AR_F_PROMISE_RAID|AR_F_HIGHPOINT_RAID)) !=
|
|
|
|
|
(ctlr & (AR_F_PROMISE_RAID|AR_F_HIGHPOINT_RAID))) {
|
2002-03-27 10:58:59 +00:00
|
|
|
|
free(rdp, M_AR);
|
|
|
|
|
return EXDEV;
|
|
|
|
|
}
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
else
|
2002-03-27 10:58:59 +00:00
|
|
|
|
rdp->flags |= ctlr;
|
|
|
|
|
|
|
|
|
|
if (disk_size)
|
2003-02-20 20:02:32 +00:00
|
|
|
|
disk_size = min(rdp->disks[disk].disk_sectors, disk_size);
|
2002-03-27 10:58:59 +00:00
|
|
|
|
else
|
|
|
|
|
disk_size = rdp->disks[disk].disk_sectors;
|
|
|
|
|
rdp->disks[disk].flags =
|
|
|
|
|
(AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE);
|
|
|
|
|
|
|
|
|
|
total_disks++;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
}
|
2002-03-27 10:58:59 +00:00
|
|
|
|
else {
|
|
|
|
|
setup->disks[disk] = -1;
|
|
|
|
|
free(rdp, M_AR);
|
|
|
|
|
return ENXIO;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!total_disks) {
|
|
|
|
|
free(rdp, M_AR);
|
|
|
|
|
return ENODEV;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch (setup->type) {
|
|
|
|
|
case 1:
|
|
|
|
|
rdp->flags |= AR_F_RAID0;
|
|
|
|
|
break;
|
|
|
|
|
case 2:
|
|
|
|
|
rdp->flags |= AR_F_RAID1;
|
|
|
|
|
if (total_disks != 2) {
|
|
|
|
|
free(rdp, M_AR);
|
|
|
|
|
return EPERM;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case 3:
|
|
|
|
|
rdp->flags |= (AR_F_RAID0 | AR_F_RAID1);
|
|
|
|
|
if (total_disks % 2 != 0) {
|
|
|
|
|
free(rdp, M_AR);
|
|
|
|
|
return EPERM;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case 4:
|
|
|
|
|
rdp->flags |= AR_F_SPAN;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (disk = 0; disk < total_disks; disk++)
|
2003-04-07 14:14:29 +00:00
|
|
|
|
AD_SOFTC(rdp->disks[disk])->flags |= AD_F_RAID_SUBDISK;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
|
|
|
|
|
rdp->lun = array;
|
|
|
|
|
if (rdp->flags & AR_F_RAID0) {
|
|
|
|
|
int bit = 0;
|
|
|
|
|
|
|
|
|
|
while (setup->interleave >>= 1)
|
|
|
|
|
bit++;
|
|
|
|
|
if (rdp->flags & AR_F_PROMISE_RAID)
|
|
|
|
|
rdp->interleave = min(max(2, 1 << bit), 2048);
|
|
|
|
|
if (rdp->flags & AR_F_HIGHPOINT_RAID)
|
|
|
|
|
rdp->interleave = min(max(32, 1 << bit), 128);
|
|
|
|
|
}
|
|
|
|
|
rdp->total_disks = total_disks;
|
|
|
|
|
rdp->width = total_disks / ((rdp->flags & AR_F_RAID1) ? 2 : 1);
|
|
|
|
|
rdp->total_sectors = disk_size * rdp->width;
|
|
|
|
|
rdp->heads = 255;
|
|
|
|
|
rdp->sectors = 63;
|
|
|
|
|
rdp->cylinders = rdp->total_sectors / (255 * 63);
|
|
|
|
|
if (rdp->flags & AR_F_PROMISE_RAID) {
|
|
|
|
|
rdp->offset = 0;
|
|
|
|
|
rdp->reserved = 63;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
2002-03-27 10:58:59 +00:00
|
|
|
|
if (rdp->flags & AR_F_HIGHPOINT_RAID) {
|
|
|
|
|
rdp->offset = HPT_LBA + 1;
|
|
|
|
|
rdp->reserved = HPT_LBA + 1;
|
|
|
|
|
}
|
|
|
|
|
rdp->lock_start = rdp->lock_end = 0xffffffff;
|
|
|
|
|
rdp->flags |= AR_F_READY;
|
|
|
|
|
|
|
|
|
|
ar_table[array] = rdp;
|
2003-05-02 12:41:44 +00:00
|
|
|
|
#if 0
|
2003-02-20 20:02:32 +00:00
|
|
|
|
/* kick off rebuild here */
|
|
|
|
|
if (setup->type == 2) {
|
|
|
|
|
rdp->disks[1].flags &= ~AR_DF_ONLINE;
|
|
|
|
|
rdp->disks[1].flags |= AR_DF_SPARE;
|
|
|
|
|
}
|
2003-05-02 12:41:44 +00:00
|
|
|
|
#endif
|
2002-03-27 10:58:59 +00:00
|
|
|
|
ar_attach_raid(rdp, 1);
|
2003-02-20 20:02:32 +00:00
|
|
|
|
ata_raid_rebuild(array);
|
2002-03-27 10:58:59 +00:00
|
|
|
|
setup->unit = array;
|
|
|
|
|
return 0;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2002-03-27 10:58:59 +00:00
|
|
|
|
int
|
|
|
|
|
ata_raid_delete(int array)
|
|
|
|
|
{
|
|
|
|
|
struct ar_softc *rdp;
|
|
|
|
|
int disk;
|
|
|
|
|
|
|
|
|
|
if (!ar_table) {
|
|
|
|
|
printf("ar: no memory for ATA raid array\n");
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
if (!(rdp = ar_table[array]))
|
|
|
|
|
return ENXIO;
|
|
|
|
|
|
|
|
|
|
rdp->flags &= ~AR_F_READY;
|
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++) {
|
2002-03-28 11:48:36 +00:00
|
|
|
|
if ((rdp->disks[disk].flags&AR_DF_PRESENT) && rdp->disks[disk].device) {
|
2002-03-27 10:58:59 +00:00
|
|
|
|
AD_SOFTC(rdp->disks[disk])->flags &= ~AD_F_RAID_SUBDISK;
|
2003-08-24 09:22:26 +00:00
|
|
|
|
/* SOS
|
2003-01-27 09:04:29 +00:00
|
|
|
|
ata_enclosure_leds(rdp->disks[disk].device, ATA_LED_GREEN);
|
2003-08-24 09:22:26 +00:00
|
|
|
|
XXX */
|
2002-03-27 10:58:59 +00:00
|
|
|
|
rdp->disks[disk].flags = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (rdp->flags & AR_F_PROMISE_RAID)
|
|
|
|
|
ar_promise_write_conf(rdp);
|
|
|
|
|
else
|
|
|
|
|
ar_highpoint_write_conf(rdp);
|
2003-02-21 15:13:26 +00:00
|
|
|
|
disk_destroy(&rdp->disk);
|
2002-03-27 10:58:59 +00:00
|
|
|
|
free(rdp, M_AR);
|
|
|
|
|
ar_table[array] = NULL;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2002-04-02 13:48:17 +00:00
|
|
|
|
|
|
|
|
|
int
|
|
|
|
|
ata_raid_status(int array, struct raid_status *status)
|
|
|
|
|
{
|
|
|
|
|
struct ar_softc *rdp;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
if (!ar_table || !(rdp = ar_table[array]))
|
|
|
|
|
return ENXIO;
|
|
|
|
|
|
|
|
|
|
switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
|
|
|
|
|
case AR_F_RAID0:
|
|
|
|
|
status->type = AR_RAID0;
|
|
|
|
|
break;
|
|
|
|
|
case AR_F_RAID1:
|
|
|
|
|
status->type = AR_RAID1;
|
|
|
|
|
break;
|
|
|
|
|
case AR_F_RAID0 | AR_F_RAID1:
|
|
|
|
|
status->type = AR_RAID0 | AR_RAID1;
|
|
|
|
|
break;
|
|
|
|
|
case AR_F_SPAN:
|
|
|
|
|
status->type = AR_SPAN;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
status->total_disks = rdp->total_disks;
|
|
|
|
|
for (i = 0; i < rdp->total_disks; i++ ) {
|
|
|
|
|
if ((rdp->disks[i].flags & AR_DF_PRESENT) && rdp->disks[i].device)
|
|
|
|
|
status->disks[i] = AD_SOFTC(rdp->disks[i])->lun;
|
|
|
|
|
else
|
|
|
|
|
status->disks[i] = -1;
|
|
|
|
|
}
|
|
|
|
|
status->interleave = rdp->interleave;
|
|
|
|
|
status->status = 0;
|
|
|
|
|
if (rdp->flags & AR_F_READY)
|
|
|
|
|
status->status |= AR_READY;
|
|
|
|
|
if (rdp->flags & AR_F_DEGRADED)
|
|
|
|
|
status->status |= AR_DEGRADED;
|
|
|
|
|
if (rdp->flags & AR_F_REBUILDING) {
|
|
|
|
|
status->status |= AR_REBUILDING;
|
|
|
|
|
status->progress = 100*rdp->lock_start/(rdp->total_sectors/rdp->width);
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
int
|
|
|
|
|
ata_raid_rebuild(int array)
|
|
|
|
|
{
|
|
|
|
|
struct ar_softc *rdp;
|
|
|
|
|
|
|
|
|
|
if (!ar_table || !(rdp = ar_table[array]))
|
|
|
|
|
return ENXIO;
|
2002-03-15 15:39:54 +00:00
|
|
|
|
if (rdp->flags & AR_F_REBUILDING)
|
|
|
|
|
return EBUSY;
|
2002-10-02 07:44:29 +00:00
|
|
|
|
return kthread_create(ar_rebuild, rdp, &rdp->pid, RFNOWAIT, 0,
|
2002-03-15 15:39:54 +00:00
|
|
|
|
"rebuilding ar%d", array);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
}
|
|
|
|
|
|
2003-05-04 12:16:47 +00:00
|
|
|
|
/*
 * Crash dump entry point for an ATA RAID array.  Maps the linear dump
 * offset/length onto the member disks according to the array level and
 * forwards each chunk to the member's own d_dump routine.
 *
 * A zero-length call is the conventional "finish the dump" signal and
 * is fanned out to every online member.
 *
 * Returns 0 on success, ENXIO if the array is not ready, EIO on an
 * unknown array type or when a required member write fails.
 */
static int
ardump(void *arg, void *virtual, vm_offset_t physical,
       off_t offset, size_t length)
{
    struct ar_softc *rdp;
    struct disk *dp, *ap;
    vm_offset_t pdata;
    caddr_t vdata;
    int blkno, count, chunk, error1, error2, lba, lbs, tmplba;
    int drv = 0;

    dp = arg;
    rdp = dp->d_drv1;
    if (!rdp || !(rdp->flags & AR_F_READY))
	return ENXIO;

    /* length == 0: dump completion - pass it on to all online members. */
    if (length == 0) {
	for (drv = 0; drv < rdp->total_disks; drv++) {
	    if (rdp->disks[drv].flags & AR_DF_ONLINE) {
		ap = &AD_SOFTC(rdp->disks[drv])->disk;
		(void) ap->d_dump(ap, NULL, 0, 0, 0);
	    }
	}
	return 0;
    }

    blkno = offset / DEV_BSIZE;
    vdata = virtual;
    pdata = physical;

    /* Carve the request into per-member chunks and dispatch each. */
    for (count = howmany(length, DEV_BSIZE); count > 0;
	 count -= chunk, blkno += chunk, vdata += (chunk * DEV_BSIZE),
	 pdata += (chunk * DEV_BSIZE)) {

	/* First pass: compute member index (drv), member LBA and chunk. */
	switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
	case AR_F_SPAN:
	    /* Walk past whole members until the LBA falls inside one. */
	    lba = blkno;
	    while (lba >= AD_SOFTC(rdp->disks[drv])->total_secs-rdp->reserved)
		lba -= AD_SOFTC(rdp->disks[drv++])->total_secs-rdp->reserved;
	    chunk = min(AD_SOFTC(rdp->disks[drv])->total_secs-rdp->reserved-lba,
			count);
	    break;

	case AR_F_RAID0:
	case AR_F_RAID0 | AR_F_RAID1:
	    tmplba = blkno / rdp->interleave;
	    chunk = blkno % rdp->interleave;
	    if (blkno >= (rdp->total_sectors / (rdp->interleave * rdp->width)) *
			 (rdp->interleave * rdp->width) ) {
		/*
		 * Tail area: the leftover sectors past the last full
		 * stripe are laid out linearly, lbs per member.
		 */
		lbs = (rdp->total_sectors -
		       ((rdp->total_sectors / (rdp->interleave * rdp->width)) *
			(rdp->interleave * rdp->width))) / rdp->width;
		drv = (blkno -
		       ((rdp->total_sectors / (rdp->interleave * rdp->width)) *
			(rdp->interleave * rdp->width))) / lbs;
		lba = ((tmplba / rdp->width) * rdp->interleave) +
		      (blkno - ((tmplba / rdp->width) * rdp->interleave)) % lbs;
		chunk = min(count, lbs);
	    }
	    else {
		/* Regular striped area: round-robin over the width. */
		drv = tmplba % rdp->width;
		lba = ((tmplba / rdp->width) * rdp->interleave) + chunk;
		chunk = min(count, rdp->interleave - chunk);
	    }
	    break;

	case AR_F_RAID1:
	    /* Mirror: identical layout on both sides, write side 0 here. */
	    drv = 0;
	    lba = blkno;
	    chunk = count;
	    break;

	default:
	    printf("ar%d: unknown array type in ardump\n", rdp->lun);
	    return EIO;
	}

	/*
	 * Members other than the first start rdp->offset sectors in
	 * (skipping the metadata/reserved area at the front).
	 */
	if (drv > 0)
	    lba += rdp->offset;

	/* Second pass: issue the chunk to the member(s). */
	switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
	case AR_F_SPAN:
	case AR_F_RAID0:
	    /* Single copy: a failed or offline member fails the dump. */
	    if (rdp->disks[drv].flags & AR_DF_ONLINE) {
		ap = &AD_SOFTC(rdp->disks[drv])->disk;
		error1 = ap->d_dump(ap, vdata, pdata,
				    (off_t) lba * DEV_BSIZE,
				    chunk * DEV_BSIZE);
	    } else
		error1 = EIO;
	    if (error1)
		return error1;
	    break;

	case AR_F_RAID1:
	case AR_F_RAID0 | AR_F_RAID1:
	    /*
	     * Mirrored: write both sides; a rebuilding spare also gets
	     * the data.  Only fail if BOTH sides fail.
	     */
	    if ((rdp->disks[drv].flags & AR_DF_ONLINE) ||
		((rdp->flags & AR_F_REBUILDING) &&
		 (rdp->disks[drv].flags & AR_DF_SPARE))) {
		ap = &AD_SOFTC(rdp->disks[drv])->disk;
		error1 = ap->d_dump(ap, vdata, pdata,
				    (off_t) lba * DEV_BSIZE,
				    chunk * DEV_BSIZE);
	    } else
		error1 = EIO;
	    if ((rdp->disks[drv + rdp->width].flags & AR_DF_ONLINE) ||
		((rdp->flags & AR_F_REBUILDING) &&
		 (rdp->disks[drv + rdp->width].flags & AR_DF_SPARE))) {
		ap = &AD_SOFTC(rdp->disks[drv + rdp->width])->disk;
		error2 = ap->d_dump(ap, vdata, pdata,
				    (off_t) lba * DEV_BSIZE,
				    chunk * DEV_BSIZE);
	    } else
		error2 = EIO;
	    if (error1 && error2)
		return error1;
	    break;

	default:
	    printf("ar%d: unknown array type in ardump\n", rdp->lun);
	    return EIO;
	}
    }
    return 0;
}
|
|
|
|
|
|
2000-10-13 13:04:45 +00:00
|
|
|
|
static void
|
|
|
|
|
arstrategy(struct bio *bp)
|
|
|
|
|
{
|
2003-02-25 15:33:36 +00:00
|
|
|
|
struct ar_softc *rdp = bp->bio_disk->d_drv1;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
int blkno, count, chunk, lba, lbs, tmplba;
|
|
|
|
|
int drv = 0, change = 0;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
caddr_t data;
|
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
if (!(rdp->flags & AR_F_READY)) {
|
|
|
|
|
bp->bio_flags |= BIO_ERROR;
|
|
|
|
|
bp->bio_error = EIO;
|
|
|
|
|
biodone(bp);
|
|
|
|
|
return;
|
|
|
|
|
}
|
2002-03-08 21:36:49 +00:00
|
|
|
|
|
2000-10-13 13:04:45 +00:00
|
|
|
|
bp->bio_resid = bp->bio_bcount;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
blkno = bp->bio_pblkno;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
data = bp->bio_data;
|
|
|
|
|
for (count = howmany(bp->bio_bcount, DEV_BSIZE); count > 0;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
count -= chunk, blkno += chunk, data += (chunk * DEV_BSIZE)) {
|
2000-10-13 13:04:45 +00:00
|
|
|
|
struct ar_buf *buf1, *buf2;
|
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
|
|
|
|
|
case AR_F_SPAN:
|
2002-03-03 15:36:21 +00:00
|
|
|
|
lba = blkno;
|
|
|
|
|
while (lba >= AD_SOFTC(rdp->disks[drv])->total_secs-rdp->reserved)
|
|
|
|
|
lba -= AD_SOFTC(rdp->disks[drv++])->total_secs-rdp->reserved;
|
|
|
|
|
chunk = min(AD_SOFTC(rdp->disks[drv])->total_secs-rdp->reserved-lba,
|
|
|
|
|
count);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case AR_F_RAID0:
|
|
|
|
|
case AR_F_RAID0 | AR_F_RAID1:
|
2002-03-03 15:36:21 +00:00
|
|
|
|
tmplba = blkno / rdp->interleave;
|
|
|
|
|
chunk = blkno % rdp->interleave;
|
2003-04-08 07:48:52 +00:00
|
|
|
|
if (blkno >= (rdp->total_sectors / (rdp->interleave * rdp->width)) *
|
|
|
|
|
(rdp->interleave * rdp->width) ) {
|
2003-08-24 09:22:26 +00:00
|
|
|
|
lbs = (rdp->total_sectors -
|
2003-04-08 07:48:52 +00:00
|
|
|
|
((rdp->total_sectors / (rdp->interleave * rdp->width)) *
|
|
|
|
|
(rdp->interleave * rdp->width))) / rdp->width;
|
2003-08-24 09:22:26 +00:00
|
|
|
|
drv = (blkno -
|
2003-04-08 07:48:52 +00:00
|
|
|
|
((rdp->total_sectors / (rdp->interleave * rdp->width)) *
|
|
|
|
|
(rdp->interleave * rdp->width))) / lbs;
|
2003-08-24 09:22:26 +00:00
|
|
|
|
lba = ((tmplba / rdp->width) * rdp->interleave) +
|
|
|
|
|
(blkno - ((tmplba / rdp->width) * rdp->interleave)) % lbs;
|
|
|
|
|
chunk = min(count, lbs);
|
2001-03-21 11:48:14 +00:00
|
|
|
|
}
|
|
|
|
|
else {
|
2002-03-03 15:36:21 +00:00
|
|
|
|
drv = tmplba % rdp->width;
|
|
|
|
|
lba = ((tmplba / rdp->width) * rdp->interleave) + chunk;
|
2001-03-21 11:48:14 +00:00
|
|
|
|
chunk = min(count, rdp->interleave - chunk);
|
|
|
|
|
}
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case AR_F_RAID1:
|
2002-03-03 15:36:21 +00:00
|
|
|
|
drv = 0;
|
|
|
|
|
lba = blkno;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
chunk = count;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
2002-02-12 11:35:15 +00:00
|
|
|
|
printf("ar%d: unknown array type in arstrategy\n", rdp->lun);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
bp->bio_flags |= BIO_ERROR;
|
|
|
|
|
bp->bio_error = EIO;
|
|
|
|
|
biodone(bp);
|
|
|
|
|
return;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
buf1 = malloc(sizeof(struct ar_buf), M_AR, M_NOWAIT | M_ZERO);
|
|
|
|
|
buf1->bp.bio_pblkno = lba;
|
|
|
|
|
if ((buf1->drive = drv) > 0)
|
2000-11-12 20:45:14 +00:00
|
|
|
|
buf1->bp.bio_pblkno += rdp->offset;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
buf1->bp.bio_caller1 = (void *)rdp;
|
|
|
|
|
buf1->bp.bio_bcount = chunk * DEV_BSIZE;
|
|
|
|
|
buf1->bp.bio_data = data;
|
|
|
|
|
buf1->bp.bio_cmd = bp->bio_cmd;
|
|
|
|
|
buf1->bp.bio_flags = bp->bio_flags;
|
|
|
|
|
buf1->bp.bio_done = ar_done;
|
|
|
|
|
buf1->org = bp;
|
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
|
|
|
|
|
case AR_F_SPAN:
|
|
|
|
|
case AR_F_RAID0:
|
2002-04-10 11:18:07 +00:00
|
|
|
|
if ((rdp->disks[buf1->drive].flags &
|
|
|
|
|
(AR_DF_PRESENT|AR_DF_ONLINE))==(AR_DF_PRESENT|AR_DF_ONLINE) &&
|
2003-08-24 09:22:26 +00:00
|
|
|
|
!rdp->disks[buf1->drive].device->softc) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
rdp->disks[buf1->drive].flags &= ~AR_DF_ONLINE;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
ar_config_changed(rdp, 1);
|
|
|
|
|
free(buf1, M_AR);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
bp->bio_flags |= BIO_ERROR;
|
|
|
|
|
bp->bio_error = EIO;
|
|
|
|
|
biodone(bp);
|
|
|
|
|
return;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
2003-02-25 15:33:36 +00:00
|
|
|
|
buf1->bp.bio_disk = &AD_SOFTC(rdp->disks[buf1->drive])->disk;
|
2002-03-07 16:32:21 +00:00
|
|
|
|
AR_STRATEGY((struct bio *)buf1);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case AR_F_RAID1:
|
|
|
|
|
case AR_F_RAID0 | AR_F_RAID1:
|
2002-03-15 15:39:54 +00:00
|
|
|
|
if (rdp->flags & AR_F_REBUILDING && bp->bio_cmd == BIO_WRITE) {
|
|
|
|
|
if ((bp->bio_pblkno >= rdp->lock_start &&
|
|
|
|
|
bp->bio_pblkno < rdp->lock_end) ||
|
|
|
|
|
((bp->bio_pblkno + chunk) > rdp->lock_start &&
|
|
|
|
|
(bp->bio_pblkno + chunk) <= rdp->lock_end)) {
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
tsleep(rdp, PRIBIO, "arwait", 0);
|
|
|
|
|
}
|
|
|
|
|
}
|
2002-04-10 11:18:07 +00:00
|
|
|
|
if ((rdp->disks[buf1->drive].flags &
|
|
|
|
|
(AR_DF_PRESENT|AR_DF_ONLINE))==(AR_DF_PRESENT|AR_DF_ONLINE) &&
|
2003-08-24 09:22:26 +00:00
|
|
|
|
!rdp->disks[buf1->drive].device->softc) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
rdp->disks[buf1->drive].flags &= ~AR_DF_ONLINE;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
change = 1;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
}
|
2002-04-10 11:18:07 +00:00
|
|
|
|
if ((rdp->disks[buf1->drive + rdp->width].flags &
|
|
|
|
|
(AR_DF_PRESENT|AR_DF_ONLINE))==(AR_DF_PRESENT|AR_DF_ONLINE) &&
|
2003-08-24 09:22:26 +00:00
|
|
|
|
!rdp->disks[buf1->drive + rdp->width].device->softc) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
rdp->disks[buf1->drive + rdp->width].flags &= ~AR_DF_ONLINE;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
change = 1;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
}
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (change)
|
|
|
|
|
ar_config_changed(rdp, 1);
|
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
if (!(rdp->flags & AR_F_READY)) {
|
2002-03-03 15:36:21 +00:00
|
|
|
|
free(buf1, M_AR);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
bp->bio_flags |= BIO_ERROR;
|
|
|
|
|
bp->bio_error = EIO;
|
|
|
|
|
biodone(bp);
|
|
|
|
|
return;
|
|
|
|
|
}
|
2002-03-05 09:24:19 +00:00
|
|
|
|
if (bp->bio_cmd == BIO_READ) {
|
2003-05-04 16:17:54 +00:00
|
|
|
|
/* if mirror gone or close to last access on source */
|
|
|
|
|
if (!(rdp->disks[buf1->drive+rdp->width].flags & AR_DF_ONLINE)||
|
|
|
|
|
(buf1->bp.bio_pblkno >=
|
|
|
|
|
(rdp->disks[buf1->drive].last_lba - AR_PROXIMITY) &&
|
|
|
|
|
buf1->bp.bio_pblkno <=
|
|
|
|
|
(rdp->disks[buf1->drive].last_lba + AR_PROXIMITY))) {
|
|
|
|
|
rdp->flags &= ~AR_F_TOGGLE;
|
|
|
|
|
}
|
|
|
|
|
/* if source gone or close to last access on mirror */
|
|
|
|
|
else if (!(rdp->disks[buf1->drive].flags & AR_DF_ONLINE) ||
|
|
|
|
|
(buf1->bp.bio_pblkno >=
|
|
|
|
|
(rdp->disks[buf1->drive + rdp->width].last_lba -
|
|
|
|
|
AR_PROXIMITY) &&
|
|
|
|
|
buf1->bp.bio_pblkno <=
|
|
|
|
|
(rdp->disks[buf1->drive + rdp->width].last_lba +
|
|
|
|
|
AR_PROXIMITY))) {
|
|
|
|
|
buf1->drive = buf1->drive + rdp->width;
|
|
|
|
|
rdp->flags |= AR_F_TOGGLE;
|
|
|
|
|
}
|
|
|
|
|
/* not close to any previous access, toggle */
|
|
|
|
|
else {
|
|
|
|
|
if (rdp->flags & AR_F_TOGGLE)
|
|
|
|
|
rdp->flags &= ~AR_F_TOGGLE;
|
|
|
|
|
else {
|
2002-03-05 09:24:19 +00:00
|
|
|
|
buf1->drive = buf1->drive + rdp->width;
|
2003-05-04 16:17:54 +00:00
|
|
|
|
rdp->flags |= AR_F_TOGGLE;
|
|
|
|
|
}
|
|
|
|
|
}
|
2002-03-05 09:24:19 +00:00
|
|
|
|
}
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstraction layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Properly detect the RAIDs from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
if (bp->bio_cmd == BIO_WRITE) {
|
2002-03-15 15:39:54 +00:00
|
|
|
|
if ((rdp->disks[buf1->drive+rdp->width].flags & AR_DF_ONLINE) ||
|
|
|
|
|
((rdp->flags & AR_F_REBUILDING) &&
|
|
|
|
|
(rdp->disks[buf1->drive+rdp->width].flags & AR_DF_SPARE) &&
|
|
|
|
|
buf1->bp.bio_pblkno < rdp->lock_start)) {
|
|
|
|
|
if ((rdp->disks[buf1->drive].flags & AR_DF_ONLINE) ||
|
|
|
|
|
((rdp->flags & AR_F_REBUILDING) &&
|
|
|
|
|
(rdp->disks[buf1->drive].flags & AR_DF_SPARE) &&
|
|
|
|
|
buf1->bp.bio_pblkno < rdp->lock_start)) {
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
buf2 = malloc(sizeof(struct ar_buf), M_AR, M_NOWAIT);
|
|
|
|
|
bcopy(buf1, buf2, sizeof(struct ar_buf));
|
|
|
|
|
buf1->mirror = buf2;
|
|
|
|
|
buf2->mirror = buf1;
|
|
|
|
|
buf2->drive = buf1->drive + rdp->width;
|
2003-02-25 15:33:36 +00:00
|
|
|
|
buf2->bp.bio_disk =
|
|
|
|
|
&AD_SOFTC(rdp->disks[buf2->drive])->disk;
|
2002-03-07 16:32:21 +00:00
|
|
|
|
AR_STRATEGY((struct bio *)buf2);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
rdp->disks[buf2->drive].last_lba =
|
2002-03-03 15:36:21 +00:00
|
|
|
|
buf2->bp.bio_pblkno + chunk;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
buf1->drive = buf1->drive + rdp->width;
|
|
|
|
|
}
|
|
|
|
|
}
|
2003-02-25 15:33:36 +00:00
|
|
|
|
buf1->bp.bio_disk = &AD_SOFTC(rdp->disks[buf1->drive])->disk;
|
2002-03-07 16:32:21 +00:00
|
|
|
|
AR_STRATEGY((struct bio *)buf1);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
rdp->disks[buf1->drive].last_lba = buf1->bp.bio_pblkno + chunk;
|
|
|
|
|
break;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
default:
|
2002-02-12 11:35:15 +00:00
|
|
|
|
printf("ar%d: unknown array type in arstrategy\n", rdp->lun);
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Completion handler for member-disk I/O started by arstrategy().
 * bp is the embedded bio of an ar_buf; bio_caller1 carries the softc
 * of the array the request belongs to.  Accounts this sub-request
 * against the original request (buf->org) and biodone()s the original
 * once it is fully satisfied.  Frees the ar_buf on every path except
 * a RAID1 read retry, which re-issues the same ar_buf to the mirror.
 */
static void
ar_done(struct bio *bp)
{
    struct ar_softc *rdp = (struct ar_softc *)bp->bio_caller1;
    struct ar_buf *buf = (struct ar_buf *)bp;

    switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
    case AR_F_SPAN:
    case AR_F_RAID0:
	if (buf->bp.bio_flags & BIO_ERROR) {
	    /* no redundancy: a failed member breaks the whole array */
	    rdp->disks[buf->drive].flags &= ~AR_DF_ONLINE;
	    ar_config_changed(rdp, 1);
	    buf->org->bio_flags |= BIO_ERROR;
	    buf->org->bio_error = EIO;
	    biodone(buf->org);
	}
	else {
	    /* complete the original request when all chunks are in */
	    buf->org->bio_resid -= buf->bp.bio_bcount;
	    if (buf->org->bio_resid == 0)
		biodone(buf->org);
	}
	break;

    case AR_F_RAID1:
    case AR_F_RAID0 | AR_F_RAID1:
	if (buf->bp.bio_flags & BIO_ERROR) {
	    /* take the failing disk offline and reassess array state */
	    rdp->disks[buf->drive].flags &= ~AR_DF_ONLINE;
	    ar_config_changed(rdp, 1);
	    if (rdp->flags & AR_F_READY) {
		if (buf->bp.bio_cmd == BIO_READ) {
		    /* retry the read on the other half of the mirror */
		    if (buf->drive < rdp->width)
			buf->drive = buf->drive + rdp->width;
		    else
			buf->drive = buf->drive - rdp->width;
		    buf->bp.bio_disk = &AD_SOFTC(rdp->disks[buf->drive])->disk;
		    buf->bp.bio_flags = buf->org->bio_flags;
		    buf->bp.bio_error = 0;
		    AR_STRATEGY((struct bio *)buf);
		    /* ar_done will run again for the retry; keep the ar_buf */
		    return;
		}
		if (buf->bp.bio_cmd == BIO_WRITE) {
		    /* write failed but the mirror disk still covers us */
		    if (buf->flags & AB_F_DONE) {
			buf->org->bio_resid -= buf->bp.bio_bcount;
			if (buf->org->bio_resid == 0)
			    biodone(buf->org);
		    }
		    else
			buf->mirror->flags |= AB_F_DONE;
		}
	    }
	    else {
		/* both halves of the mirror gone; fail the original */
		buf->org->bio_flags |= BIO_ERROR;
		buf->org->bio_error = EIO;
		biodone(buf->org);
	    }
	}
	else {
	    if (buf->bp.bio_cmd == BIO_WRITE) {
		/* first half of a mirrored write done; let the other
		   half do the accounting when it completes */
		if (buf->mirror && !(buf->flags & AB_F_DONE)){
		    buf->mirror->flags |= AB_F_DONE;
		    break;
		}
	    }
	    buf->org->bio_resid -= buf->bp.bio_bcount;
	    if (buf->org->bio_resid == 0)
		biodone(buf->org);
	}
	break;

    default:
	printf("ar%d: unknown array type in ar_done\n", rdp->lun);
    }
    free(buf, M_AR);
}
|
|
|
|
|
|
2002-02-12 11:35:15 +00:00
|
|
|
|
static void
|
2002-03-03 15:36:21 +00:00
|
|
|
|
ar_config_changed(struct ar_softc *rdp, int writeback)
|
|
|
|
|
{
|
|
|
|
|
int disk, flags;
|
|
|
|
|
|
|
|
|
|
flags = rdp->flags;
|
|
|
|
|
rdp->flags |= AR_F_READY;
|
|
|
|
|
rdp->flags &= ~AR_F_DEGRADED;
|
|
|
|
|
|
2002-04-10 11:18:07 +00:00
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++)
|
|
|
|
|
if (!(rdp->disks[disk].flags & AR_DF_PRESENT))
|
|
|
|
|
rdp->disks[disk].flags &= ~AR_DF_ONLINE;
|
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++) {
|
|
|
|
|
switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
|
|
|
|
|
case AR_F_SPAN:
|
|
|
|
|
case AR_F_RAID0:
|
|
|
|
|
if (!(rdp->disks[disk].flags & AR_DF_ONLINE)) {
|
|
|
|
|
rdp->flags &= ~AR_F_READY;
|
|
|
|
|
printf("ar%d: ERROR - array broken\n", rdp->lun);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case AR_F_RAID1:
|
|
|
|
|
case AR_F_RAID0 | AR_F_RAID1:
|
|
|
|
|
if (disk < rdp->width) {
|
|
|
|
|
if (!(rdp->disks[disk].flags & AR_DF_ONLINE) &&
|
|
|
|
|
!(rdp->disks[disk + rdp->width].flags & AR_DF_ONLINE)) {
|
|
|
|
|
rdp->flags &= ~AR_F_READY;
|
|
|
|
|
printf("ar%d: ERROR - array broken\n", rdp->lun);
|
|
|
|
|
}
|
|
|
|
|
else if (((rdp->disks[disk].flags & AR_DF_ONLINE) &&
|
|
|
|
|
!(rdp->disks
|
|
|
|
|
[disk + rdp->width].flags & AR_DF_ONLINE))||
|
|
|
|
|
(!(rdp->disks[disk].flags & AR_DF_ONLINE) &&
|
|
|
|
|
(rdp->disks
|
|
|
|
|
[disk + rdp->width].flags & AR_DF_ONLINE))) {
|
|
|
|
|
rdp->flags |= AR_F_DEGRADED;
|
|
|
|
|
if (!(flags & AR_F_DEGRADED))
|
|
|
|
|
printf("ar%d: WARNING - mirror lost\n", rdp->lun);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
2002-03-28 11:48:36 +00:00
|
|
|
|
if ((rdp->disks[disk].flags&AR_DF_PRESENT) && rdp->disks[disk].device) {
|
2003-08-24 09:22:26 +00:00
|
|
|
|
/* SOS
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (rdp->disks[disk].flags & AR_DF_ONLINE)
|
2003-01-27 09:04:29 +00:00
|
|
|
|
ata_enclosure_leds(rdp->disks[disk].device, ATA_LED_GREEN);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
else
|
2003-01-27 09:04:29 +00:00
|
|
|
|
ata_enclosure_leds(rdp->disks[disk].device, ATA_LED_RED);
|
2003-08-24 09:22:26 +00:00
|
|
|
|
XXX */
|
2002-03-03 15:36:21 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (writeback) {
|
|
|
|
|
if (rdp->flags & AR_F_PROMISE_RAID)
|
|
|
|
|
ar_promise_write_conf(rdp);
|
|
|
|
|
if (rdp->flags & AR_F_HIGHPOINT_RAID)
|
|
|
|
|
ar_highpoint_write_conf(rdp);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2002-03-15 15:39:54 +00:00
|
|
|
|
static void
|
|
|
|
|
ar_rebuild(void *arg)
|
2002-02-12 11:35:15 +00:00
|
|
|
|
{
|
2002-03-15 15:39:54 +00:00
|
|
|
|
struct ar_softc *rdp = arg;
|
2002-03-11 21:04:32 +00:00
|
|
|
|
int disk, s, count = 0, error = 0;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
caddr_t buffer;
|
|
|
|
|
|
2003-05-08 16:38:14 +00:00
|
|
|
|
mtx_lock(&Giant);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if ((rdp->flags & (AR_F_READY|AR_F_DEGRADED)) != (AR_F_READY|AR_F_DEGRADED))
|
2002-03-15 15:39:54 +00:00
|
|
|
|
kthread_exit(EEXIST);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
|
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++) {
|
|
|
|
|
if (((rdp->disks[disk].flags&(AR_DF_PRESENT|AR_DF_ONLINE|AR_DF_SPARE))==
|
|
|
|
|
(AR_DF_PRESENT | AR_DF_SPARE)) && rdp->disks[disk].device) {
|
|
|
|
|
if (AD_SOFTC(rdp->disks[disk])->total_secs <
|
|
|
|
|
rdp->disks[disk].disk_sectors) {
|
|
|
|
|
ata_prtdev(rdp->disks[disk].device,
|
|
|
|
|
"disk capacity too small for this RAID config\n");
|
2002-03-27 10:58:59 +00:00
|
|
|
|
#if 0
|
2002-03-03 15:36:21 +00:00
|
|
|
|
rdp->disks[disk].flags &= ~AR_DF_SPARE;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
AD_SOFTC(rdp->disks[disk])->flags &= ~AD_F_RAID_SUBDISK;
|
|
|
|
|
#endif
|
2002-03-03 15:36:21 +00:00
|
|
|
|
continue;
|
|
|
|
|
}
|
2003-08-24 09:22:26 +00:00
|
|
|
|
/* SOS
|
2003-01-27 09:04:29 +00:00
|
|
|
|
ata_enclosure_leds(rdp->disks[disk].device, ATA_LED_ORANGE);
|
2003-08-24 09:22:26 +00:00
|
|
|
|
XXX */
|
2002-03-03 15:36:21 +00:00
|
|
|
|
count++;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!count)
|
2002-03-15 15:39:54 +00:00
|
|
|
|
kthread_exit(ENODEV);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
|
|
|
|
|
/* setup start conditions */
|
2002-03-11 21:04:32 +00:00
|
|
|
|
s = splbio();
|
2002-03-03 15:36:21 +00:00
|
|
|
|
rdp->lock_start = 0;
|
|
|
|
|
rdp->lock_end = rdp->lock_start + 256;
|
|
|
|
|
rdp->flags |= AR_F_REBUILDING;
|
2002-03-11 21:04:32 +00:00
|
|
|
|
splx(s);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
buffer = malloc(256 * DEV_BSIZE, M_AR, M_NOWAIT | M_ZERO);
|
|
|
|
|
|
|
|
|
|
/* now go copy entire disk(s) */
|
2002-03-07 19:20:23 +00:00
|
|
|
|
while (rdp->lock_end < (rdp->total_sectors / rdp->width)) {
|
2002-03-07 16:32:21 +00:00
|
|
|
|
int size = min(256, (rdp->total_sectors / rdp->width) - rdp->lock_end);
|
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
for (disk = 0; disk < rdp->width; disk++) {
|
2002-03-07 16:32:21 +00:00
|
|
|
|
struct ad_softc *adp;
|
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (((rdp->disks[disk].flags & AR_DF_ONLINE) &&
|
|
|
|
|
(rdp->disks[disk + rdp->width].flags & AR_DF_ONLINE)) ||
|
|
|
|
|
((rdp->disks[disk].flags & AR_DF_ONLINE) &&
|
|
|
|
|
!(rdp->disks[disk + rdp->width].flags & AR_DF_SPARE)) ||
|
|
|
|
|
((rdp->disks[disk + rdp->width].flags & AR_DF_ONLINE) &&
|
|
|
|
|
!(rdp->disks[disk].flags & AR_DF_SPARE)))
|
|
|
|
|
continue;
|
2002-03-07 16:32:21 +00:00
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (rdp->disks[disk].flags & AR_DF_ONLINE)
|
2002-03-07 16:32:21 +00:00
|
|
|
|
adp = AD_SOFTC(rdp->disks[disk]);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
else
|
2002-03-07 16:32:21 +00:00
|
|
|
|
adp = AD_SOFTC(rdp->disks[disk + rdp->width]);
|
|
|
|
|
if ((error = ar_rw(adp, rdp->lock_start,
|
|
|
|
|
size * DEV_BSIZE, buffer, AR_READ | AR_WAIT)))
|
|
|
|
|
break;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
|
|
|
|
|
if (rdp->disks[disk].flags & AR_DF_ONLINE)
|
2002-03-07 16:32:21 +00:00
|
|
|
|
adp = AD_SOFTC(rdp->disks[disk + rdp->width]);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
else
|
2002-03-07 16:32:21 +00:00
|
|
|
|
adp = AD_SOFTC(rdp->disks[disk]);
|
|
|
|
|
if ((error = ar_rw(adp, rdp->lock_start,
|
|
|
|
|
size * DEV_BSIZE, buffer, AR_WRITE | AR_WAIT)))
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
if (error) {
|
|
|
|
|
wakeup(rdp);
|
|
|
|
|
free(buffer, M_AR);
|
2002-03-15 15:39:54 +00:00
|
|
|
|
kthread_exit(error);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
}
|
2002-03-11 21:04:32 +00:00
|
|
|
|
s = splbio();
|
2002-03-03 15:36:21 +00:00
|
|
|
|
rdp->lock_start = rdp->lock_end;
|
2002-03-07 16:32:21 +00:00
|
|
|
|
rdp->lock_end = rdp->lock_start + size;
|
2002-03-11 21:04:32 +00:00
|
|
|
|
splx(s);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
wakeup(rdp);
|
2002-03-16 15:55:20 +00:00
|
|
|
|
sprintf(rdp->pid->p_comm, "rebuilding ar%d %lld%%", rdp->lun,
|
|
|
|
|
(unsigned long long)(100 * rdp->lock_start /
|
|
|
|
|
(rdp->total_sectors / rdp->width)));
|
2002-03-03 15:36:21 +00:00
|
|
|
|
}
|
|
|
|
|
free(buffer, M_AR);
|
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++) {
|
|
|
|
|
if ((rdp->disks[disk].flags&(AR_DF_PRESENT|AR_DF_ONLINE|AR_DF_SPARE))==
|
|
|
|
|
(AR_DF_PRESENT | AR_DF_SPARE)) {
|
|
|
|
|
rdp->disks[disk].flags &= ~AR_DF_SPARE;
|
|
|
|
|
rdp->disks[disk].flags |= (AR_DF_ASSIGNED | AR_DF_ONLINE);
|
|
|
|
|
}
|
|
|
|
|
}
|
2002-03-11 21:04:32 +00:00
|
|
|
|
s = splbio();
|
|
|
|
|
rdp->lock_start = 0xffffffff;
|
|
|
|
|
rdp->lock_end = 0xffffffff;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
rdp->flags &= ~AR_F_REBUILDING;
|
2002-03-11 21:04:32 +00:00
|
|
|
|
splx(s);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
ar_config_changed(rdp, 1);
|
2002-03-15 15:39:54 +00:00
|
|
|
|
kthread_exit(0);
|
2002-02-12 11:35:15 +00:00
|
|
|
|
}
|
|
|
|
|
|
2000-10-13 13:04:45 +00:00
|
|
|
|
static int
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
ar_highpoint_read_conf(struct ad_softc *adp, struct ar_softc **raidp)
|
2000-10-13 13:04:45 +00:00
|
|
|
|
{
|
2001-09-20 15:25:36 +00:00
|
|
|
|
struct highpoint_raid_conf *info;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
struct ar_softc *raid = NULL;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
int array, disk_number = 0, retval = 0;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
2001-09-20 15:25:36 +00:00
|
|
|
|
if (!(info = (struct highpoint_raid_conf *)
|
|
|
|
|
malloc(sizeof(struct highpoint_raid_conf), M_AR, M_NOWAIT | M_ZERO)))
|
2002-03-03 15:36:21 +00:00
|
|
|
|
return retval;
|
2001-09-20 15:25:36 +00:00
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (ar_rw(adp, HPT_LBA, sizeof(struct highpoint_raid_conf),
|
|
|
|
|
(caddr_t)info, AR_READ | AR_WAIT)) {
|
2000-10-18 18:49:42 +00:00
|
|
|
|
if (bootverbose)
|
2002-02-12 11:35:15 +00:00
|
|
|
|
printf("ar: HighPoint read conf failed\n");
|
2001-09-20 15:25:36 +00:00
|
|
|
|
goto highpoint_out;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* check if this is a HighPoint RAID struct */
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if (info->magic != HPT_MAGIC_OK && info->magic != HPT_MAGIC_BAD) {
|
2000-10-18 18:49:42 +00:00
|
|
|
|
if (bootverbose)
|
2002-02-12 11:35:15 +00:00
|
|
|
|
printf("ar: HighPoint check1 failed\n");
|
|
|
|
|
goto highpoint_out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* is this disk defined, or an old leftover/spare ? */
|
|
|
|
|
if (!info->magic_0) {
|
|
|
|
|
if (bootverbose)
|
|
|
|
|
printf("ar: HighPoint check2 failed\n");
|
2001-09-20 15:25:36 +00:00
|
|
|
|
goto highpoint_out;
|
2000-11-01 17:35:44 +00:00
|
|
|
|
}
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
|
|
|
|
/* now convert HighPoint config info into our generic form */
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
for (array = 0; array < MAX_ARRAYS; array++) {
|
2000-11-01 17:35:44 +00:00
|
|
|
|
if (!raidp[array]) {
|
|
|
|
|
raidp[array] =
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
(struct ar_softc*)malloc(sizeof(struct ar_softc), M_AR,
|
2000-12-26 12:05:49 +00:00
|
|
|
|
M_NOWAIT | M_ZERO);
|
2000-11-01 17:35:44 +00:00
|
|
|
|
if (!raidp[array]) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
printf("ar%d: failed to allocate raid config storage\n", array);
|
2001-09-20 15:25:36 +00:00
|
|
|
|
goto highpoint_out;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
2000-11-01 17:35:44 +00:00
|
|
|
|
raid = raidp[array];
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
if (raid->flags & AR_F_PROMISE_RAID)
|
|
|
|
|
continue;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
2001-09-20 15:25:36 +00:00
|
|
|
|
switch (info->type) {
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
case HPT_T_RAID0:
|
2002-03-27 22:05:38 +00:00
|
|
|
|
if ((info->order & (HPT_O_RAID0|HPT_O_OK))==(HPT_O_RAID0|HPT_O_OK))
|
|
|
|
|
goto highpoint_raid1;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
if (info->order & (HPT_O_RAID0 | HPT_O_RAID1))
|
2002-03-08 21:36:49 +00:00
|
|
|
|
goto highpoint_raid01;
|
|
|
|
|
if (raid->magic_0 && raid->magic_0 != info->magic_0)
|
|
|
|
|
continue;
|
|
|
|
|
raid->magic_0 = info->magic_0;
|
|
|
|
|
raid->flags |= AR_F_RAID0;
|
|
|
|
|
raid->interleave = 1 << info->stripe_shift;
|
|
|
|
|
disk_number = info->disk_number;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
if (!(info->order & HPT_O_OK))
|
2002-02-12 11:35:15 +00:00
|
|
|
|
info->magic = 0; /* mark bad */
|
2002-03-08 21:36:49 +00:00
|
|
|
|
break;
|
2000-10-22 12:17:57 +00:00
|
|
|
|
|
2002-03-08 21:36:49 +00:00
|
|
|
|
case HPT_T_RAID1:
|
2002-03-27 22:05:38 +00:00
|
|
|
|
highpoint_raid1:
|
2002-03-08 21:36:49 +00:00
|
|
|
|
if (raid->magic_0 && raid->magic_0 != info->magic_0)
|
|
|
|
|
continue;
|
|
|
|
|
raid->magic_0 = info->magic_0;
|
|
|
|
|
raid->flags |= AR_F_RAID1;
|
|
|
|
|
disk_number = (info->disk_number > 0);
|
|
|
|
|
break;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
|
2002-03-08 21:36:49 +00:00
|
|
|
|
case HPT_T_RAID01_RAID0:
|
|
|
|
|
highpoint_raid01:
|
2002-03-27 10:58:59 +00:00
|
|
|
|
if (info->order & HPT_O_RAID0) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if ((raid->magic_0 && raid->magic_0 != info->magic_0) ||
|
|
|
|
|
(raid->magic_1 && raid->magic_1 != info->magic_1))
|
2000-10-22 12:17:57 +00:00
|
|
|
|
continue;
|
2001-09-20 15:25:36 +00:00
|
|
|
|
raid->magic_0 = info->magic_0;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
raid->magic_1 = info->magic_1;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
raid->flags |= (AR_F_RAID0 | AR_F_RAID1);
|
|
|
|
|
raid->interleave = 1 << info->stripe_shift;
|
|
|
|
|
disk_number = info->disk_number;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
}
|
|
|
|
|
else {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if (raid->magic_1 && raid->magic_1 != info->magic_1)
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
continue;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
raid->magic_1 = info->magic_1;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
raid->flags |= (AR_F_RAID0 | AR_F_RAID1);
|
|
|
|
|
raid->interleave = 1 << info->stripe_shift;
|
|
|
|
|
disk_number = info->disk_number + info->array_width;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
if (!(info->order & HPT_O_RAID1))
|
|
|
|
|
info->magic = 0; /* mark bad */
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case HPT_T_SPAN:
|
2001-09-20 15:25:36 +00:00
|
|
|
|
if (raid->magic_0 && raid->magic_0 != info->magic_0)
|
2000-10-13 13:04:45 +00:00
|
|
|
|
continue;
|
2001-09-20 15:25:36 +00:00
|
|
|
|
raid->magic_0 = info->magic_0;
|
2000-10-22 12:17:57 +00:00
|
|
|
|
raid->flags |= AR_F_SPAN;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
disk_number = info->disk_number;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
2002-02-12 11:35:15 +00:00
|
|
|
|
printf("ar%d: HighPoint unknown RAID type 0x%02x\n",
|
|
|
|
|
array, info->type);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
goto highpoint_out;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
raid->flags |= AR_F_HIGHPOINT_RAID;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
raid->disks[disk_number].device = adp->device;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
raid->disks[disk_number].flags = (AR_DF_PRESENT | AR_DF_ASSIGNED);
|
2003-04-07 14:14:29 +00:00
|
|
|
|
AD_SOFTC(raid->disks[disk_number])->flags |= AD_F_RAID_SUBDISK;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
raid->lun = array;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
if (info->magic == HPT_MAGIC_OK) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
raid->disks[disk_number].flags |= AR_DF_ONLINE;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
raid->flags |= AR_F_READY;
|
|
|
|
|
raid->width = info->array_width;
|
2000-11-01 17:35:44 +00:00
|
|
|
|
raid->heads = 255;
|
|
|
|
|
raid->sectors = 63;
|
2002-03-07 16:32:21 +00:00
|
|
|
|
raid->cylinders = info->total_sectors / (63 * 255);
|
|
|
|
|
raid->total_sectors = info->total_sectors;
|
|
|
|
|
raid->offset = HPT_LBA + 1;
|
|
|
|
|
raid->reserved = HPT_LBA + 1;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
raid->lock_start = raid->lock_end = info->rebuild_lba;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
raid->disks[disk_number].disk_sectors =
|
|
|
|
|
info->total_sectors / info->array_width;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
2002-02-12 11:35:15 +00:00
|
|
|
|
else
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
raid->disks[disk_number].flags &= ~ AR_DF_ONLINE;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
|
|
|
|
|
if ((raid->flags & AR_F_RAID0) && (raid->total_disks < raid->width))
|
|
|
|
|
raid->total_disks = raid->width;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
if (disk_number >= raid->total_disks)
|
|
|
|
|
raid->total_disks = disk_number + 1;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
retval = 1;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
break;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
2001-09-20 15:25:36 +00:00
|
|
|
|
highpoint_out:
|
|
|
|
|
free(info, M_AR);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
return retval;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2002-02-12 11:35:15 +00:00
|
|
|
|
static int
|
|
|
|
|
ar_highpoint_write_conf(struct ar_softc *rdp)
|
|
|
|
|
{
|
|
|
|
|
struct highpoint_raid_conf *config;
|
|
|
|
|
struct timeval timestamp;
|
|
|
|
|
int disk;
|
|
|
|
|
|
|
|
|
|
microtime(×tamp);
|
2002-03-08 21:36:49 +00:00
|
|
|
|
rdp->magic_0 = timestamp.tv_sec + 2;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
rdp->magic_1 = timestamp.tv_sec;
|
|
|
|
|
|
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++) {
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (!(config = (struct highpoint_raid_conf *)
|
|
|
|
|
malloc(sizeof(struct highpoint_raid_conf),
|
|
|
|
|
M_AR, M_NOWAIT | M_ZERO))) {
|
|
|
|
|
printf("ar%d: Highpoint write conf failed\n", rdp->lun);
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if ((rdp->disks[disk].flags & (AR_DF_PRESENT | AR_DF_ONLINE)) ==
|
|
|
|
|
(AR_DF_PRESENT | AR_DF_ONLINE))
|
|
|
|
|
config->magic = HPT_MAGIC_OK;
|
2002-03-08 11:33:52 +00:00
|
|
|
|
if (rdp->disks[disk].flags & AR_DF_ASSIGNED) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->magic_0 = rdp->magic_0;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
strcpy(config->name_1, "FreeBSD");
|
2002-03-08 11:33:52 +00:00
|
|
|
|
}
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->disk_number = disk;
|
|
|
|
|
|
|
|
|
|
switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
|
|
|
|
|
case AR_F_RAID0:
|
|
|
|
|
config->type = HPT_T_RAID0;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
strcpy(config->name_2, "RAID 0");
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if (rdp->disks[disk].flags & AR_DF_ONLINE)
|
2002-03-27 10:58:59 +00:00
|
|
|
|
config->order = HPT_O_OK;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case AR_F_RAID1:
|
2002-03-27 22:05:38 +00:00
|
|
|
|
config->type = HPT_T_RAID0;
|
2002-03-08 11:33:52 +00:00
|
|
|
|
strcpy(config->name_2, "RAID 1");
|
2002-03-27 22:05:38 +00:00
|
|
|
|
config->disk_number = (disk < rdp->width) ? disk : disk + 5;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
config->order = HPT_O_RAID0 | HPT_O_OK;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case AR_F_RAID0 | AR_F_RAID1:
|
|
|
|
|
config->type = HPT_T_RAID01_RAID0;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
strcpy(config->name_2, "RAID 0+1");
|
|
|
|
|
if (rdp->disks[disk].flags & AR_DF_ONLINE) {
|
2002-03-08 11:33:52 +00:00
|
|
|
|
if (disk < rdp->width) {
|
2002-03-27 10:58:59 +00:00
|
|
|
|
config->order = (HPT_O_RAID0 | HPT_O_RAID1);
|
2002-03-08 11:33:52 +00:00
|
|
|
|
config->magic_0 = rdp->magic_0 - 1;
|
|
|
|
|
}
|
|
|
|
|
else {
|
2002-03-08 21:36:49 +00:00
|
|
|
|
config->order = HPT_O_RAID1;
|
2002-03-08 11:33:52 +00:00
|
|
|
|
config->disk_number -= rdp->width;
|
|
|
|
|
}
|
2002-03-08 21:36:49 +00:00
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
config->magic_0 = rdp->magic_0 - 1;
|
|
|
|
|
config->magic_1 = rdp->magic_1;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case AR_F_SPAN:
|
|
|
|
|
config->type = HPT_T_SPAN;
|
2002-03-08 11:33:52 +00:00
|
|
|
|
strcpy(config->name_2, "SPAN");
|
2002-02-12 11:35:15 +00:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
config->array_width = rdp->width;
|
|
|
|
|
config->stripe_shift = (rdp->width > 1) ? (ffs(rdp->interleave)-1) : 0;
|
|
|
|
|
config->total_sectors = rdp->total_sectors;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
config->rebuild_lba = rdp->lock_start;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
|
2003-08-24 09:22:26 +00:00
|
|
|
|
if (rdp->disks[disk].device && rdp->disks[disk].device->softc &&
|
2002-02-12 11:35:15 +00:00
|
|
|
|
!(rdp->disks[disk].device->flags & ATA_D_DETACHING)) {
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (ar_rw(AD_SOFTC(rdp->disks[disk]), HPT_LBA,
|
|
|
|
|
sizeof(struct highpoint_raid_conf),
|
|
|
|
|
(caddr_t)config, AR_WRITE)) {
|
2002-03-07 16:32:21 +00:00
|
|
|
|
printf("ar%d: Highpoint write conf failed\n", rdp->lun);
|
2002-02-12 11:35:15 +00:00
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2000-10-13 13:04:45 +00:00
|
|
|
|
static int
|
2002-03-27 10:58:59 +00:00
|
|
|
|
ar_promise_read_conf(struct ad_softc *adp, struct ar_softc **raidp, int local)
|
2000-10-13 13:04:45 +00:00
|
|
|
|
{
|
2001-09-20 15:25:36 +00:00
|
|
|
|
struct promise_raid_conf *info;
|
2000-10-22 12:17:57 +00:00
|
|
|
|
struct ar_softc *raid;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
u_int32_t magic, cksum, *ckptr;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
int array, count, disk, disksum = 0, retval = 0;
|
2001-09-20 15:25:36 +00:00
|
|
|
|
|
|
|
|
|
if (!(info = (struct promise_raid_conf *)
|
|
|
|
|
malloc(sizeof(struct promise_raid_conf), M_AR, M_NOWAIT | M_ZERO)))
|
2002-03-03 15:36:21 +00:00
|
|
|
|
return retval;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (ar_rw(adp, PR_LBA(adp), sizeof(struct promise_raid_conf),
|
|
|
|
|
(caddr_t)info, AR_READ | AR_WAIT)) {
|
2000-10-18 18:49:42 +00:00
|
|
|
|
if (bootverbose)
|
2002-03-27 10:58:59 +00:00
|
|
|
|
printf("ar: %s read conf failed\n", local ? "FreeBSD" : "Promise");
|
2001-09-20 15:25:36 +00:00
|
|
|
|
goto promise_out;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2002-03-27 10:58:59 +00:00
|
|
|
|
/* check if this is a Promise RAID struct (or our local one) */
|
|
|
|
|
if (local) {
|
|
|
|
|
if (strncmp(info->promise_id, ATA_MAGIC, sizeof(ATA_MAGIC))) {
|
|
|
|
|
if (bootverbose)
|
|
|
|
|
printf("ar: FreeBSD check1 failed\n");
|
|
|
|
|
goto promise_out;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
if (strncmp(info->promise_id, PR_MAGIC, sizeof(PR_MAGIC))) {
|
|
|
|
|
if (bootverbose)
|
|
|
|
|
printf("ar: Promise check1 failed\n");
|
|
|
|
|
goto promise_out;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2000-10-13 13:04:45 +00:00
|
|
|
|
/* check if the checksum is OK */
|
2001-09-20 15:25:36 +00:00
|
|
|
|
for (cksum = 0, ckptr = (int32_t *)info, count = 0; count < 511; count++)
|
2000-10-13 13:04:45 +00:00
|
|
|
|
cksum += *ckptr++;
|
|
|
|
|
if (cksum != *ckptr) {
|
2000-10-18 18:49:42 +00:00
|
|
|
|
if (bootverbose)
|
2002-03-27 10:58:59 +00:00
|
|
|
|
printf("ar: %s check2 failed\n", local ? "FreeBSD" : "Promise");
|
2001-09-20 15:25:36 +00:00
|
|
|
|
goto promise_out;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* now convert Promise config info into our generic form */
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if (info->raid.integrity != PR_I_VALID) {
|
2001-10-04 18:02:26 +00:00
|
|
|
|
if (bootverbose)
|
2002-03-27 10:58:59 +00:00
|
|
|
|
printf("ar: %s check3 failed\n", local ? "FreeBSD" : "Promise");
|
2001-09-20 15:25:36 +00:00
|
|
|
|
goto promise_out;
|
2000-11-01 17:35:44 +00:00
|
|
|
|
}
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
for (array = 0; array < MAX_ARRAYS; array++) {
|
|
|
|
|
if (!raidp[array]) {
|
|
|
|
|
raidp[array] =
|
|
|
|
|
(struct ar_softc*)malloc(sizeof(struct ar_softc), M_AR,
|
|
|
|
|
M_NOWAIT | M_ZERO);
|
|
|
|
|
if (!raidp[array]) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
printf("ar%d: failed to allocate raid config storage\n", array);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
goto promise_out;
|
|
|
|
|
}
|
2000-11-01 17:35:44 +00:00
|
|
|
|
}
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
raid = raidp[array];
|
|
|
|
|
if (raid->flags & AR_F_HIGHPOINT_RAID)
|
|
|
|
|
continue;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
2003-08-24 09:22:26 +00:00
|
|
|
|
magic = (pci_get_device(device_get_parent(
|
|
|
|
|
adp->device->channel->dev)) >> 16) |
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
(info->raid.array_number << 16);
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
if (raid->flags & AR_F_PROMISE_RAID && magic != raid->magic_0)
|
|
|
|
|
continue;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
/* update our knowledge about the array config based on generation */
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if (!info->raid.generation || info->raid.generation > raid->generation){
|
|
|
|
|
raid->generation = info->raid.generation;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
raid->flags = AR_F_PROMISE_RAID;
|
2003-02-20 20:02:32 +00:00
|
|
|
|
if (local)
|
2002-03-27 10:58:59 +00:00
|
|
|
|
raid->flags |= AR_F_FREEBSD_RAID;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
raid->magic_0 = magic;
|
|
|
|
|
raid->lun = array;
|
|
|
|
|
if ((info->raid.status &
|
|
|
|
|
(PR_S_VALID | PR_S_ONLINE | PR_S_INITED | PR_S_READY)) ==
|
|
|
|
|
(PR_S_VALID | PR_S_ONLINE | PR_S_INITED | PR_S_READY)) {
|
|
|
|
|
raid->flags |= AR_F_READY;
|
|
|
|
|
if (info->raid.status & PR_S_DEGRADED)
|
|
|
|
|
raid->flags |= AR_F_DEGRADED;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
raid->flags &= ~AR_F_READY;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
switch (info->raid.type) {
|
|
|
|
|
case PR_T_RAID0:
|
|
|
|
|
raid->flags |= AR_F_RAID0;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case PR_T_RAID1:
|
|
|
|
|
raid->flags |= AR_F_RAID1;
|
|
|
|
|
if (info->raid.array_width > 1)
|
|
|
|
|
raid->flags |= AR_F_RAID0;
|
|
|
|
|
break;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
case PR_T_SPAN:
|
|
|
|
|
raid->flags |= AR_F_SPAN;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
2002-03-27 10:58:59 +00:00
|
|
|
|
printf("ar%d: %s unknown RAID type 0x%02x\n",
|
|
|
|
|
array, local ? "FreeBSD" : "Promise", info->raid.type);
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
goto promise_out;
|
|
|
|
|
}
|
|
|
|
|
raid->interleave = 1 << info->raid.stripe_shift;
|
|
|
|
|
raid->width = info->raid.array_width;
|
|
|
|
|
raid->total_disks = info->raid.total_disks;
|
|
|
|
|
raid->heads = info->raid.heads + 1;
|
|
|
|
|
raid->sectors = info->raid.sectors;
|
|
|
|
|
raid->cylinders = info->raid.cylinders + 1;
|
|
|
|
|
raid->total_sectors = info->raid.total_sectors;
|
|
|
|
|
raid->offset = 0;
|
|
|
|
|
raid->reserved = 63;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
raid->lock_start = raid->lock_end = info->raid.rebuild_lba;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
|
|
|
|
|
/* convert disk flags to our internal types */
|
|
|
|
|
for (disk = 0; disk < info->raid.total_disks; disk++) {
|
|
|
|
|
raid->disks[disk].flags = 0;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
disksum += info->raid.disk[disk].flags;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if (info->raid.disk[disk].flags & PR_F_ONLINE)
|
|
|
|
|
raid->disks[disk].flags |= AR_DF_ONLINE;
|
|
|
|
|
if (info->raid.disk[disk].flags & PR_F_ASSIGNED)
|
|
|
|
|
raid->disks[disk].flags |= AR_DF_ASSIGNED;
|
2002-04-10 11:18:07 +00:00
|
|
|
|
if (info->raid.disk[disk].flags & PR_F_SPARE) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
raid->disks[disk].flags &= ~AR_DF_ONLINE;
|
2002-04-10 11:18:07 +00:00
|
|
|
|
raid->disks[disk].flags |= AR_DF_SPARE;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
}
|
2002-04-10 11:18:07 +00:00
|
|
|
|
if (info->raid.disk[disk].flags & (PR_F_REDIR | PR_F_DOWN))
|
|
|
|
|
raid->disks[disk].flags &= ~AR_DF_ONLINE;
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
}
|
2002-03-27 10:58:59 +00:00
|
|
|
|
if (!disksum) {
|
|
|
|
|
free(raidp[array], M_AR);
|
|
|
|
|
raidp[array] = NULL;
|
|
|
|
|
goto promise_out;
|
|
|
|
|
}
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
2003-05-02 12:41:44 +00:00
|
|
|
|
if (info->raid.generation >= raid->generation) {
|
|
|
|
|
if (raid->disks[info->raid.disk_number].flags && adp->device) {
|
|
|
|
|
raid->disks[info->raid.disk_number].device = adp->device;
|
|
|
|
|
raid->disks[info->raid.disk_number].flags |= AR_DF_PRESENT;
|
|
|
|
|
raid->disks[info->raid.disk_number].disk_sectors =
|
|
|
|
|
info->raid.disk_sectors;
|
|
|
|
|
if ((raid->disks[info->raid.disk_number].flags &
|
|
|
|
|
(AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE)) ==
|
|
|
|
|
(AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE)) {
|
|
|
|
|
AD_SOFTC(raid->disks[info->raid.disk_number])->flags |=
|
|
|
|
|
AD_F_RAID_SUBDISK;
|
|
|
|
|
retval = 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
2002-02-12 11:35:15 +00:00
|
|
|
|
}
|
Major update of the ATA RAID code, part 1:
Overhaul of the attach/detach code and structures, there were some nasty
bugs in the old implementation. This made it possible to collapse the
ATA/ATAPI device control structures into one generic structure.
A note here, the kernel is NOT ready for detach of active devices,
it fails all over in random places, but for inactive devices it works.
However for ATA RAID this works, since the RAID abstration layer
insulates the buggy^H^H^H^H^H^Hfragile device subsystem from the
physical disks.
Proberly detect the RAID's from the BIOS, and mark critical RAID1
arrays as such, but continue if there is enough of the mirror left
to do so.
Properly fail arrays on a live system. For RAID0 that means return EIO,
and for RAID1 it means continue on the still working part of the mirror
if possible, else return EIO.
If the state changes, log this to the console.
Allow for Promise & Highpoint controllers/arrays to coexist on the
same machine. It is not possible to distribute arrays over different
makes of controllers though.
If Promise SuperSwap enclosures are used, signal disk state on the
status LED on the front.
Misc fixes that I had lying around for various minor bugs.
Sponsored by: Advanis Inc.
2002-02-04 19:23:40 +00:00
|
|
|
|
break;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
2001-09-20 15:25:36 +00:00
|
|
|
|
promise_out:
|
|
|
|
|
free(info, M_AR);
|
2002-03-03 15:36:21 +00:00
|
|
|
|
return retval;
|
2000-10-13 13:04:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2002-02-12 11:35:15 +00:00
|
|
|
|
static int
|
|
|
|
|
ar_promise_write_conf(struct ar_softc *rdp)
|
|
|
|
|
{
|
|
|
|
|
struct promise_raid_conf *config;
|
|
|
|
|
struct timeval timestamp;
|
|
|
|
|
u_int32_t *ckptr;
|
|
|
|
|
int count, disk, drive;
|
2002-03-27 10:58:59 +00:00
|
|
|
|
int local = rdp->flags & AR_F_FREEBSD_RAID;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
|
|
|
|
|
rdp->generation++;
|
|
|
|
|
microtime(×tamp);
|
|
|
|
|
|
|
|
|
|
for (disk = 0; disk < rdp->total_disks; disk++) {
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (!(config = (struct promise_raid_conf *)
|
|
|
|
|
malloc(sizeof(struct promise_raid_conf), M_AR, M_NOWAIT))) {
|
2002-03-27 10:58:59 +00:00
|
|
|
|
printf("ar%d: %s write conf failed\n",
|
|
|
|
|
rdp->lun, local ? "FreeBSD" : "Promise");
|
2002-03-03 15:36:21 +00:00
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
for (count = 0; count < sizeof(struct promise_raid_conf); count++)
|
|
|
|
|
*(((u_int8_t *)config) + count) = 255 - (count % 256);
|
|
|
|
|
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->dummy_0 = 0x00020000;
|
|
|
|
|
config->magic_0 = PR_MAGIC0(rdp->disks[disk]) | timestamp.tv_sec;
|
|
|
|
|
config->magic_1 = timestamp.tv_sec >> 16;
|
|
|
|
|
config->magic_2 = timestamp.tv_sec;
|
|
|
|
|
config->raid.integrity = PR_I_VALID;
|
|
|
|
|
|
|
|
|
|
config->raid.disk_number = disk;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (rdp->disks[disk].flags & AR_DF_PRESENT && rdp->disks[disk].device) {
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->raid.channel = rdp->disks[disk].device->channel->unit;
|
|
|
|
|
config->raid.device = (rdp->disks[disk].device->unit != 0);
|
2003-08-24 09:22:26 +00:00
|
|
|
|
if (rdp->disks[disk].device->softc)
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->raid.disk_sectors = PR_LBA(AD_SOFTC(rdp->disks[disk]));
|
|
|
|
|
/*config->raid.disk_offset*/
|
|
|
|
|
}
|
|
|
|
|
config->raid.magic_0 = config->magic_0;
|
2002-03-08 21:36:49 +00:00
|
|
|
|
config->raid.rebuild_lba = rdp->lock_start;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->raid.generation = rdp->generation;
|
|
|
|
|
|
|
|
|
|
if (rdp->flags & AR_F_READY) {
|
2002-04-10 11:18:07 +00:00
|
|
|
|
config->raid.flags = (PR_F_VALID | PR_F_ASSIGNED | PR_F_ONLINE);
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->raid.status =
|
|
|
|
|
(PR_S_VALID | PR_S_ONLINE | PR_S_INITED | PR_S_READY);
|
|
|
|
|
if (rdp->flags & AR_F_DEGRADED)
|
|
|
|
|
config->raid.status |= PR_S_DEGRADED;
|
|
|
|
|
else
|
|
|
|
|
config->raid.status |= PR_S_FUNCTIONAL;
|
|
|
|
|
}
|
2002-04-10 11:18:07 +00:00
|
|
|
|
else {
|
|
|
|
|
config->raid.flags = PR_F_DOWN;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->raid.status = 0;
|
2002-04-10 11:18:07 +00:00
|
|
|
|
}
|
2002-02-12 11:35:15 +00:00
|
|
|
|
|
|
|
|
|
switch (rdp->flags & (AR_F_RAID0 | AR_F_RAID1 | AR_F_SPAN)) {
|
|
|
|
|
case AR_F_RAID0:
|
|
|
|
|
config->raid.type = PR_T_RAID0;
|
|
|
|
|
break;
|
|
|
|
|
case AR_F_RAID1:
|
|
|
|
|
config->raid.type = PR_T_RAID1;
|
|
|
|
|
break;
|
|
|
|
|
case AR_F_RAID0 | AR_F_RAID1:
|
|
|
|
|
config->raid.type = PR_T_RAID1;
|
|
|
|
|
break;
|
|
|
|
|
case AR_F_SPAN:
|
|
|
|
|
config->raid.type = PR_T_SPAN;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
config->raid.total_disks = rdp->total_disks;
|
|
|
|
|
config->raid.stripe_shift = ffs(rdp->interleave) - 1;
|
|
|
|
|
config->raid.array_width = rdp->width;
|
2002-03-30 11:15:46 +00:00
|
|
|
|
config->raid.array_number = rdp->lun;
|
2002-02-12 11:35:15 +00:00
|
|
|
|
config->raid.total_sectors = rdp->total_sectors;
|
|
|
|
|
config->raid.cylinders = rdp->cylinders - 1;
|
|
|
|
|
config->raid.heads = rdp->heads - 1;
|
|
|
|
|
config->raid.sectors = rdp->sectors;
|
|
|
|
|
config->raid.magic_1 = (u_int64_t)config->magic_2<<16 | config->magic_1;
|
|
|
|
|
|
|
|
|
|
bzero(&config->raid.disk, 8 * 12);
|
|
|
|
|
for (drive = 0; drive < rdp->total_disks; drive++) {
|
|
|
|
|
config->raid.disk[drive].flags = 0;
|
|
|
|
|
if (rdp->disks[drive].flags & AR_DF_PRESENT)
|
|
|
|
|
config->raid.disk[drive].flags |= PR_F_VALID;
|
|
|
|
|
if (rdp->disks[drive].flags & AR_DF_ASSIGNED)
|
|
|
|
|
config->raid.disk[drive].flags |= PR_F_ASSIGNED;
|
|
|
|
|
if (rdp->disks[drive].flags & AR_DF_ONLINE)
|
|
|
|
|
config->raid.disk[drive].flags |= PR_F_ONLINE;
|
|
|
|
|
else
|
2002-03-27 10:58:59 +00:00
|
|
|
|
if (rdp->disks[drive].flags & AR_DF_PRESENT)
|
|
|
|
|
config->raid.disk[drive].flags = (PR_F_REDIR | PR_F_DOWN);
|
2002-02-12 11:35:15 +00:00
|
|
|
|
if (rdp->disks[drive].flags & AR_DF_SPARE)
|
|
|
|
|
config->raid.disk[drive].flags |= PR_F_SPARE;
|
|
|
|
|
config->raid.disk[drive].dummy_0 = 0x0;
|
|
|
|
|
if (rdp->disks[drive].device) {
|
|
|
|
|
config->raid.disk[drive].channel =
|
|
|
|
|
rdp->disks[drive].device->channel->unit;
|
|
|
|
|
config->raid.disk[drive].device =
|
|
|
|
|
(rdp->disks[drive].device->unit != 0);
|
|
|
|
|
}
|
|
|
|
|
config->raid.disk[drive].magic_0 =
|
|
|
|
|
PR_MAGIC0(rdp->disks[drive]) | timestamp.tv_sec;
|
|
|
|
|
}
|
|
|
|
|
|
2003-08-24 09:22:26 +00:00
|
|
|
|
if (rdp->disks[disk].device && rdp->disks[disk].device->softc &&
|
2002-02-12 11:35:15 +00:00
|
|
|
|
!(rdp->disks[disk].device->flags & ATA_D_DETACHING)) {
|
2003-05-02 12:41:44 +00:00
|
|
|
|
if ((rdp->disks[disk].flags & (AR_DF_PRESENT | AR_DF_ONLINE)) ==
|
2003-08-24 09:22:26 +00:00
|
|
|
|
(AR_DF_PRESENT | AR_DF_ONLINE)) {
|
2003-05-02 12:41:44 +00:00
|
|
|
|
if (local)
|
|
|
|
|
bcopy(ATA_MAGIC, config->promise_id, sizeof(ATA_MAGIC));
|
|
|
|
|
else
|
|
|
|
|
bcopy(PR_MAGIC, config->promise_id, sizeof(PR_MAGIC));
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
bzero(config->promise_id, sizeof(config->promise_id));
|
|
|
|
|
config->checksum = 0;
|
|
|
|
|
for (ckptr = (int32_t *)config, count = 0; count < 511; count++)
|
|
|
|
|
config->checksum += *ckptr++;
|
2002-03-03 15:36:21 +00:00
|
|
|
|
if (ar_rw(AD_SOFTC(rdp->disks[disk]),
|
|
|
|
|
PR_LBA(AD_SOFTC(rdp->disks[disk])),
|
|
|
|
|
sizeof(struct promise_raid_conf),
|
|
|
|
|
(caddr_t)config, AR_WRITE)) {
|
2002-03-27 10:58:59 +00:00
|
|
|
|
printf("ar%d: %s write conf failed\n",
|
|
|
|
|
rdp->lun, local ? "FreeBSD" : "Promise");
|
2002-02-12 11:35:15 +00:00
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2002-03-03 15:36:21 +00:00
|
|
|
|
static void
|
|
|
|
|
ar_rw_done(struct bio *bp)
|
2000-10-13 13:04:45 +00:00
|
|
|
|
{
|
2002-03-03 15:36:21 +00:00
|
|
|
|
free(bp->bio_data, M_AR);
|
|
|
|
|
free(bp, M_AR);
|
2002-02-12 11:35:15 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Issue a read or write of 'count' bytes at 'lba' on the given ATA disk.
 *
 * flags: AR_READ or AR_WRITE selects the bio command; AR_WAIT makes the
 * call synchronous.  Ownership rules differ by mode:
 *   - with AR_WAIT: the bio is freed here after completion and the caller
 *     keeps ownership of 'data';
 *   - without AR_WAIT: ar_rw_done() later frees BOTH the bio and 'data'.
 *
 * Returns 1 if the bio could not be allocated; otherwise 0 on success or,
 * in AR_WAIT mode, a tsleep()/bio error code on failure.
 */
static int
ar_rw(struct ad_softc *adp, u_int32_t lba, int count, caddr_t data, int flags)
{
    struct bio *bp;
    int retry = 0, error = 0;

    if (!(bp = (struct bio *)malloc(sizeof(struct bio), M_AR, M_NOWAIT|M_ZERO)))
	return 1;
    bp->bio_disk = &adp->disk;
    bp->bio_data = data;
    bp->bio_pblkno = lba;
    bp->bio_bcount = count;
    if (flags & AR_READ)
	bp->bio_cmd = BIO_READ;
    if (flags & AR_WRITE)
	bp->bio_cmd = BIO_WRITE;
    /*
     * Synchronous callers are woken directly; the cast relies on wakeup()
     * taking a single pointer argument compatible with bio_done's.
     */
    if (flags & AR_WAIT)
	bp->bio_done = (void *)wakeup;
    else
	bp->bio_done = ar_rw_done;

    AR_STRATEGY(bp);

    if (flags & AR_WAIT) {
	/*
	 * Poll for BIO_DONE, sleeping 10 ticks per iteration for at most
	 * 15*hz/10 iterations (roughly 15 seconds total).  'error' is left
	 * non-zero if the request never completed.
	 */
	while ((retry++ < (15*hz/10)) && (error = !(bp->bio_flags & BIO_DONE)))
	    error = tsleep(bp, PRIBIO, "arrw", 10);
	if (!error && bp->bio_flags & BIO_ERROR)
	    error = bp->bio_error;
	/* Synchronous mode: the bio is ours to free; 'data' stays with caller. */
	free(bp, M_AR);
    }
    return error;
}
|
2002-03-27 10:58:59 +00:00
|
|
|
|
|
|
|
|
|
static struct ata_device *
|
|
|
|
|
ar_locate_disk(int diskno)
|
|
|
|
|
{
|
|
|
|
|
struct ata_channel *ch;
|
|
|
|
|
int ctlr;
|
|
|
|
|
|
|
|
|
|
for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
|
|
|
|
|
if (!(ch = devclass_get_softc(ata_devclass, ctlr)))
|
|
|
|
|
continue;
|
|
|
|
|
if (ch->devices & ATA_ATA_MASTER)
|
2003-08-24 09:22:26 +00:00
|
|
|
|
if (ch->device[MASTER].softc &&
|
|
|
|
|
((struct ad_softc *)(ch->device[MASTER].softc))->lun == diskno)
|
2002-03-27 10:58:59 +00:00
|
|
|
|
return &ch->device[MASTER];
|
|
|
|
|
if (ch->devices & ATA_ATA_SLAVE)
|
2003-08-24 09:22:26 +00:00
|
|
|
|
if (ch->device[SLAVE].softc &&
|
|
|
|
|
((struct ad_softc *)(ch->device[SLAVE].softc))->lun == diskno)
|
2002-03-27 10:58:59 +00:00
|
|
|
|
return &ch->device[SLAVE];
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2003-04-08 07:48:52 +00:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
ar_print_conf(struct ar_softc *config)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
printf("lun %d\n", config->lun);
|
|
|
|
|
printf("magic_0 0x%08x\n", config->magic_0);
|
|
|
|
|
printf("magic_1 0x%08x\n", config->magic_1);
|
|
|
|
|
printf("flags 0x%02x %b\n", config->flags, config->flags,
|
|
|
|
|
"\20\16HIGHPOINT\15PROMISE\13REBUILDING\12DEGRADED\11READY\3SPAN\2RAID1\1RAID0\n");
|
2003-08-24 09:22:26 +00:00
|
|
|
|
printf("total_disks %d\n", config->total_disks);
|
2003-04-08 07:48:52 +00:00
|
|
|
|
printf("generation %d\n", config->generation);
|
|
|
|
|
printf("width %d\n", config->width);
|
|
|
|
|
printf("heads %d\n", config->heads);
|
|
|
|
|
printf("sectors %d\n", config->sectors);
|
|
|
|
|
printf("cylinders %d\n", config->cylinders);
|
2003-04-08 18:01:30 +00:00
|
|
|
|
printf("total_sectors %lld\n", (long long)config->total_sectors);
|
2003-04-08 07:48:52 +00:00
|
|
|
|
printf("interleave %d\n", config->interleave);
|
|
|
|
|
printf("reserved %d\n", config->reserved);
|
|
|
|
|
printf("offset %d\n", config->offset);
|
|
|
|
|
for (i = 0; i < config->total_disks; i++) {
|
|
|
|
|
printf("disk %d: flags = 0x%02x %b\n", i, config->disks[i].flags, config->disks[i].flags, "\20\4ONLINE\3SPARE\2ASSIGNED\1PRESENT\n");
|
|
|
|
|
if (config->disks[i].device)
|
|
|
|
|
printf(" %s\n", config->disks[i].device->name);
|
2003-08-24 09:22:26 +00:00
|
|
|
|
printf(" sectors %lld\n", (long long)config->disks[i].disk_sectors);
|
2003-04-08 07:48:52 +00:00
|
|
|
|
}
|
|
|
|
|
}
|