freebsd-dev/stand/libofw/devicename.c


/*-
 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <stand.h>

#include "bootstrap.h"
#include "libofw.h"
#include "libzfs.h"
static int ofw_parsedev(struct ofw_devdesc **, const char *, const char **);
/*
 * Point (dev) at an allocated device specifier for the device matching the
 * path in (devspec). If it contains an explicit device specification,
 * use that. If not, use the default device.
 */
int
ofw_getdev(void **vdev, const char *devspec, const char **path)
{
	struct ofw_devdesc **dev = (struct ofw_devdesc **)vdev;
	int rv;

	/*
	 * If it looks like this is just a path and no
	 * device, go with the current device.
	 */
	if ((devspec == NULL) ||
	    ((strchr(devspec, '@') == NULL) &&
	    (strchr(devspec, ':') == NULL))) {
		if (((rv = ofw_parsedev(dev, getenv("currdev"), NULL)) == 0) &&
		    (path != NULL))
			*path = devspec;
		return (rv);
	}

	/*
	 * Try to parse the device name off the beginning of the devspec.
	 */
	return (ofw_parsedev(dev, devspec, path));
}
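
/*
 * Usage sketch (illustrative only; the OFW node path and the file name
 * below are hypothetical examples, not values used by this file):
 *
 *	struct ofw_devdesc *dev;
 *	const char *path;
 *
 *	// Explicit device: the '@' in the node name routes this through
 *	// ofw_parsedev(), which returns any trailing text via (path).
 *	ofw_getdev((void **)&dev, "/pci@1f,0/ide@d/disk@0,0", &path);
 *
 *	// Bare path: no '@' or ':', so the device is taken from the
 *	// currdev environment variable and (path) is pointed at the
 *	// devspec itself.
 *	ofw_getdev((void **)&dev, "/boot/loader.conf", &path);
 */
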
/*
 * Point (dev) at an allocated device specifier matching the string version
 * at the beginning of (devspec). Return a pointer to the remaining
 * text in (path).
 */
static int
ofw_parsedev(struct ofw_devdesc **dev, const char *devspec, const char **path)
{
	struct ofw_devdesc *idev;
	struct devsw *dv;
	phandle_t handle;
	const char *p;
	const char *s;
	char name[256];
	char type[64];
	int err;
	int len;
	int i;

	/*
	 * Walk the path one '/'-separated component at a time, checking
	 * whether the prefix up to each component names a device node.
	 */
	for (p = s = devspec; *s != '\0'; p = s) {
		if ((s = strchr(p + 1, '/')) == NULL)
			s = strchr(p, '\0');
		len = s - devspec;
		bcopy(devspec, name, len);
		name[len] = '\0';
		if ((handle = OF_finddevice(name)) == -1) {
			/* Not a node; try the name itself as a type. */
			bcopy(name, type, len);
			type[len] = '\0';
		} else if (OF_getprop(handle, "device_type", type,
		    sizeof(type)) == -1)
			continue;
		for (i = 0; (dv = devsw[i]) != NULL; i++) {
			if (strncmp(dv->dv_name, type, strlen(dv->dv_name)) == 0)
				goto found;
		}
	}
	return (ENOENT);

found:
	if (path != NULL)
		*path = s;
	idev = malloc(sizeof(struct ofw_devdesc));
	if (idev == NULL) {
		printf("ofw_parsedev: malloc failed\n");
		return (ENOMEM);
	}
	strcpy(idev->d_path, name);
	idev->dd.d_dev = dv;
	if (dv->dv_type == DEVT_ZFS) {
		/* Hand the rest of the spec to the ZFS parser. */
		p = devspec + strlen(dv->dv_name);
		err = zfs_parsedev((struct zfs_devdesc *)idev, p, path);
		if (err != 0) {
			free(idev);
			return (err);
		}
	}

	if (dev == NULL) {
		free(idev);
	} else {
		*dev = idev;
	}
	return (0);
}
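
/*
 * Parsing walk-through (hypothetical path, for illustration): for
 * devspec = "/pci@1f,0/ide@d/disk@0,0", the loop above tries the prefixes
 * "/pci@1f,0", then "/pci@1f,0/ide@d", then "/pci@1f,0/ide@d/disk@0,0".
 * When OF_finddevice() resolves a prefix and its "device_type" property
 * (e.g. "block" for a disk node) matches a devsw entry's dv_name, the
 * search stops and (path) is left pointing at the text after that prefix.
 * If a prefix is not a node, it is tried verbatim as a type name, so a
 * plain type string can match as well.
 */
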
int
ofw_setcurrdev(struct env_var *ev, int flags, const void *value)
{
	struct ofw_devdesc *ncurr;
	int rv;

	if ((rv = ofw_parsedev(&ncurr, value, NULL)) != 0)
		return (rv);
	free(ncurr);

	return (mount_currdev(ev, flags, value));
}
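
/*
 * ofw_setcurrdev() is intended as the set-hook for the "currdev"
 * environment variable. A minimal registration sketch, assuming the
 * env_setenv() interface from bootstrap.h ("bootpath" is an illustrative
 * caller-supplied string, not defined here):
 *
 *	env_setenv("currdev", EV_VOLATILE, bootpath, ofw_setcurrdev,
 *	    env_nounset);
 *
 * Because the hook parses the value first, strings that do not name a
 * valid device are rejected before mount_currdev() records the change.
 */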