freebsd-dev/sys/boot/sparc64/loader/main.c

/*-
* Initial implementation:
* Copyright (c) 2001 Robert Drehmel
* All rights reserved.
*
* As long as the above copyright statement and this notice remain
* unchanged, you can do what ever you want with this file.
*/
/*-
* Copyright (c) 2008 Marius Strobl <marius@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* FreeBSD/sparc64 kernel loader - machine dependent part
*
* - implements copyin and readin functions that map kernel
* pages on demand. The machine independent code does not
* know the size of the kernel early enough to pre-enter
* TTEs, and installing just one 4MB mapping seemed too
* limiting to me.
*/
#include <stand.h>
#include <sys/param.h>
#include <sys/exec.h>
#include <sys/linker.h>
#include <sys/queue.h>
#include <sys/types.h>
#ifdef LOADER_ZFS_SUPPORT
#include <sys/vtoc.h>
#include "../zfs/libzfs.h"
#endif
#include <vm/vm.h>
#include <machine/asi.h>
#include <machine/cmt.h>
#include <machine/cpufunc.h>
#include <machine/elf.h>
#include <machine/fireplane.h>
#include <machine/jbus.h>
#include <machine/lsu.h>
#include <machine/metadata.h>
#include <machine/tte.h>
#include <machine/tlb.h>
#include <machine/upa.h>
#include <machine/ver.h>
#include <machine/vmparam.h>
#include "bootstrap.h"
#include "libofw.h"
#include "dev_net.h"
#define MAXDEV 31
extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];
enum {
HEAPVA = 0x800000,
HEAPSZ = 0x1000000,
LOADSZ = 0x1000000 /* for kernel and modules */
};
/* At least the Sun Fire V1280 requires page-sized allocations to be claimed. */
CTASSERT(HEAPSZ % PAGE_SIZE == 0);
static struct mmu_ops {
void (*tlb_init)(void);
int (*mmu_mapin)(vm_offset_t va, vm_size_t len);
} *mmu_ops;
typedef void kernel_entry_t(vm_offset_t mdp, u_long o1, u_long o2, u_long o3,
void *openfirmware);
static inline u_long dtlb_get_data_sun4u(u_int, u_int);
static int dtlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t dtlb_va_to_pa_sun4u(vm_offset_t);
static inline u_long itlb_get_data_sun4u(u_int, u_int);
static int itlb_enter_sun4u(u_int, u_long data, vm_offset_t);
static vm_offset_t itlb_va_to_pa_sun4u(vm_offset_t);
static void itlb_relocate_locked0_sun4u(void);
extern vm_offset_t md_load(char *, vm_offset_t *);
static int sparc64_autoload(void);
static ssize_t sparc64_readin(const int, vm_offset_t, const size_t);
static ssize_t sparc64_copyin(const void *, vm_offset_t, size_t);
static vm_offset_t claim_virt(vm_offset_t, size_t, int);
static vm_offset_t alloc_phys(size_t, int);
static int map_phys(int, size_t, vm_offset_t, vm_offset_t);
static void release_phys(vm_offset_t, u_int);
static int __elfN(exec)(struct preloaded_file *);
static int mmu_mapin_sun4u(vm_offset_t, vm_size_t);
static vm_offset_t init_heap(void);
static phandle_t find_bsp_sun4u(phandle_t, uint32_t);
const char *cpu_cpuid_prop_sun4u(void);
uint32_t cpu_get_mid_sun4u(void);
static void tlb_init_sun4u(void);
#ifdef LOADER_DEBUG
typedef u_int64_t tte_t;
static void pmap_print_tlb_sun4u(void);
static void pmap_print_tte_sun4u(tte_t, tte_t);
#endif
static struct mmu_ops mmu_ops_sun4u = { tlb_init_sun4u, mmu_mapin_sun4u };
/* sun4u */
struct tlb_entry *dtlb_store;
struct tlb_entry *itlb_store;
u_int dtlb_slot;
u_int itlb_slot;
static int cpu_impl;
static u_int dtlb_slot_max;
static u_int itlb_slot_max;
static u_int tlb_locked;
static vm_offset_t curkva = 0;
static vm_offset_t heapva;
static char bootpath[64];
static phandle_t root;
/*
* Machine dependent structures that the machine independent
* loader part uses.
*/
struct devsw *devsw[] = {
#ifdef LOADER_DISK_SUPPORT
&ofwdisk,
#endif
#ifdef LOADER_NET_SUPPORT
&netdev,
#endif
#ifdef LOADER_ZFS_SUPPORT
&zfs_dev,
#endif
0
};
struct arch_switch archsw;
static struct file_format sparc64_elf = {
__elfN(loadfile),
__elfN(exec)
};
struct file_format *file_formats[] = {
&sparc64_elf,
0
};
struct fs_ops *file_system[] = {
#ifdef LOADER_UFS_SUPPORT
&ufs_fsops,
#endif
#ifdef LOADER_CD9660_SUPPORT
&cd9660_fsops,
#endif
#ifdef LOADER_ZFS_SUPPORT
&zfs_fsops,
#endif
#ifdef LOADER_ZIP_SUPPORT
&zipfs_fsops,
#endif
#ifdef LOADER_GZIP_SUPPORT
&gzipfs_fsops,
#endif
#ifdef LOADER_BZIP2_SUPPORT
&bzipfs_fsops,
#endif
#ifdef LOADER_NFS_SUPPORT
&nfs_fsops,
#endif
#ifdef LOADER_TFTP_SUPPORT
&tftp_fsops,
#endif
0
};
struct netif_driver *netif_drivers[] = {
#ifdef LOADER_NET_SUPPORT
&ofwnet,
#endif
0
};
extern struct console ofwconsole;
struct console *consoles[] = {
&ofwconsole,
0
};
#ifdef LOADER_DEBUG
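/*
 * Debug helpers: program the D-MMU physical/virtual watchpoint registers
 * and the LSU control register so that accesses to the given address
 * range are watched (only built with LOADER_DEBUG).
 */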
static int
watch_phys_set_mask(vm_offset_t pa, u_long mask)
{
u_long lsucr;
stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
lsucr = ldxa(0, ASI_LSU_CTL_REG);
lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
(mask << LSU_PM_SHIFT);
stxa(0, ASI_LSU_CTL_REG, lsucr);
return (0);
}
static int
watch_phys_set(vm_offset_t pa, int sz)
{
u_long off;
off = (u_long)pa & 7;
/* Test for misaligned watch points. */
if (off + sz > 8)
return (-1);
return (watch_phys_set_mask(pa, ((1 << sz) - 1) << off));
}
static int
watch_virt_set_mask(vm_offset_t va, u_long mask)
{
u_long lsucr;
stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
lsucr = ldxa(0, ASI_LSU_CTL_REG);
lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
(mask << LSU_VM_SHIFT);
stxa(0, ASI_LSU_CTL_REG, lsucr);
return (0);
}
static int
watch_virt_set(vm_offset_t va, int sz)
{
u_long off;
off = (u_long)va & 7;
/* Test for misaligned watch points. */
if (off + sz > 8)
return (-1);
return (watch_virt_set_mask(va, ((1 << sz) - 1) << off));
}
#endif
/*
* archsw functions
*/
static int
sparc64_autoload(void)
{
return (0);
}
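/*
 * readin/copyin hooks for the machine independent loader code: have the
 * MMU ops map the destination kernel VA range on demand (see the file
 * header comment) before performing the actual read or copy.
 */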
static ssize_t
sparc64_readin(const int fd, vm_offset_t va, const size_t len)
{
mmu_ops->mmu_mapin(va, len);
return (read(fd, (void *)va, len));
}
static ssize_t
sparc64_copyin(const void *src, vm_offset_t dest, size_t len)
{
mmu_ops->mmu_mapin(dest, len);
memcpy((void *)dest, src, len);
return (len);
}
/*
* other MD functions
*/
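/*
 * Thin wrappers around the OFW client interface: claim_virt() and
 * alloc_phys() use the "claim" method of the mmu and memory instance
 * handles to reserve virtual and physical ranges, map_phys() wires a
 * virtual range to a physical one via "map", and release_phys() hands
 * physical memory back to the firmware via "release".
 */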
static vm_offset_t
claim_virt(vm_offset_t virt, size_t size, int align)
{
vm_offset_t mva;
if (OF_call_method("claim", mmu, 3, 1, virt, size, align, &mva) == -1)
return ((vm_offset_t)-1);
return (mva);
}
static vm_offset_t
alloc_phys(size_t size, int align)
{
cell_t phys_hi, phys_low;
if (OF_call_method("claim", memory, 2, 2, size, align, &phys_low,
&phys_hi) == -1)
return ((vm_offset_t)-1);
return ((vm_offset_t)phys_hi << 32 | phys_low);
}
static int
map_phys(int mode, size_t size, vm_offset_t virt, vm_offset_t phys)
{
return (OF_call_method("map", mmu, 5, 0, (uint32_t)phys,
(uint32_t)(phys >> 32), virt, size, mode));
}
static void
release_phys(vm_offset_t phys, u_int size)
{
(void)OF_call_method("release", memory, 3, 0, (uint32_t)phys,
(uint32_t)(phys >> 32), size);
}
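/*
 * Kick off the loaded kernel: look up the ELF header metadata, let
 * md_load() assemble the module metadata, release the loader heap back
 * to the firmware and jump to the kernel entry point, passing it the
 * metadata pointer and the Open Firmware entry point.
 */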
static int
__elfN(exec)(struct preloaded_file *fp)
{
struct file_metadata *fmp;
vm_offset_t mdp;
Elf_Addr entry;
Elf_Ehdr *e;
int error;
if ((fmp = file_findmetadata(fp, MODINFOMD_ELFHDR)) == 0)
return (EFTYPE);
e = (Elf_Ehdr *)&fmp->md_data;
if ((error = md_load(fp->f_args, &mdp)) != 0)
return (error);
printf("jumping to kernel entry at %#lx.\n", e->e_entry);
#ifdef LOADER_DEBUG
pmap_print_tlb_sun4u();
#endif
dev_cleanup();
entry = e->e_entry;
OF_release((void *)heapva, HEAPSZ);
((kernel_entry_t *)entry)(mdp, 0, 0, 0, openfirmware);
panic("%s: exec returned", __func__);
}
static inline u_long
dtlb_get_data_sun4u(u_int tlb, u_int slot)
{
u_long data, pstate;
slot = TLB_DAR_SLOT(tlb, slot);
/*
* We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
* work around errata of USIII and beyond.
*/
pstate = rdpr(pstate);
wrpr(pstate, pstate & ~PSTATE_IE, 0);
(void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
wrpr(pstate, pstate, 0);
return (data);
}
static inline u_long
itlb_get_data_sun4u(u_int tlb, u_int slot)
{
u_long data, pstate;
slot = TLB_DAR_SLOT(tlb, slot);
/*
* We read ASI_ITLB_DATA_ACCESS_REG twice back-to-back in order to
* work around errata of USIII and beyond.
*/
pstate = rdpr(pstate);
wrpr(pstate, pstate & ~PSTATE_IE, 0);
(void)ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
data = ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
wrpr(pstate, pstate, 0);
return (data);
}
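/*
 * Translate a virtual address by searching the locked dTLB slots;
 * returns the physical address or (vm_offset_t)-1 if no entry matches.
 */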
static vm_offset_t
dtlb_va_to_pa_sun4u(vm_offset_t va)
{
u_long pstate, reg;
u_int i, tlb;
pstate = rdpr(pstate);
wrpr(pstate, pstate & ~PSTATE_IE, 0);
for (i = 0; i < dtlb_slot_max; i++) {
reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
ASI_DTLB_TAG_READ_REG);
if (TLB_TAR_VA(reg) != va)
continue;
reg = dtlb_get_data_sun4u(tlb_locked, i);
wrpr(pstate, pstate, 0);
reg >>= TD_PA_SHIFT;
if (cpu_impl == CPU_IMPL_SPARC64V ||
cpu_impl >= CPU_IMPL_ULTRASPARCIII)
return (reg & TD_PA_CH_MASK);
return (reg & TD_PA_SF_MASK);
}
wrpr(pstate, pstate, 0);
return (-1);
}
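/* The same lookup as above, but for the locked iTLB slots. */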
static vm_offset_t
itlb_va_to_pa_sun4u(vm_offset_t va)
{
u_long pstate, reg;
int i;
pstate = rdpr(pstate);
wrpr(pstate, pstate & ~PSTATE_IE, 0);
for (i = 0; i < itlb_slot_max; i++) {
reg = ldxa(TLB_DAR_SLOT(tlb_locked, i),
ASI_ITLB_TAG_READ_REG);
if (TLB_TAR_VA(reg) != va)
continue;
reg = itlb_get_data_sun4u(tlb_locked, i);
wrpr(pstate, pstate, 0);
reg >>= TD_PA_SHIFT;
if (cpu_impl == CPU_IMPL_SPARC64V ||
cpu_impl >= CPU_IMPL_ULTRASPARCIII)
return (reg & TD_PA_CH_MASK);
return (reg & TD_PA_SF_MASK);
}
wrpr(pstate, pstate, 0);
return (-1);
}
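/*
 * Enter TLB entries through the firmware's SUNW,dtlb-load and
 * SUNW,itlb-load methods.  On USIII+, refuse to enter a locked iTLB
 * entry at index 0 (see itlb_relocate_locked0_sun4u() below).
 */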
static int
dtlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{
return (OF_call_method("SUNW,dtlb-load", mmu, 3, 0, index, data,
virt));
}
static int
itlb_enter_sun4u(u_int index, u_long data, vm_offset_t virt)
{
if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp && index == 0 &&
(data & TD_L) != 0)
panic("%s: won't enter locked TLB entry at index 0 on USIII+",
__func__);
return (OF_call_method("SUNW,itlb-load", mmu, 3, 0, index, data,
virt));
}
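/*
 * Move a valid, locked entry the firmware may have left in iTLB slot 0
 * to a free slot, as part of working around Cheetah+ erratum 34 on
 * USIII+ CPUs.
 */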
static void
itlb_relocate_locked0_sun4u(void)
{
u_long data, pstate, tag;
int i;
if (cpu_impl != CPU_IMPL_ULTRASPARCIIIp)
return;
pstate = rdpr(pstate);
wrpr(pstate, pstate & ~PSTATE_IE, 0);
data = itlb_get_data_sun4u(tlb_locked, 0);
if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) {
wrpr(pstate, pstate, 0);
return;
}
/* Flush the mapping of slot 0. */
tag = ldxa(TLB_DAR_SLOT(tlb_locked, 0), ASI_ITLB_TAG_READ_REG);
stxa(TLB_DEMAP_VA(TLB_TAR_VA(tag)) | TLB_DEMAP_PRIMARY |
TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
flush(0); /* The USIII-family ignores the address. */
/*
* Search a replacement slot != 0 and enter the data and tag
* that formerly were in slot 0.
*/
for (i = 1; i < itlb_slot_max; i++) {
if ((itlb_get_data_sun4u(tlb_locked, i) & TD_V) != 0)
continue;
stxa(AA_IMMU_TAR, ASI_IMMU, tag);
stxa(TLB_DAR_SLOT(tlb_locked, i), ASI_ITLB_DATA_ACCESS_REG,
data);
flush(0); /* The USIII-family ignores the address. */
break;
}
wrpr(pstate, pstate, 0);
if (i == itlb_slot_max)
panic("%s: could not find a replacement slot", __func__);
}
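/*
 * Map the range [va, va + len) with locked 4MB pages: for every 4MB
 * page not yet present in either TLB, allocate physical memory and
 * claim the virtual range from the firmware, then enter matching locked
 * dTLB and iTLB entries and record them in dtlb_store/itlb_store.
 */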
static int
mmu_mapin_sun4u(vm_offset_t va, vm_size_t len)
{
vm_offset_t pa, mva;
u_long data;
u_int index;
if (va + len > curkva)
curkva = va + len;
pa = (vm_offset_t)-1;
len += va & PAGE_MASK_4M;
va &= ~PAGE_MASK_4M;
while (len) {
if (dtlb_va_to_pa_sun4u(va) == (vm_offset_t)-1 ||
itlb_va_to_pa_sun4u(va) == (vm_offset_t)-1) {
/* Allocate a physical page, claim the virtual area. */
if (pa == (vm_offset_t)-1) {
pa = alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
if (pa == (vm_offset_t)-1)
panic("%s: out of memory", __func__);
mva = claim_virt(va, PAGE_SIZE_4M, 0);
if (mva != va)
panic("%s: can't claim virtual page "
"(wanted %#lx, got %#lx)",
__func__, va, mva);
/*
* The mappings may have changed, be paranoid.
*/
continue;
}
/*
* Actually, we can map at most two pages fewer than the
* slot maximum (depending on the kernel TSB size).
*/
if (dtlb_slot >= dtlb_slot_max)
panic("%s: out of dtlb_slots", __func__);
if (itlb_slot >= itlb_slot_max)
panic("%s: out of itlb_slots", __func__);
data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
TD_CV | TD_P | TD_W;
dtlb_store[dtlb_slot].te_pa = pa;
dtlb_store[dtlb_slot].te_va = va;
index = dtlb_slot_max - dtlb_slot - 1;
if (dtlb_enter_sun4u(index, data, va) < 0)
panic("%s: can't enter dTLB slot %d data "
"%#lx va %#lx", __func__, index, data,
va);
dtlb_slot++;
itlb_store[itlb_slot].te_pa = pa;
itlb_store[itlb_slot].te_va = va;
index = itlb_slot_max - itlb_slot - 1;
if (itlb_enter_sun4u(index, data, va) < 0)
panic("%s: can't enter iTLB slot %d data "
"%#lx va %#lxd", __func__, index, data,
va);
itlb_slot++;
pa = (vm_offset_t)-1;
}
len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
va += PAGE_SIZE_4M;
}
if (pa != (vm_offset_t)-1)
release_phys(pa, PAGE_SIZE_4M);
return (0);
}
static vm_offset_t
init_heap(void)
{
/* There is no need for contiguous physical heap memory. */
heapva = (vm_offset_t)OF_claim((void *)HEAPVA, HEAPSZ, 32);
return (heapva);
}
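/*
 * Recursively walk the OFW device tree starting at the given node and
 * return the "cpu" node whose CPU ID property matches the boot
 * processor's module ID.
 */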
static phandle_t
find_bsp_sun4u(phandle_t node, uint32_t bspid)
{
char type[sizeof("cpu")];
phandle_t child;
uint32_t cpuid;
for (; node > 0; node = OF_peer(node)) {
child = OF_child(node);
if (child > 0) {
child = find_bsp_sun4u(child, bspid);
if (child > 0)
return (child);
} else {
if (OF_getprop(node, "device_type", type,
sizeof(type)) <= 0)
continue;
if (strcmp(type, "cpu") != 0)
continue;
if (OF_getprop(node, cpu_cpuid_prop_sun4u(), &cpuid,
sizeof(cpuid)) <= 0)
continue;
if (cpuid == bspid)
return (node);
}
}
return (0);
}
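/*
 * Return the name of the OFW property holding the CPU ID, which differs
 * between CPU implementations.
 */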
const char *
cpu_cpuid_prop_sun4u(void)
{
switch (cpu_impl) {
case CPU_IMPL_SPARC64:
case CPU_IMPL_SPARC64V:
case CPU_IMPL_ULTRASPARCI:
case CPU_IMPL_ULTRASPARCII:
case CPU_IMPL_ULTRASPARCIIi:
case CPU_IMPL_ULTRASPARCIIe:
return ("upa-portid");
case CPU_IMPL_ULTRASPARCIII:
case CPU_IMPL_ULTRASPARCIIIp:
case CPU_IMPL_ULTRASPARCIIIi:
case CPU_IMPL_ULTRASPARCIIIip:
return ("portid");
case CPU_IMPL_ULTRASPARCIV:
case CPU_IMPL_ULTRASPARCIVp:
return ("cpuid");
default:
return ("");
}
}
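/*
 * Read the module/agent ID of the CPU we are running on from the
 * appropriate implementation-specific register (UPA, Fireplane or JBus
 * configuration register, or the interrupt ID register on USIV).
 */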
uint32_t
cpu_get_mid_sun4u(void)
{
switch (cpu_impl) {
case CPU_IMPL_SPARC64:
case CPU_IMPL_SPARC64V:
case CPU_IMPL_ULTRASPARCI:
case CPU_IMPL_ULTRASPARCII:
case CPU_IMPL_ULTRASPARCIIi:
case CPU_IMPL_ULTRASPARCIIe:
return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
case CPU_IMPL_ULTRASPARCIII:
case CPU_IMPL_ULTRASPARCIIIp:
return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
ASI_FIREPLANE_CONFIG_REG)));
case CPU_IMPL_ULTRASPARCIIIi:
case CPU_IMPL_ULTRASPARCIIIip:
return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
case CPU_IMPL_ULTRASPARCIV:
case CPU_IMPL_ULTRASPARCIVp:
return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
default:
return (0);
}
}
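/*
 * Determine the CPU implementation, which TLB holds the locked entries
 * and the number of dTLB/iTLB slots of the boot processor, apply the
 * USIII+ iTLB slot 0 fixup and allocate the TLB bookkeeping arrays.
 */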
static void
tlb_init_sun4u(void)
{
phandle_t bsp;
cpu_impl = VER_IMPL(rdpr(ver));
switch (cpu_impl) {
case CPU_IMPL_SPARC64:
case CPU_IMPL_ULTRASPARCI:
case CPU_IMPL_ULTRASPARCII:
case CPU_IMPL_ULTRASPARCIIi:
case CPU_IMPL_ULTRASPARCIIe:
tlb_locked = TLB_DAR_T32;
break;
case CPU_IMPL_ULTRASPARCIII:
case CPU_IMPL_ULTRASPARCIIIp:
case CPU_IMPL_ULTRASPARCIIIi:
case CPU_IMPL_ULTRASPARCIIIip:
case CPU_IMPL_ULTRASPARCIV:
case CPU_IMPL_ULTRASPARCIVp:
tlb_locked = TLB_DAR_T16;
break;
case CPU_IMPL_SPARC64V:
tlb_locked = TLB_DAR_FTLB;
break;
}
bsp = find_bsp_sun4u(OF_child(root), cpu_get_mid_sun4u());
if (bsp == 0)
panic("%s: no node for bootcpu?!?!", __func__);
if (OF_getprop(bsp, "#dtlb-entries", &dtlb_slot_max,
sizeof(dtlb_slot_max)) == -1 ||
OF_getprop(bsp, "#itlb-entries", &itlb_slot_max,
sizeof(itlb_slot_max)) == -1)
panic("%s: can't get TLB slot max.", __func__);
if (cpu_impl == CPU_IMPL_ULTRASPARCIIIp) {
#ifdef LOADER_DEBUG
printf("pre fixup:\n");
pmap_print_tlb_sun4u();
#endif
/*
* Relocate the locked entry in it16 slot 0 (if existent)
* as part of working around Cheetah+ erratum 34.
*/
itlb_relocate_locked0_sun4u();
#ifdef LOADER_DEBUG
printf("post fixup:\n");
pmap_print_tlb_sun4u();
#endif
}
dtlb_store = malloc(dtlb_slot_max * sizeof(*dtlb_store));
itlb_store = malloc(itlb_slot_max * sizeof(*itlb_store));
if (dtlb_store == NULL || itlb_store == NULL)
panic("%s: can't allocate TLB store", __func__);
}
#ifdef LOADER_ZFS_SUPPORT
static void
sparc64_zfs_probe(void)
{
struct vtoc8 vtoc;
struct zfs_devdesc zfs_currdev;
char devname[32];
uint64_t guid;
int fd, part, unit;
/* Get the GUID of the ZFS pool on the boot device. */
guid = 0;
zfs_probe_dev(bootpath, &guid);
for (unit = 0; unit < MAXDEV; unit++) {
/* Find freebsd-zfs slices in the VTOC. */
sprintf(devname, "disk%d:", unit);
fd = open(devname, O_RDONLY);
if (fd == -1)
continue;
lseek(fd, 0, SEEK_SET);
if (read(fd, &vtoc, sizeof(vtoc)) != sizeof(vtoc)) {
close(fd);
continue;
}
close(fd);
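		/*
		 * Walk the eight VTOC8 entries; slice 2 ('c')
		 * conventionally covers the whole disk and is skipped.
		 */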
for (part = 0; part < 8; part++) {
if (part == 2 || vtoc.part[part].tag !=
VTOC_TAG_FREEBSD_ZFS)
continue;
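			/*
			 * Probe the slice for vdevs; an ENXIO means the
			 * device could not be opened, so give up on the
			 * remaining slices of this disk.
			 */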
sprintf(devname, "disk%d:%c", unit, part + 'a');
if (zfs_probe_dev(devname, NULL) == ENXIO)
break;
}
}
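	/*
	 * A pool was found on the boot device; make it the current
	 * boot device.
	 */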
if (guid != 0) {
zfs_currdev.pool_guid = guid;
zfs_currdev.root_guid = 0;
zfs_currdev.d_dev = &zfs_dev;
zfs_currdev.d_type = zfs_currdev.d_dev->dv_type;
(void)strncpy(bootpath, zfs_fmtdev(&zfs_currdev),
sizeof(bootpath) - 1);
bootpath[sizeof(bootpath) - 1] = '\0';
}
}
#endif /* LOADER_ZFS_SUPPORT */
int
main(int (*openfirm)(void *))
{
char compatible[32];
struct devsw **dp;
/*
	 * Tell the Open Firmware functions where to find the OFW gate.
*/
OF_init(openfirm);
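	/* Hook the machine-dependent back ends into the MI loader. */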
archsw.arch_getdev = ofw_getdev;
archsw.arch_copyin = sparc64_copyin;
archsw.arch_copyout = ofw_copyout;
archsw.arch_readin = sparc64_readin;
archsw.arch_autoload = sparc64_autoload;
#ifdef LOADER_ZFS_SUPPORT
archsw.arch_zfs_probe = sparc64_zfs_probe;
#endif
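	/* Set up the heap; bail out to the firmware if that fails. */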
if (init_heap() == (vm_offset_t)-1)
OF_exit();
setheap((void *)heapva, (void *)(heapva + HEAPSZ));
/*
* Probe for a console.
*/
cons_probe();
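	/* Find the root of the OFW device tree and set up the sun4u MMU ops. */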
if ((root = OF_peer(0)) == -1)
panic("%s: can't get root phandle", __func__);
OF_getprop(root, "compatible", compatible, sizeof(compatible));
mmu_ops = &mmu_ops_sun4u;
mmu_ops->tlb_init();
/*
* Set up the current device.
*/
OF_getprop(chosen, "bootpath", bootpath, sizeof(bootpath));
/*
* Sun compatible bootable CD-ROMs have a disk label placed
* before the cd9660 data, with the actual filesystem being
* in the first partition, while the other partitions contain
* pseudo disk labels with embedded boot blocks for different
* architectures, which may be followed by UFS filesystems.
* The firmware will set the boot path to the partition it
* boots from ('f' in the sun4u case), but we want the kernel
* to be loaded from the cd9660 fs ('a'), so the boot path
* needs to be altered.
*/
if (bootpath[strlen(bootpath) - 2] == ':' &&
bootpath[strlen(bootpath) - 1] == 'f' &&
strstr(bootpath, "cdrom") != NULL) {
bootpath[strlen(bootpath) - 1] = 'a';
printf("Boot path set to %s\n", bootpath);
}
/*
* Initialize devices.
*/
for (dp = devsw; *dp != 0; dp++)
if ((*dp)->dv_init != 0)
(*dp)->dv_init();
/*
* Now that sparc64_zfs_probe() might have altered bootpath,
* export it.
*/
env_setenv("currdev", EV_VOLATILE, bootpath,
ofw_setcurrdev, env_nounset);
env_setenv("loaddev", EV_VOLATILE, bootpath,
env_noset, env_nounset);
printf("\n");
printf("%s, Revision %s\n", bootprog_name, bootprog_rev);
printf("(%s, %s)\n", bootprog_maker, bootprog_date);
printf("bootpath=\"%s\"\n", bootpath);
/* Give control to the machine independent loader code. */
interact();
return (1);
}
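/* Show the libstand malloc statistics and the extent of the loader heap. */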
COMMAND_SET(heap, "heap", "show heap usage", command_heap);
static int
command_heap(int argc, char *argv[])
{
mallocstats();
printf("heap base at %p, top at %p, upper limit at %p\n", heapva,
sbrk(0), heapva + HEAPSZ);
	return (CMD_OK);
}
COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot);
static int
command_reboot(int argc, char *argv[])
{
int i;
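	/* Let the drivers clean up before returning to the firmware. */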
for (i = 0; devsw[i] != NULL; ++i)
if (devsw[i]->dv_cleanup != NULL)
(devsw[i]->dv_cleanup)();
printf("Rebooting...\n");
OF_exit();
}
/* provide this for panic, as it's not in the startup code */
void
exit(int code)
{
OF_exit();
}
#ifdef LOADER_DEBUG
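/* Page sizes indexed by the TD_SIZE field of a sun4u TTE. */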
static const char *const page_sizes[] = {
" 8k", " 64k", "512k", " 4m"
};
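/* Print the page size, attribute bits, PA, VA and context of a TTE. */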
static void
pmap_print_tte_sun4u(tte_t tag, tte_t tte)
{
printf("%s %s ",
page_sizes[(tte >> TD_SIZE_SHIFT) & TD_SIZE_MASK],
tag & TD_G ? "G" : " ");
printf(tte & TD_W ? "W " : " ");
printf(tte & TD_P ? "\e[33mP\e[0m " : " ");
printf(tte & TD_E ? "E " : " ");
printf(tte & TD_CV ? "CV " : " ");
printf(tte & TD_CP ? "CP " : " ");
printf(tte & TD_L ? "\e[32mL\e[0m " : " ");
printf(tte & TD_IE ? "IE " : " ");
printf(tte & TD_NFO ? "NFO " : " ");
printf("pa=0x%lx va=0x%lx ctx=%ld\n",
TD_PA(tte), TLB_TAR_VA(tag), TLB_TAR_CTX(tag));
}
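/*
 * Dump the valid entries of the locked iTLB and dTLB, briefly disabling
 * interrupts around each diagnostic register read.
 */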
static void
pmap_print_tlb_sun4u(void)
{
tte_t tag, tte;
u_long pstate;
	u_int i;
pstate = rdpr(pstate);
for (i = 0; i < itlb_slot_max; i++) {
wrpr(pstate, pstate & ~PSTATE_IE, 0);
tte = itlb_get_data_sun4u(tlb_locked, i);
wrpr(pstate, pstate, 0);
if (!(tte & TD_V))
continue;
tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
ASI_ITLB_TAG_READ_REG);
printf("iTLB-%2u: ", i);
pmap_print_tte_sun4u(tag, tte);
}
for (i = 0; i < dtlb_slot_max; i++) {
wrpr(pstate, pstate & ~PSTATE_IE, 0);
tte = dtlb_get_data_sun4u(tlb_locked, i);
wrpr(pstate, pstate, 0);
if (!(tte & TD_V))
continue;
tag = ldxa(TLB_DAR_SLOT(tlb_locked, i),
ASI_DTLB_TAG_READ_REG);
printf("dTLB-%2u: ", i);
pmap_print_tte_sun4u(tag, tte);
}
}
#endif