5349 verify that block pointer is plausible before reading

Reviewed by: Alex Reece <alex.reece@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Xin Li <delphij@FreeBSD.org>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Approved by: Gordon Ross <gwr@nexenta.com>
Author: Matthew Ahrens <mahrens@delphix.com>

illumos/illumos-gate@f63ab3d5a8
commit e0436c62e3 (parent 9c207a0483)
Committed by: Xin LI, 2014-12-06 00:42:30 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
    svn path=/vendor-sys/illumos/dist/; revision=275548
2 changed files with 87 additions and 4 deletions
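
Each check in the new zfs_blkptr_verify() reports a bad field through zfs_panic_recover(), which panics unless the zfs_recover tunable is set, in which case it only logs a warning and lets the read proceed. For context only (not part of this commit), the pre-existing helper in spa_misc.c behaves roughly as sketched below; this is a paraphrase, not the authoritative source:

    /* Sketch of zfs_panic_recover() from spa_misc.c: warn if zfs_recover
     * is set, otherwise panic with the formatted message. */
    void
    zfs_panic_recover(const char *fmt, ...)
    {
            va_list adx;

            va_start(adx, fmt);
            vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
            va_end(adx);
    }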

sys/spa.h

@@ -791,6 +791,7 @@ extern boolean_t spa_is_root(spa_t *spa);
 extern boolean_t spa_writeable(spa_t *spa);
 extern boolean_t spa_has_pending_synctask(spa_t *spa);
 extern int spa_maxblocksize(spa_t *spa);
+extern void zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp);
 extern int spa_mode(spa_t *spa);
 extern uint64_t strtonum(const char *str, char **nptr);

zio.c

@@ -216,7 +216,7 @@ zio_buf_alloc(size_t size)
 {
         size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

-        ASSERT3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+        VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

         return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
 }
@@ -232,7 +232,7 @@ zio_data_buf_alloc(size_t size)
 {
         size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

-        ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+        VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

         return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
 }
@@ -242,7 +242,7 @@ zio_buf_free(void *buf, size_t size)
 {
         size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

-        ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+        VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

         kmem_cache_free(zio_buf_cache[c], buf);
 }
@@ -252,7 +252,7 @@ zio_data_buf_free(void *buf, size_t size)
 {
         size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

-        ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+        VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

         kmem_cache_free(zio_data_buf_cache[c], buf);
 }
@@ -596,6 +596,86 @@ zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
         return (zio_null(NULL, spa, NULL, done, private, flags));
 }

+void
+zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
+{
+        if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
+                zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
+                    bp, (longlong_t)BP_GET_TYPE(bp));
+        }
+        if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
+            BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
+                zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
+                    bp, (longlong_t)BP_GET_CHECKSUM(bp));
+        }
+        if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
+            BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
+                zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
+                    bp, (longlong_t)BP_GET_COMPRESS(bp));
+        }
+        if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
+                zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
+                    bp, (longlong_t)BP_GET_LSIZE(bp));
+        }
+        if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
+                zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
+                    bp, (longlong_t)BP_GET_PSIZE(bp));
+        }
+
+        if (BP_IS_EMBEDDED(bp)) {
+                if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
+                        zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
+                            bp, (longlong_t)BPE_GET_ETYPE(bp));
+                }
+        }
+
+        /*
+         * Pool-specific checks.
+         *
+         * Note: it would be nice to verify that the blk_birth and
+         * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
+         * allows the birth time of log blocks (and dmu_sync()-ed blocks
+         * that are in the log) to be arbitrarily large.
+         */
+        for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
+                uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
+                if (vdevid >= spa->spa_root_vdev->vdev_children) {
+                        zfs_panic_recover("blkptr at %p DVA %u has invalid "
+                            "VDEV %llu",
+                            bp, i, (longlong_t)vdevid);
+                }
+                vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
+                if (vd == NULL) {
+                        zfs_panic_recover("blkptr at %p DVA %u has invalid "
+                            "VDEV %llu",
+                            bp, i, (longlong_t)vdevid);
+                }
+                if (vd->vdev_ops == &vdev_hole_ops) {
+                        zfs_panic_recover("blkptr at %p DVA %u has hole "
+                            "VDEV %llu",
+                            bp, i, (longlong_t)vdevid);
+                }
+                if (vd->vdev_ops == &vdev_missing_ops) {
+                        /*
+                         * "missing" vdevs are valid during import, but we
+                         * don't have their detailed info (e.g. asize), so
+                         * we can't perform any more checks on them.
+                         */
+                        continue;
+                }
+                uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
+                uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
+                if (BP_IS_GANG(bp))
+                        asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+                if (offset + asize > vd->vdev_asize) {
+                        zfs_panic_recover("blkptr at %p DVA %u has invalid "
+                            "OFFSET %llu",
+                            bp, i, (longlong_t)offset);
+                }
+        }
+}
+
 zio_t *
 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
     void *data, uint64_t size, zio_done_func_t *done, void *private,
@@ -603,6 +683,8 @@ zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 {
         zio_t *zio;

+        zfs_blkptr_verify(spa, bp);
+
         zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
             data, size, done, private,
             ZIO_TYPE_READ, priority, flags, NULL, 0, zb,