diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa.h b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa.h
index a5f32e837be0..cb5e01158d56 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa.h
@@ -811,6 +811,7 @@ extern boolean_t spa_is_root(spa_t *spa);
 extern boolean_t spa_writeable(spa_t *spa);
 extern boolean_t spa_has_pending_synctask(spa_t *spa);
 extern int spa_maxblocksize(spa_t *spa);
+extern void zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp);
 extern int spa_mode(spa_t *spa);
 extern uint64_t zfs_strtonum(const char *str, char **nptr);
 
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
index c71b61540d4b..8408afbbfe49 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
@@ -265,7 +265,7 @@ zio_buf_alloc(size_t size)
 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 	int flags = zio_exclude_metadata ? KM_NODEBUG : 0;
 
-	ASSERT3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 
 	if (zio_use_uma)
 		return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
@@ -284,7 +284,7 @@ zio_data_buf_alloc(size_t size)
 {
 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 
-	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 
 	if (zio_use_uma)
 		return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
@@ -297,7 +297,7 @@ zio_buf_free(void *buf, size_t size)
 {
 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 
-	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 
 	if (zio_use_uma)
 		kmem_cache_free(zio_buf_cache[c], buf);
@@ -310,7 +310,7 @@ zio_data_buf_free(void *buf, size_t size)
 {
 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 
-	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 
 	if (zio_use_uma)
 		kmem_cache_free(zio_data_buf_cache[c], buf);
@@ -657,6 +657,86 @@ zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
 	return (zio_null(NULL, spa, NULL, done, private, flags));
 }
 
+void
+zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
+{
+	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
+		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
+		    bp, (longlong_t)BP_GET_TYPE(bp));
+	}
+	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
+	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
+		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
+		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
+	}
+	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
+	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
+		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
+		    bp, (longlong_t)BP_GET_COMPRESS(bp));
+	}
+	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
+		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
+		    bp, (longlong_t)BP_GET_LSIZE(bp));
+	}
+	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
+		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
+		    bp, (longlong_t)BP_GET_PSIZE(bp));
+	}
+
+	if (BP_IS_EMBEDDED(bp)) {
+		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
+			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
+			    bp, (longlong_t)BPE_GET_ETYPE(bp));
+		}
+	}
+
+	/*
+	 * Pool-specific checks.
+	 *
+	 * Note: it would be nice to verify that the blk_birth and
+	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
+	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
+	 * that are in the log) to be arbitrarily large.
+	 */
+	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
+		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
+		if (vdevid >= spa->spa_root_vdev->vdev_children) {
+			zfs_panic_recover("blkptr at %p DVA %u has invalid "
+			    "VDEV %llu",
+			    bp, i, (longlong_t)vdevid);
+		}
+		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
+		if (vd == NULL) {
+			zfs_panic_recover("blkptr at %p DVA %u has invalid "
+			    "VDEV %llu",
+			    bp, i, (longlong_t)vdevid);
+		}
+		if (vd->vdev_ops == &vdev_hole_ops) {
+			zfs_panic_recover("blkptr at %p DVA %u has hole "
+			    "VDEV %llu",
+			    bp, i, (longlong_t)vdevid);
+
+		}
+		if (vd->vdev_ops == &vdev_missing_ops) {
+			/*
+			 * "missing" vdevs are valid during import, but we
+			 * don't have their detailed info (e.g. asize), so
+			 * we can't perform any more checks on them.
+			 */
+			continue;
+		}
+		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
+		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
+		if (BP_IS_GANG(bp))
+			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+		if (offset + asize > vd->vdev_asize) {
+			zfs_panic_recover("blkptr at %p DVA %u has invalid "
+			    "OFFSET %llu",
+			    bp, i, (longlong_t)offset);
+		}
+	}
+}
+
 zio_t *
 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
     void *data, uint64_t size, zio_done_func_t *done, void *private,
@@ -664,6 +744,8 @@ zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 {
 	zio_t *zio;
 
+	zfs_blkptr_verify(spa, bp);
+
 	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp, data, size,
 	    done, private, ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
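
The severity of these new checks is governed by zfs_panic_recover(), which panics by default but only logs a warning when the zfs_recover tunable (exposed as vfs.zfs.recover on FreeBSD) is enabled. A minimal sketch of that behavior, paraphrased from spa_misc.c and assuming the illumos-style vcmn_err() logging interface:

	extern boolean_t zfs_recover;	/* toggled via vfs.zfs.recover */

	/*
	 * Sketch only: escalate a detected on-disk inconsistency to a
	 * panic unless the administrator has opted into best-effort
	 * recovery mode.
	 */
	void
	zfs_panic_recover(const char *fmt, ...)
	{
		va_list adx;

		va_start(adx, fmt);
		vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
		va_end(adx);
	}

With zfs_recover set, zfs_blkptr_verify() can therefore report every suspicious field of a damaged block pointer and keep going, which is the behavior wanted when trying to salvage data from a corrupted pool.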