Revert r340096: 9952 Block size change during zfs receive drops spill block

It was reported, and I easily reproduced it, that this change triggers a
panic when receiving a replication stream with embedded blocks enabled,
when a short file that compresses into a single embedded block changes
its block size.  I am not sure whether the problem is in this particular
patch or merely triggered by it, but since investigation and a fix will
take some time, I've decided to revert this for now.

PR:		198457, 233277
Alexander Motin 2018-11-21 18:18:57 +00:00
parent d5e494fee4
commit eecd0a1856
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=340737
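
The scenario described in the message amounts to an incremental replication
stream carrying embedded blocks for a short file whose block size changes
between snapshots.  A minimal reproduction sketch follows; it is not taken
from PR 198457 or 233277, and the pool layout, names, and file sizes are
assumptions chosen only to produce a file stored as a single embedded block
whose block size then changes:

# Hypothetical reproduction sketch; file-backed pool, names and sizes are
# assumptions.
truncate -s 1g /tmp/tank.img
zpool create -O compression=lz4 tank /tmp/tank.img
zfs create tank/src

# Small, highly compressible file: a single 512-byte data block that the
# embedded_data feature can store directly in the block pointer.
printf 'A%.0s' $(seq 400) > /tank/src/f
zfs snapshot tank/src@a

# Grow the file past 512 bytes so its single block changes size, while
# keeping it compressible enough to remain an embedded block.
printf 'A%.0s' $(seq 1000) > /tank/src/f
zfs snapshot tank/src@b

# Replication streams with embedded block records (-e); receiving the
# incremental stream is where the panic was reported with r340096 applied.
zfs send -R -e tank/src@a | zfs receive tank/dst
zfs send -R -e -i @a tank/src@b | zfs receive -F tank/dst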


@@ -2143,7 +2143,6 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
{
dmu_object_info_t doi;
dmu_tx_t *tx;
dmu_buf_t *db;
uint64_t object;
int err;
@@ -2191,14 +2190,12 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_bonus(tx, object);
dmu_tx_hold_write(tx, object, 0, 0);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
db = NULL;
if (object == DMU_NEW_OBJECT) {
/* currently free, want to be allocated */
err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
@@ -2206,33 +2203,15 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
drro->drr_bonustype, drro->drr_bonuslen,
drro->drr_dn_slots << DNODE_SHIFT, tx);
} else if (drro->drr_type != doi.doi_type ||
(drro->drr_blksz != doi.doi_data_block_size &&
doi.doi_max_offset > doi.doi_data_block_size)) {
drro->drr_blksz != doi.doi_data_block_size ||
drro->drr_bonustype != doi.doi_bonus_type ||
drro->drr_bonuslen != doi.doi_bonus_size) {
/* currently allocated, but with different properties */
err = dmu_object_reclaim(rwa->os, drro->drr_object,
drro->drr_type, drro->drr_blksz,
drro->drr_bonustype, drro->drr_bonuslen, tx);
} else {
/*
* Currently allocated, but with slightly different properties
* that may change live, like block size or bonus buffer.
* Change those specifically to not lose the spill block, etc.
*/
if (drro->drr_bonustype != doi.doi_bonus_type ||
drro->drr_bonuslen != doi.doi_bonus_size)
VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG,
&db));
if (drro->drr_bonustype != doi.doi_bonus_type)
VERIFY0(dmu_set_bonustype(db, drro->drr_bonustype, tx));
if (drro->drr_bonuslen != doi.doi_bonus_size)
VERIFY0(dmu_set_bonus(db, drro->drr_bonuslen, tx));
if (drro->drr_blksz != doi.doi_data_block_size)
err = dmu_object_set_blocksize(rwa->os, drro->drr_object,
drro->drr_blksz, 0, tx);
}
if (err != 0) {
if (db != NULL)
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
@@ -2243,9 +2222,9 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
drro->drr_compress, tx);
if (data != NULL) {
if (db == NULL)
VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG,
&db));
dmu_buf_t *db;
VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
@@ -2256,9 +2235,8 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
dmu_ot_byteswap[byteswap].ob_func(db->db_data,
drro->drr_bonuslen);
}
}
if (db != NULL)
dmu_buf_rele(db, FTAG);
}
dmu_tx_commit(tx);
return (0);