Remove bcopy(), bzero(), bcmp()

bcopy() has a confusing argument order and is actually a move, not a
copy; they're all deprecated since POSIX.1-2001 and removed in
POSIX.1-2008, and we shim them out to mem*() on Linux anyway.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #12996
This commit is contained in:
наб 2022-02-25 14:26:54 +01:00 committed by Brian Behlendorf
parent 1d77d62f5a
commit 861166b027
129 changed files with 990 additions and 1051 deletions

View File

@ -63,7 +63,7 @@ bench_fini_raidz_maps(void)
{
/* tear down golden zio */
raidz_free(zio_bench.io_abd, max_data_size);
bzero(&zio_bench, sizeof (zio_t));
memset(&zio_bench, 0, sizeof (zio_t));
}
static inline void

View File

@ -141,10 +141,9 @@ static void process_options(int argc, char **argv)
{
size_t value;
int opt;
raidz_test_opts_t *o = &rto_opts;
bcopy(&rto_opts_defaults, o, sizeof (*o));
memcpy(o, &rto_opts_defaults, sizeof (*o));
while ((opt = getopt(argc, argv, "TDBSvha:er:o:d:s:t:")) != -1) {
value = 0;

View File

@ -92,19 +92,19 @@ static inline size_t ilog2(size_t a)
}
#define LOG(lvl, a...) \
#define LOG(lvl, ...) \
{ \
if (rto_opts.rto_v >= lvl) \
(void) fprintf(stdout, a); \
(void) fprintf(stdout, __VA_ARGS__); \
} \
#define LOG_OPT(lvl, opt, a...) \
#define LOG_OPT(lvl, opt, ...) \
{ \
if (opt->rto_v >= lvl) \
(void) fprintf(stdout, a); \
(void) fprintf(stdout, __VA_ARGS__); \
} \
#define ERR(a...) (void) fprintf(stderr, a)
#define ERR(...) (void) fprintf(stderr, __VA_ARGS__)
#define DBLSEP "================\n"

View File

@ -2035,11 +2035,8 @@ dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
bzero(&ddh_total, sizeof (ddh_total));
bzero(&dds_total, sizeof (dds_total));
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
@ -4360,7 +4357,7 @@ dump_l2arc_log_blocks(int fd, l2arc_dev_hdr_phys_t l2dhdr,
if (!dump_opt['q'])
print_l2arc_log_blocks();
bcopy((&l2dhdr)->dh_start_lbps, lbps, sizeof (lbps));
memcpy(lbps, l2dhdr.dh_start_lbps, sizeof (lbps));
dev.l2ad_evict = l2dhdr.dh_evict;
dev.l2ad_start = l2dhdr.dh_start;
@ -4460,12 +4457,9 @@ dump_l2arc_log_blocks(int fd, l2arc_dev_hdr_phys_t l2dhdr,
static int
dump_l2arc_header(int fd)
{
l2arc_dev_hdr_phys_t l2dhdr, rebuild;
l2arc_dev_hdr_phys_t l2dhdr = {0}, rebuild = {0};
int error = B_FALSE;
bzero(&l2dhdr, sizeof (l2dhdr));
bzero(&rebuild, sizeof (rebuild));
if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
error = B_TRUE;
@ -4820,7 +4814,7 @@ static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
zdb_label_t labels[VDEV_LABELS];
zdb_label_t labels[VDEV_LABELS] = {{{{0}}}};
uint64_t psize, ashift, l2cache;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
@ -4831,8 +4825,6 @@ dump_label(const char *dev)
void *node, *cookie;
int fd;
bzero(labels, sizeof (labels));
/*
* Check if we were given absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
@ -5746,14 +5738,13 @@ zdb_load_obsolete_counts(vdev_t *vd)
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
ddt_bookmark_t ddb;
ddt_bookmark_t ddb = {0};
ddt_entry_t dde;
int error;
int p;
ASSERT(!dump_opt['L']);
bzero(&ddb, sizeof (ddb));
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
@ -6413,7 +6404,7 @@ deleted_livelists_dump_mos(spa_t *spa)
static int
dump_block_stats(spa_t *spa)
{
zdb_cb_t zcb;
zdb_cb_t zcb = {{{{0}}}};
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
@ -6422,7 +6413,6 @@ dump_block_stats(spa_t *spa)
int e, c, err;
bp_embedded_type_t i;
bzero(&zcb, sizeof (zcb));
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
@ -6442,7 +6432,6 @@ dump_block_stats(spa_t *spa)
* pool claiming each block we discover, but we skip opening any space
* maps.
*/
bzero(&zcb, sizeof (zdb_cb_t));
zdb_leak_init(spa, &zcb);
/*
@ -6815,11 +6804,9 @@ dump_simulated_ddt(spa_t *spa)
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
bzero(&ddh_total, sizeof (ddh_total));
bzero(&dds_total, sizeof (dds_total));
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
@ -7654,8 +7641,7 @@ dump_log_spacemap_obsolete_stats(spa_t *spa)
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
log_sm_obsolete_stats_arg_t lsos;
bzero(&lsos, sizeof (lsos));
log_sm_obsolete_stats_arg_t lsos = {0};
(void) printf("Log Space Map Obsolete Entry Statistics:\n");
@ -8050,7 +8036,7 @@ zdb_decompress_block(abd_t *pabd, void *buf, void *lbuf, uint64_t lsize,
lbuf, psize, lsize, NULL) == 0 &&
zio_decompress_data(*cfuncp, pabd,
lbuf2, psize, lsize, NULL) == 0 &&
bcmp(lbuf, lbuf2, lsize) == 0)
memcmp(lbuf, lbuf2, lsize) == 0)
break;
}
if (*cfuncp != 0)
@ -8368,12 +8354,11 @@ zdb_read_block(char *thing, spa_t *spa)
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp;
blkptr_t bp = {{{{0}}}};
unsigned long long *words = (void *)&bp;
char *buf;
int err;
bzero(&bp, sizeof (bp));
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
@ -8566,7 +8551,7 @@ main(int argc, char **argv)
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
bcopy(searchdirs, tmp, nsearch *
memcpy(tmp, searchdirs, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));

View File

@ -485,7 +485,7 @@ fmd_buf_read(fmd_hdl_t *hdl, fmd_case_t *cp,
assert(cp->ci_bufptr != NULL);
assert(size <= cp->ci_bufsiz);
bcopy(cp->ci_bufptr, buf, size);
memcpy(buf, cp->ci_bufptr, size);
}
void
@ -497,7 +497,7 @@ fmd_buf_write(fmd_hdl_t *hdl, fmd_case_t *cp,
assert(cp->ci_bufptr != NULL);
assert(cp->ci_bufsiz >= size);
bcopy(buf, cp->ci_bufptr, size);
memcpy(cp->ci_bufptr, buf, size);
}
/* SERD Engines */
@ -581,7 +581,7 @@ _timer_notify(union sigval sv)
fmd_hdl_debug(hdl, "timer fired (%p)", ftp->ft_tid);
/* disarm the timer */
bzero(&its, sizeof (struct itimerspec));
memset(&its, 0, sizeof (struct itimerspec));
timer_settime(ftp->ft_tid, 0, &its, NULL);
/* Note that the fmdo_timeout can remove this timer */

View File

@ -74,7 +74,7 @@ fmd_serd_eng_alloc(const char *name, uint64_t n, hrtime_t t)
fmd_serd_eng_t *sgp;
sgp = malloc(sizeof (fmd_serd_eng_t));
bzero(sgp, sizeof (fmd_serd_eng_t));
memset(sgp, 0, sizeof (fmd_serd_eng_t));
sgp->sg_name = strdup(name);
sgp->sg_flags = FMD_SERD_DIRTY;
@ -139,7 +139,7 @@ fmd_serd_hash_destroy(fmd_serd_hash_t *shp)
}
free(shp->sh_hash);
bzero(shp, sizeof (fmd_serd_hash_t));
memset(shp, 0, sizeof (fmd_serd_hash_t));
}
void

View File

@ -2037,7 +2037,7 @@ zfs_do_get(int argc, char **argv)
* Process the set of columns to display. We zero out
* the structure to give us a blank slate.
*/
bzero(&cb.cb_columns, sizeof (cb.cb_columns));
memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
i = 0;
while (*optarg != '\0') {
static char *col_subopts[] =
@ -5104,7 +5104,7 @@ deleg_perm_compare(const void *larg, const void *rarg, void *unused)
static inline void
fs_perm_set_init(fs_perm_set_t *fspset)
{
bzero(fspset, sizeof (fs_perm_set_t));
memset(fspset, 0, sizeof (fs_perm_set_t));
if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool",
sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node),
@ -5171,7 +5171,7 @@ who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
uu_avl_pool_t *pool;
pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool;
bzero(who_perm, sizeof (who_perm_t));
memset(who_perm, 0, sizeof (who_perm_t));
if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL,
UU_DEFAULT)) == NULL)
@ -5205,7 +5205,7 @@ fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
uu_avl_pool_t *nset_pool = fspset->fsps_named_set_avl_pool;
uu_avl_pool_t *who_pool = fspset->fsps_who_perm_avl_pool;
bzero(fsperm, sizeof (fs_perm_t));
memset(fsperm, 0, sizeof (fs_perm_t));
if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT))
== NULL)
@ -8508,7 +8508,7 @@ zfs_do_wait(int argc, char **argv)
char *value;
/* Reset activities array */
bzero(&enabled, sizeof (enabled));
memset(&enabled, 0, sizeof (enabled));
while (*optarg != '\0') {
int activity = getsubopt(&optarg, col_subopts,
&value);

View File

@ -484,15 +484,12 @@ zhack_repair_label_cksum(int argc, char **argv)
zio_checksum_info_t *ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL];
const char *cfg_keys[] = { ZPOOL_CONFIG_VERSION,
ZPOOL_CONFIG_POOL_STATE, ZPOOL_CONFIG_GUID };
boolean_t labels_repaired[VDEV_LABELS];
boolean_t labels_repaired[VDEV_LABELS] = {0};
boolean_t repaired = B_FALSE;
vdev_label_t labels[VDEV_LABELS];
vdev_label_t labels[VDEV_LABELS] = {{{0}}};
struct stat st;
int fd;
bzero(labels_repaired, sizeof (labels_repaired));
bzero(labels, sizeof (labels));
abd_init();
argc -= 1;

View File

@ -10097,7 +10097,7 @@ zpool_do_get(int argc, char **argv)
cb.cb_scripted = B_TRUE;
break;
case 'o':
bzero(&cb.cb_columns, sizeof (cb.cb_columns));
memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
i = 0;
while (*optarg != '\0') {
static char *col_subopts[] =
@ -10714,7 +10714,7 @@ zpool_do_wait(int argc, char **argv)
"scrub", "trim", NULL };
/* Reset activities array */
bzero(&wd.wd_enabled, sizeof (wd.wd_enabled));
memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
while (*optarg != '\0') {
int activity = getsubopt(&optarg, col_subopts,
&value);

View File

@ -229,7 +229,7 @@ zfs_redup_stream(int infd, int outfd, boolean_t verbose)
* We need to regenerate the checksum.
*/
if (drr->drr_type != DRR_BEGIN) {
bzero(&drr->drr_u.drr_checksum.drr_checksum,
memset(&drr->drr_u.drr_checksum.drr_checksum, 0,
sizeof (drr->drr_u.drr_checksum.drr_checksum));
}
@ -380,7 +380,7 @@ zfs_redup_stream(int infd, int outfd, boolean_t verbose)
* a checksum.
*/
if (drr->drr_type != DRR_BEGIN) {
bzero(&drr->drr_u.drr_checksum.drr_checksum,
memset(&drr->drr_u.drr_checksum.drr_checksum, 0,
sizeof (drr->drr_u.drr_checksum.drr_checksum));
}
if (dump_record(drr, buf, payload_size,

View File

@ -930,9 +930,9 @@ process_options(int argc, char **argv)
int opt;
uint64_t value;
char altdir[MAXNAMELEN] = { 0 };
char raid_kind[8] = { "random" };
char raid_kind[8] = "random";
bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
memcpy(zo, &ztest_opts_defaults, sizeof (*zo));
init_options();
@ -1887,7 +1887,7 @@ ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
return;
itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
@ -1904,7 +1904,7 @@ ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
return;
itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
itx->itx_oid = object;
@ -1937,7 +1937,7 @@ ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
itx->itx_wr_state = write_state;
itx->itx_sync = (ztest_random(8) == 0);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
@ -1952,7 +1952,7 @@ ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
@ -1968,7 +1968,7 @@ ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
return;
itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
memcpy(&itx->itx_lr + 1, &lr->lr_common + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
@ -2233,7 +2233,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
if (abuf == NULL) {
dmu_write(os, lr->lr_foid, offset, length, data, tx);
} else {
bcopy(data, abuf->b_data, length);
memcpy(abuf->b_data, data, length);
dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx);
}
@ -2506,7 +2506,7 @@ ztest_lr_alloc(size_t lrsize, char *name)
lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
if (name)
bcopy(name, lr + lrsize, namesize);
memcpy(lr + lrsize, name, namesize);
return (lr);
}
@ -2663,7 +2663,7 @@ ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
bcopy(data, lr + 1, size);
memcpy(lr + 1, data, size);
error = ztest_replay_write(zd, lr, B_FALSE);
@ -2787,7 +2787,7 @@ ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
break;
case ZTEST_IO_WRITE_ZEROES:
bzero(data, blocksize);
memset(data, 0, blocksize);
(void) ztest_write(zd, object, offset, blocksize, data);
break;
@ -4844,16 +4844,16 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
"got %"PRIx64", wanted %"PRIx64"+%"PRIx64"",
pack->bw_index, n, i);
if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
if (memcmp(pack, bigH, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigH mismatch in %p/%p",
pack, bigH);
if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
if (memcmp(pack, bigT, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigT mismatch in %p/%p",
pack, bigT);
if (freeit) {
bzero(pack, sizeof (bufwad_t));
memset(pack, 0, sizeof (bufwad_t));
} else {
pack->bw_index = n + i;
pack->bw_txg = txg;
@ -4899,8 +4899,8 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT0(bcmp(packbuf, packcheck, packsize));
ASSERT0(bcmp(bigbuf, bigcheck, bigsize));
ASSERT0(memcmp(packbuf, packcheck, packsize));
ASSERT0(memcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
@ -4947,11 +4947,11 @@ compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
"got %"PRIx64", wanted %"PRIx64"+%"PRIx64"",
pack->bw_index, n, i);
if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
if (memcmp(pack, bigH, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigH mismatch in %p/%p",
pack, bigH);
if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
if (memcmp(pack, bigT, sizeof (bufwad_t)) != 0)
fatal(B_FALSE, "pack/bigT mismatch in %p/%p",
pack, bigT);
@ -5139,15 +5139,16 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
dmu_buf_t *dbt;
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
bcopy((caddr_t)bigbuf + (off - bigoff),
bigbuf_arcbufs[j]->b_data, chunksize);
memcpy(bigbuf_arcbufs[j]->b_data,
(caddr_t)bigbuf + (off - bigoff),
chunksize);
} else {
bcopy((caddr_t)bigbuf + (off - bigoff),
bigbuf_arcbufs[2 * j]->b_data,
memcpy(bigbuf_arcbufs[2 * j]->b_data,
(caddr_t)bigbuf + (off - bigoff),
chunksize / 2);
bcopy((caddr_t)bigbuf + (off - bigoff) +
memcpy(bigbuf_arcbufs[2 * j + 1]->b_data,
(caddr_t)bigbuf + (off - bigoff) +
chunksize / 2,
bigbuf_arcbufs[2 * j + 1]->b_data,
chunksize / 2);
}
@ -5183,8 +5184,8 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT0(bcmp(packbuf, packcheck, packsize));
ASSERT0(bcmp(bigbuf, bigcheck, bigsize));
ASSERT0(memcmp(packbuf, packcheck, packsize));
ASSERT0(memcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
@ -5337,7 +5338,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
(void) sprintf(propname, "prop_%"PRIu64"", prop);
(void) sprintf(txgname, "txg_%"PRIu64"", prop);
bzero(value, sizeof (value));
memset(value, 0, sizeof (value));
last_txg = 0;
/*
@ -5529,11 +5530,11 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
umem_free(od, sizeof (ztest_od_t));
return;
}
bcopy(name, string_value, namelen);
memcpy(string_value, name, namelen);
} else {
tx = NULL;
txg = 0;
bzero(string_value, namelen);
memset(string_value, 0, namelen);
}
switch (i) {
@ -5552,7 +5553,7 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
error = zap_lookup(os, object, name, wsize, wc, data);
if (error == 0) {
if (data == string_value &&
bcmp(name, data, namelen) != 0)
memcmp(name, data, namelen) != 0)
fatal(B_FALSE, "name '%s' != val '%s' len %d",
name, (char *)data, namelen);
} else {
@ -6436,8 +6437,8 @@ ztest_fletcher(ztest_ds_t *zd, uint64_t id)
fletcher_4_byteswap(buf, size, NULL, &zc_byteswap);
fletcher_4_native(buf, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - data */
@ -6445,8 +6446,8 @@ ztest_fletcher(ztest_ds_t *zd, uint64_t id)
&zc_byteswap);
abd_fletcher_4_native(abd_data, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - metadata */
@ -6454,8 +6455,8 @@ ztest_fletcher(ztest_ds_t *zd, uint64_t id)
&zc_byteswap);
abd_fletcher_4_native(abd_meta, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
VERIFY0(memcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(memcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
}
@ -7960,7 +7961,7 @@ ztest_run_init(void)
* Create and initialize our storage pool.
*/
for (i = 1; i <= ztest_opts.zo_init; i++) {
bzero(zs, sizeof (ztest_shared_t));
memset(zs, 0, sizeof (*zs));
if (ztest_opts.zo_verbose >= 3 &&
ztest_opts.zo_init != 1) {
(void) printf("ztest_init(), pass %d\n", i);
@ -8041,12 +8042,12 @@ main(int argc, char **argv)
setup_data_fd();
setup_hdr();
setup_data();
bcopy(&ztest_opts, ztest_shared_opts,
memcpy(ztest_shared_opts, &ztest_opts,
sizeof (*ztest_shared_opts));
} else {
ztest_fd_data = atoi(fd_data_str);
setup_data();
bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
memcpy(&ztest_opts, ztest_shared_opts, sizeof (ztest_opts));
}
ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);

View File

@ -152,7 +152,7 @@ alloc_pw_string(const char *source)
static void
pw_free(pw_password_t *pw)
{
bzero(pw->value, pw->len);
memset(pw->value, 0, pw->len);
if (try_lock(munlock, pw->value, pw->len) == 0) {
(void) munmap(pw->value, pw->len);
}

View File

@ -132,7 +132,7 @@ typedef struct callb_cpr {
#define CALLB_CPR_INIT(cp, lockp, func, name) { \
strlcpy(curthread->td_name, (name), \
sizeof (curthread->td_name)); \
bzero((caddr_t)(cp), sizeof (callb_cpr_t)); \
memset(cp, 0, sizeof (callb_cpr_t)); \
(cp)->cc_lockp = lockp; \
(cp)->cc_id = callb_add(func, (void *)(cp), \
CB_CL_CPR_DAEMON, name); \

View File

@ -282,7 +282,7 @@ typedef struct xvattr {
static inline void
xva_init(xvattr_t *xvap)
{
bzero(xvap, sizeof (xvattr_t));
memset(xvap, 0, sizeof (xvattr_t));
xvap->xva_mapsize = XVA_MAPSIZE;
xvap->xva_magic = XVA_MAGIC;
xvap->xva_vattr.va_mask = ATTR_XVATTR;

View File

@ -865,7 +865,7 @@ efi_read(int fd, struct dk_gpt *vtoc)
j < sizeof (conversion_array)
/ sizeof (struct uuid_to_ptag); j++) {
if (bcmp(&vtoc->efi_parts[i].p_guid,
if (memcmp(&vtoc->efi_parts[i].p_guid,
&conversion_array[j].uuid,
sizeof (struct uuid)) == 0) {
vtoc->efi_parts[i].p_tag = j;
@ -920,18 +920,17 @@ write_pmbr(int fd, struct dk_gpt *vtoc)
/* LINTED -- always longlong aligned */
dk_ioc.dki_data = (efi_gpt_t *)buf;
if (efi_ioctl(fd, DKIOCGETEFI, &dk_ioc) == -1) {
(void) memcpy(&mb, buf, sizeof (mb));
bzero(&mb, sizeof (mb));
memset(&mb, 0, sizeof (mb));
mb.signature = LE_16(MBB_MAGIC);
} else {
(void) memcpy(&mb, buf, sizeof (mb));
if (mb.signature != LE_16(MBB_MAGIC)) {
bzero(&mb, sizeof (mb));
memset(&mb, 0, sizeof (mb));
mb.signature = LE_16(MBB_MAGIC);
}
}
bzero(&mb.parts, sizeof (mb.parts));
memset(&mb.parts, 0, sizeof (mb.parts));
cp = (uchar_t *)&mb.parts[0];
/* bootable or not */
*cp++ = 0;
@ -1455,8 +1454,8 @@ efi_write(int fd, struct dk_gpt *vtoc)
(void) uuid_generate((uchar_t *)
&vtoc->efi_parts[i].p_uguid);
}
bcopy(&vtoc->efi_parts[i].p_uguid,
&efi_parts[i].efi_gpe_UniquePartitionGUID,
memcpy(&efi_parts[i].efi_gpe_UniquePartitionGUID,
&vtoc->efi_parts[i].p_uguid,
sizeof (uuid_t));
}
efi->efi_gpt_PartitionEntryArrayCRC32 =

View File

@ -46,12 +46,10 @@
static int
nvlist_print_json_string(FILE *fp, const char *input)
{
mbstate_t mbr;
mbstate_t mbr = {0};
wchar_t c;
size_t sz;
bzero(&mbr, sizeof (mbr));
FPRINTF(fp, "\"");
while ((sz = mbrtowc(&c, input, MB_CUR_MAX, &mbr)) > 0) {
if (sz == (size_t)-1 || sz == (size_t)-2) {

View File

@ -383,9 +383,9 @@ typedef struct kstat32 {
*
* ksp->ks_snaptime = gethrtime();
* if (rw == KSTAT_WRITE)
* bcopy(buf, ksp->ks_data, ksp->ks_data_size);
* memcpy(ksp->ks_data, buf, ksp->ks_data_size);
* else
* bcopy(ksp->ks_data, buf, ksp->ks_data_size);
* memcpy(buf, ksp->ks_data, ksp->ks_data_size);
* return (0);
*
* A more illuminating example is taking a snapshot of a linked list:
@ -394,7 +394,7 @@ typedef struct kstat32 {
* if (rw == KSTAT_WRITE)
* return (EACCES); ... See below ...
* for (foo = first_foo; foo; foo = foo->next) {
* bcopy((char *) foo, (char *) buf, sizeof (struct foo));
* memcpy(buf, foo, sizeof (struct foo));
* buf = ((struct foo *) buf) + 1;
* }
* return (0);
@ -423,12 +423,12 @@ typedef struct kstat32 {
* uint_t i;
*
* ... Do the regular copy ...
* bcopy(ksp->ks_data, buf, sizeof (kstat_named_t) * ksp->ks_ndata);
* memcpy(buf, ksp->ks_data, sizeof (kstat_named_t) * ksp->ks_ndata);
*
* for (i = 0; i < ksp->ks_ndata; i++, knp++) {
* if (knp[i].data_type == KSTAT_DATA_STRING &&
* KSTAT_NAMED_STR_PTR(knp) != NULL) {
* bcopy(KSTAT_NAMED_STR_PTR(knp), end,
* memcpy(end, KSTAT_NAMED_STR_PTR(knp),
* KSTAT_NAMED_STR_BUFLEN(knp));
* KSTAT_NAMED_STR_PTR(knp) = end;
* end += KSTAT_NAMED_STR_BUFLEN(knp);

View File

@ -788,7 +788,7 @@ derive_key(libzfs_handle_t *hdl, zfs_keyformat_t format, uint64_t iters,
switch (format) {
case ZFS_KEYFORMAT_RAW:
bcopy(key_material, key, WRAPPING_KEY_LEN);
memcpy(key, key_material, WRAPPING_KEY_LEN);
break;
case ZFS_KEYFORMAT_HEX:
ret = hex_key_to_raw((char *)key_material,

View File

@ -3931,14 +3931,13 @@ zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
int
zpool_vdev_remove_cancel(zpool_handle_t *zhp)
{
zfs_cmd_t zc;
zfs_cmd_t zc = {{0}};
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot cancel removal"));
bzero(&zc, sizeof (zc));
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = 1;

View File

@ -4285,9 +4285,9 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
boolean_t recursive;
char *snapname = NULL;
char destsnap[MAXPATHLEN * 2];
char origin[MAXNAMELEN];
char origin[MAXNAMELEN] = {0};
char name[MAXPATHLEN];
char tmp_keylocation[MAXNAMELEN];
char tmp_keylocation[MAXNAMELEN] = {0};
nvlist_t *rcvprops = NULL; /* props received from the send stream */
nvlist_t *oxprops = NULL; /* override (-o) and exclude (-x) props */
nvlist_t *origprops = NULL; /* original props (if destination exists) */
@ -4303,8 +4303,6 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif
clock_gettime(CLOCK_MONOTONIC_RAW, &begin_time);
bzero(origin, MAXNAMELEN);
bzero(tmp_keylocation, MAXNAMELEN);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
@ -5228,7 +5226,7 @@ zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
* We computed the checksum in the wrong byteorder in
* recv_read() above; do it again correctly.
*/
bzero(&zcksum, sizeof (zio_cksum_t));
memset(&zcksum, 0, sizeof (zio_cksum_t));
fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
flags->byteswap = B_TRUE;

View File

@ -807,7 +807,7 @@ zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
return (NULL);
}
bzero((char *)ret + oldsize, (newsize - oldsize));
memset((char *)ret + oldsize, 0, newsize - oldsize);
return (ret);
}

View File

@ -103,9 +103,9 @@ execvPe(const char *name, const char *path, char * const *argv,
16);
continue;
}
bcopy(p, buf, lp);
memcpy(buf, p, lp);
buf[lp] = '/';
bcopy(name, buf + lp + 1, ln);
memcpy(buf + lp + 1, name, ln);
buf[lp + ln + 1] = '\0';
retry: (void) execve(bp, argv, envp);
@ -135,7 +135,7 @@ retry: (void) execve(bp, argv, envp);
if (cnt > 0) {
memp[0] = argv[0];
memp[1] = bp;
bcopy(argv + 1, memp + 2,
memcpy(memp + 2, argv + 1,
cnt * sizeof (char *));
} else {
memp[0] = "sh";

View File

@ -47,7 +47,7 @@ aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
union {
uint64_t ka64[4];
uint32_t ka32[8];
} keyarr;
} keyarr;
switch (keyBits) {
case 128:
@ -81,7 +81,7 @@ aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
}
} else {
bcopy(cipherKey, keyarr.ka32, keysize);
memcpy(keyarr.ka32, cipherKey, keysize);
}
} else {
/* byte swap */
@ -132,7 +132,7 @@ aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
} else
bcopy(pt, &buffer, AES_BLOCK_LEN);
memcpy(&buffer, pt, AES_BLOCK_LEN);
ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);
@ -143,7 +143,7 @@ aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
*(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
*(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
} else
bcopy(&buffer, ct, AES_BLOCK_LEN);
memcpy(ct, &buffer, AES_BLOCK_LEN);
}
return (CRYPTO_SUCCESS);
}
@ -179,7 +179,7 @@ aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
} else
bcopy(ct, &buffer, AES_BLOCK_LEN);
memcpy(&buffer, ct, AES_BLOCK_LEN);
ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);
@ -190,7 +190,7 @@ aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
*(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
*(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
} else
bcopy(&buffer, pt, AES_BLOCK_LEN);
memcpy(pt, &buffer, AES_BLOCK_LEN);
}
return (CRYPTO_SUCCESS);
}

View File

@ -470,32 +470,32 @@ EdonRInit(EdonRState *state, size_t hashbitlen)
state->hashbitlen = 224;
state->bits_processed = 0;
state->unprocessed_bits = 0;
bcopy(i224p2, hashState224(state)->DoublePipe,
16 * sizeof (uint32_t));
memcpy(hashState224(state)->DoublePipe, i224p2,
sizeof (i224p2));
break;
case 256:
state->hashbitlen = 256;
state->bits_processed = 0;
state->unprocessed_bits = 0;
bcopy(i256p2, hashState256(state)->DoublePipe,
16 * sizeof (uint32_t));
memcpy(hashState256(state)->DoublePipe, i256p2,
sizeof (i256p2));
break;
case 384:
state->hashbitlen = 384;
state->bits_processed = 0;
state->unprocessed_bits = 0;
bcopy(i384p2, hashState384(state)->DoublePipe,
16 * sizeof (uint64_t));
memcpy(hashState384(state)->DoublePipe, i384p2,
sizeof (i384p2));
break;
case 512:
state->hashbitlen = 512;
state->bits_processed = 0;
state->unprocessed_bits = 0;
bcopy(i512p2, hashState224(state)->DoublePipe,
16 * sizeof (uint64_t));
memcpy(hashState224(state)->DoublePipe, i512p2,
sizeof (i512p2));
break;
}
}
@ -520,8 +520,9 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
ASSERT(state->unprocessed_bits + databitlen <=
EdonR256_BLOCK_SIZE * 8);
bcopy(data, hashState256(state)->LastPart
+ (state->unprocessed_bits >> 3), LastBytes);
memcpy(hashState256(state)->LastPart
+ (state->unprocessed_bits >> 3),
data, LastBytes);
state->unprocessed_bits += (int)databitlen;
databitlen = state->unprocessed_bits;
/* LINTED E_BAD_PTR_CAST_ALIGN */
@ -542,7 +543,8 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
1) & 0x01ff;
data32 += bits_processed >> 5; /* byte size update */
bcopy(data32, hashState256(state)->LastPart, LastBytes);
memmove(hashState256(state)->LastPart,
data32, LastBytes);
}
break;
@ -555,8 +557,9 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
ASSERT(state->unprocessed_bits + databitlen <=
EdonR512_BLOCK_SIZE * 8);
bcopy(data, hashState512(state)->LastPart
+ (state->unprocessed_bits >> 3), LastBytes);
memcpy(hashState512(state)->LastPart
+ (state->unprocessed_bits >> 3),
data, LastBytes);
state->unprocessed_bits += (int)databitlen;
databitlen = state->unprocessed_bits;
/* LINTED E_BAD_PTR_CAST_ALIGN */
@ -577,7 +580,8 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
1) & 0x03ff;
data64 += bits_processed >> 6; /* byte size update */
bcopy(data64, hashState512(state)->LastPart, LastBytes);
memmove(hashState512(state)->LastPart,
data64, LastBytes);
}
break;
}
@ -682,7 +686,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
for (j = 0; j < EdonR224_DIGEST_SIZE >> 2; j++)
st_swap32(s32[j], d32 + j);
#else
bcopy(hashState256(state)->DoublePipe + 9, hashval,
memcpy(hashval, hashState256(state)->DoublePipe + 9,
EdonR224_DIGEST_SIZE);
#endif
break;
@ -696,7 +700,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
for (j = 0; j < EdonR256_DIGEST_SIZE >> 2; j++)
st_swap32(s32[j], d32 + j);
#else
bcopy(hashState256(state)->DoublePipe + 8, hashval,
memcpy(hashval, hashState256(state)->DoublePipe + 8,
EdonR256_DIGEST_SIZE);
#endif
break;
@ -710,7 +714,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
for (j = 0; j < EdonR384_DIGEST_SIZE >> 3; j++)
st_swap64(s64[j], d64 + j);
#else
bcopy(hashState384(state)->DoublePipe + 10, hashval,
memcpy(hashval, hashState384(state)->DoublePipe + 10,
EdonR384_DIGEST_SIZE);
#endif
break;
@ -724,7 +728,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
for (j = 0; j < EdonR512_DIGEST_SIZE >> 3; j++)
st_swap64(s64[j], d64 + j);
#else
bcopy(hashState512(state)->DoublePipe + 8, hashval,
memcpy(hashval, hashState512(state)->DoublePipe + 8,
EdonR512_DIGEST_SIZE);
#endif
break;

View File

@ -51,8 +51,8 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (length + ctx->cbc_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
(uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
datap,
length);
ctx->cbc_remainder_len += length;
ctx->cbc_copy_to = datap;
@ -70,8 +70,8 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
[ctx->cbc_remainder_len], need);
memcpy(&((uint8_t *)ctx->cbc_remainder)
[ctx->cbc_remainder_len], datap, need);
blockp = (uint8_t *)ctx->cbc_remainder;
} else {
@ -91,10 +91,10 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (out_data_1_len == block_size) {
copy_block(lastp, out_data_1);
} else {
bcopy(lastp, out_data_1, out_data_1_len);
memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
bcopy(lastp + out_data_1_len,
out_data_2,
memcpy(out_data_2,
lastp + out_data_1_len,
block_size - out_data_1_len);
}
}
@ -113,7 +113,7 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
bcopy(datap, ctx->cbc_remainder, remainder);
memcpy(ctx->cbc_remainder, datap, remainder);
ctx->cbc_remainder_len = remainder;
ctx->cbc_copy_to = datap;
goto out;
@ -157,8 +157,8 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (length + ctx->cbc_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
(uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
datap,
length);
ctx->cbc_remainder_len += length;
ctx->cbc_copy_to = datap;
@ -176,8 +176,8 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
[ctx->cbc_remainder_len], need);
memcpy(&((uint8_t *)ctx->cbc_remainder)
[ctx->cbc_remainder_len], datap, need);
blockp = (uint8_t *)ctx->cbc_remainder;
} else {
@ -203,9 +203,9 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, block_size);
bcopy(blockp, out_data_1, out_data_1_len);
memcpy(out_data_1, blockp, out_data_1_len);
if (out_data_2 != NULL) {
bcopy(blockp + out_data_1_len, out_data_2,
memcpy(out_data_2, blockp + out_data_1_len,
block_size - out_data_1_len);
}
@ -224,7 +224,7 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
bcopy(datap, ctx->cbc_remainder, remainder);
memcpy(ctx->cbc_remainder, datap, remainder);
ctx->cbc_remainder_len = remainder;
ctx->cbc_lastp = lastp;
ctx->cbc_copy_to = datap;

View File

@ -59,8 +59,8 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
if (length + ctx->ccm_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
(uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
memcpy((uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
datap,
length);
ctx->ccm_remainder_len += length;
ctx->ccm_copy_to = datap;
@ -80,8 +80,8 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
[ctx->ccm_remainder_len], need);
memcpy(&((uint8_t *)ctx->ccm_remainder)
[ctx->ccm_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ccm_remainder;
} else {
@ -132,10 +132,10 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
if (out_data_1_len == block_size) {
copy_block(lastp, out_data_1);
} else {
bcopy(lastp, out_data_1, out_data_1_len);
memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
bcopy(lastp + out_data_1_len,
out_data_2,
memcpy(out_data_2,
lastp + out_data_1_len,
block_size - out_data_1_len);
}
}
@ -154,7 +154,7 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
bcopy(datap, ctx->ccm_remainder, remainder);
memcpy(ctx->ccm_remainder, datap, remainder);
ctx->ccm_remainder_len = remainder;
ctx->ccm_copy_to = datap;
goto out;
@ -224,10 +224,10 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
/* ccm_mac_input_buf is not used for encryption */
macp = (uint8_t *)ctx->ccm_mac_input_buf;
bzero(macp, block_size);
memset(macp, 0, block_size);
/* copy remainder to temporary buffer */
bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);
memcpy(macp, ctx->ccm_remainder, ctx->ccm_remainder_len);
/* calculate the CBC MAC */
xor_block(macp, mac_buf);
@ -254,33 +254,32 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
ctx->ccm_remainder_len + ctx->ccm_mac_len);
if (ctx->ccm_remainder_len > 0) {
/* copy temporary block to where it belongs */
if (out_data_2 == NULL) {
/* everything will fit in out_data_1 */
bcopy(macp, out_data_1, ctx->ccm_remainder_len);
bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
memcpy(out_data_1, macp, ctx->ccm_remainder_len);
memcpy(out_data_1 + ctx->ccm_remainder_len, ccm_mac_p,
ctx->ccm_mac_len);
} else {
if (out_data_1_len < ctx->ccm_remainder_len) {
size_t data_2_len_used;
bcopy(macp, out_data_1, out_data_1_len);
memcpy(out_data_1, macp, out_data_1_len);
data_2_len_used = ctx->ccm_remainder_len
- out_data_1_len;
bcopy((uint8_t *)macp + out_data_1_len,
out_data_2, data_2_len_used);
bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
memcpy(out_data_2,
(uint8_t *)macp + out_data_1_len,
data_2_len_used);
memcpy(out_data_2 + data_2_len_used,
ccm_mac_p,
ctx->ccm_mac_len);
} else {
bcopy(macp, out_data_1, out_data_1_len);
memcpy(out_data_1, macp, out_data_1_len);
if (out_data_1_len == ctx->ccm_remainder_len) {
/* mac will be in out_data_2 */
bcopy(ccm_mac_p, out_data_2,
memcpy(out_data_2, ccm_mac_p,
ctx->ccm_mac_len);
} else {
size_t len_not_used = out_data_1_len -
@ -290,11 +289,11 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
* out_data_1, part of the mac will be
* in out_data_2
*/
bcopy(ccm_mac_p,
out_data_1 + ctx->ccm_remainder_len,
len_not_used);
bcopy(ccm_mac_p + len_not_used,
out_data_2,
memcpy(out_data_1 +
ctx->ccm_remainder_len,
ccm_mac_p, len_not_used);
memcpy(out_data_2,
ccm_mac_p + len_not_used,
ctx->ccm_mac_len - len_not_used);
}
@ -302,9 +301,9 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
}
} else {
/* copy block to where it belongs */
bcopy(ccm_mac_p, out_data_1, out_data_1_len);
memcpy(out_data_1, ccm_mac_p, out_data_1_len);
if (out_data_2 != NULL) {
bcopy(ccm_mac_p + out_data_1_len, out_data_2,
memcpy(out_data_2, ccm_mac_p + out_data_1_len,
block_size - out_data_1_len);
}
}
@ -372,7 +371,7 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
}
tmp = (uint8_t *)ctx->ccm_mac_input_buf;
bcopy(datap, tmp + pm_len, length);
memcpy(tmp + pm_len, datap, length);
ctx->ccm_processed_mac_len += length;
return (CRYPTO_SUCCESS);
@ -405,15 +404,15 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
mac_len = length - pt_part;
ctx->ccm_processed_mac_len = mac_len;
bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);
memcpy(ctx->ccm_mac_input_buf, data + pt_part, mac_len);
if (pt_part + ctx->ccm_remainder_len < block_size) {
/*
* since this is last of the ciphertext, will
* just decrypt with it here
*/
bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
[ctx->ccm_remainder_len], pt_part);
memcpy(&((uint8_t *)ctx->ccm_remainder)
[ctx->ccm_remainder_len], datap, pt_part);
ctx->ccm_remainder_len += pt_part;
ccm_decrypt_incomplete_block(ctx, encrypt_block);
ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
@ -424,9 +423,9 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
length = pt_part;
}
} else if (length + ctx->ccm_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
(uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
/* accumulate bytes here and return */
memcpy((uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
datap,
length);
ctx->ccm_remainder_len += length;
ctx->ccm_copy_to = datap;
@ -441,8 +440,8 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
[ctx->ccm_remainder_len], need);
memcpy(&((uint8_t *)ctx->ccm_remainder)
[ctx->ccm_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ccm_remainder;
} else {
@ -492,7 +491,7 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block */
if (remainder > 0 && remainder < block_size) {
bcopy(datap, ctx->ccm_remainder, remainder);
memcpy(ctx->ccm_remainder, datap, remainder);
ctx->ccm_remainder_len = remainder;
ctx->ccm_copy_to = datap;
if (ctx->ccm_processed_mac_len > 0) {
@ -539,10 +538,9 @@ ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
macp = (uint8_t *)ctx->ccm_tmp;
while (mac_remain > 0) {
if (mac_remain < block_size) {
bzero(macp, block_size);
bcopy(pt, macp, mac_remain);
memset(macp, 0, block_size);
memcpy(macp, pt, mac_remain);
mac_remain = 0;
} else {
copy_block(pt, macp);
@ -560,7 +558,7 @@ ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);
/* compare the input CCM MAC value with what we calculated */
if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
if (memcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
/* They don't match */
return (CRYPTO_INVALID_MAC);
} else {
@ -654,10 +652,10 @@ ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);
/* copy the nonce value into b0 */
bcopy(nonce, &(b0[1]), nonceSize);
memcpy(&(b0[1]), nonce, nonceSize);
/* store the length of the payload into b0 */
bzero(&(b0[1+nonceSize]), q);
memset(&(b0[1+nonceSize]), 0, q);
payloadSize = aes_ctx->ccm_data_len;
limit = 8 < q ? 8 : q;
@ -673,9 +671,9 @@ ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
cb[0] = 0x07 & (q-1); /* first byte */
/* copy the nonce value into the counter block */
bcopy(nonce, &(cb[1]), nonceSize);
memcpy(&(cb[1]), nonce, nonceSize);
bzero(&(cb[1+nonceSize]), q);
memset(&(cb[1+nonceSize]), 0, q);
/* Create the mask for the counter field based on the size of nonce */
q <<= 3;
@ -782,7 +780,7 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
/* The IV for CBC MAC for AES CCM mode is always zero */
ivp = (uint8_t *)ctx->ccm_tmp;
bzero(ivp, block_size);
memset(ivp, 0, block_size);
xor_block(ivp, mac_buf);
@ -800,14 +798,14 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
/* 1st block: it contains encoded associated data, and some data */
authp = (uint8_t *)ctx->ccm_tmp;
bzero(authp, block_size);
bcopy(encoded_a, authp, encoded_a_len);
memset(authp, 0, block_size);
memcpy(authp, encoded_a, encoded_a_len);
processed = block_size - encoded_a_len;
if (processed > auth_data_len) {
/* in case auth_data is very small */
processed = auth_data_len;
}
bcopy(auth_data, authp+encoded_a_len, processed);
memcpy(authp+encoded_a_len, auth_data, processed);
/* xor with previous buffer */
xor_block(authp, mac_buf);
encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
@ -823,8 +821,8 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
* There's not a block full of data, pad rest of
* buffer with zero
*/
bzero(authp, block_size);
bcopy(&(auth_data[processed]), authp, remainder);
memset(authp, 0, block_size);
memcpy(authp, &(auth_data[processed]), remainder);
datap = (uint8_t *)authp;
remainder = 0;
} else {

View File

@ -52,8 +52,8 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
if (length + ctx->ctr_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
(uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
memcpy((uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
datap,
length);
ctx->ctr_remainder_len += length;
ctx->ctr_copy_to = datap;
@ -71,8 +71,8 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
bcopy(datap, &((uint8_t *)ctx->ctr_remainder)
[ctx->ctr_remainder_len], need);
memcpy(&((uint8_t *)ctx->ctr_remainder)
[ctx->ctr_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ctr_remainder;
} else {
@ -114,9 +114,9 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
&out_data_1_len, &out_data_2, block_size);
/* copy block to where it belongs */
bcopy(lastp, out_data_1, out_data_1_len);
memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
bcopy(lastp + out_data_1_len, out_data_2,
memcpy(out_data_2, lastp + out_data_1_len,
block_size - out_data_1_len);
}
/* update offset */
@ -134,7 +134,7 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
bcopy(datap, ctx->ctr_remainder, remainder);
memcpy(ctx->ctr_remainder, datap, remainder);
ctx->ctr_remainder_len = remainder;
ctx->ctr_copy_to = datap;
goto out;
@ -176,10 +176,11 @@ ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out,
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, ctx->ctr_remainder_len);
bcopy(p, out_data_1, out_data_1_len);
memcpy(out_data_1, p, out_data_1_len);
if (out_data_2 != NULL) {
bcopy((uint8_t *)p + out_data_1_len,
out_data_2, ctx->ctr_remainder_len - out_data_1_len);
memcpy(out_data_2,
(uint8_t *)p + out_data_1_len,
ctx->ctr_remainder_len - out_data_1_len);
}
out->cd_offset += ctx->ctr_remainder_len;
ctx->ctr_remainder_len = 0;

View File

@ -49,8 +49,8 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
if (length + ctx->ecb_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
(uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
memcpy((uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
datap,
length);
ctx->ecb_remainder_len += length;
ctx->ecb_copy_to = datap;
@ -68,8 +68,8 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
bcopy(datap, &((uint8_t *)ctx->ecb_remainder)
[ctx->ecb_remainder_len], need);
memcpy(&((uint8_t *)ctx->ecb_remainder)
[ctx->ecb_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ecb_remainder;
} else {
@ -81,9 +81,9 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
&out_data_1_len, &out_data_2, block_size);
/* copy block to where it belongs */
bcopy(lastp, out_data_1, out_data_1_len);
memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
bcopy(lastp + out_data_1_len, out_data_2,
memcpy(out_data_2, lastp + out_data_1_len,
block_size - out_data_1_len);
}
/* update offset */
@ -101,7 +101,7 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
bcopy(datap, ctx->ecb_remainder, remainder);
memcpy(ctx->ecb_remainder, datap, remainder);
ctx->ecb_remainder_len = remainder;
ctx->ecb_copy_to = datap;
goto out;

View File

@ -108,8 +108,8 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
if (length + ctx->gcm_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
(uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
memcpy((uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
datap,
length);
ctx->gcm_remainder_len += length;
if (ctx->gcm_copy_to == NULL) {
@ -130,8 +130,8 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
[ctx->gcm_remainder_len], need);
memcpy(&((uint8_t *)ctx->gcm_remainder)
[ctx->gcm_remainder_len], datap, need);
blockp = (uint8_t *)ctx->gcm_remainder;
} else {
@ -162,10 +162,10 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
if (out_data_1_len == block_size) {
copy_block(lastp, out_data_1);
} else {
bcopy(lastp, out_data_1, out_data_1_len);
memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
bcopy(lastp + out_data_1_len,
out_data_2,
memcpy(out_data_2,
lastp + out_data_1_len,
block_size - out_data_1_len);
}
}
@ -187,7 +187,7 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
bcopy(datap, ctx->gcm_remainder, remainder);
memcpy(ctx->gcm_remainder, datap, remainder);
ctx->gcm_remainder_len = remainder;
ctx->gcm_copy_to = datap;
goto out;
@ -245,7 +245,7 @@ gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
(uint8_t *)ctx->gcm_tmp);
macp = (uint8_t *)ctx->gcm_remainder;
bzero(macp + ctx->gcm_remainder_len,
memset(macp + ctx->gcm_remainder_len, 0,
block_size - ctx->gcm_remainder_len);
/* XOR with counter block */
@ -309,8 +309,8 @@ gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
counterp = (uint8_t *)ctx->gcm_tmp;
/* authentication tag */
bzero((uint8_t *)ctx->gcm_tmp, block_size);
bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);
memset((uint8_t *)ctx->gcm_tmp, 0, block_size);
memcpy((uint8_t *)ctx->gcm_tmp, datap, ctx->gcm_remainder_len);
/* add ciphertext to the hash */
GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gcm_impl_get_ops());
@ -350,7 +350,7 @@ gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
}
if (ctx->gcm_pt_buf != NULL) {
bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
memcpy(new, ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
} else {
ASSERT0(ctx->gcm_pt_buf_len);
@ -358,7 +358,7 @@ gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
ctx->gcm_pt_buf = new;
ctx->gcm_pt_buf_len = new_len;
bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
memcpy(&ctx->gcm_pt_buf[ctx->gcm_processed_data_len], data,
length);
ctx->gcm_processed_data_len += length;
}
@ -397,7 +397,7 @@ gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
while (remainder > 0) {
/* Incomplete last block */
if (remainder < block_size) {
bcopy(blockp, ctx->gcm_remainder, remainder);
memcpy(ctx->gcm_remainder, blockp, remainder);
ctx->gcm_remainder_len = remainder;
/*
* not expecting anymore ciphertext, just
@ -438,7 +438,7 @@ gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
xor_block((uint8_t *)ctx->gcm_J0, ghash);
/* compare the input authentication tag with what we calculated */
if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
if (memcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
/* They don't match */
return (CRYPTO_INVALID_MAC);
} else {
@ -495,7 +495,7 @@ gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
ghash = (uint8_t *)ctx->gcm_ghash;
cb = (uint8_t *)ctx->gcm_cb;
if (iv_len == 12) {
bcopy(iv, cb, 12);
memcpy(cb, iv, 12);
cb[12] = 0;
cb[13] = 0;
cb[14] = 0;
@ -506,8 +506,8 @@ gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
/* GHASH the IV */
do {
if (remainder < block_size) {
bzero(cb, block_size);
bcopy(&(iv[processed]), cb, remainder);
memset(cb, 0, block_size);
memcpy(cb, &(iv[processed]), remainder);
datap = (uint8_t *)cb;
remainder = 0;
} else {
@ -539,7 +539,7 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
size_t remainder, processed;
/* encrypt zero block to get subkey H */
bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
(uint8_t *)ctx->gcm_H);
@ -549,8 +549,8 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
gops = gcm_impl_get_ops();
authp = (uint8_t *)ctx->gcm_tmp;
ghash = (uint8_t *)ctx->gcm_ghash;
bzero(authp, block_size);
bzero(ghash, block_size);
memset(authp, 0, block_size);
memset(ghash, 0, block_size);
processed = 0;
remainder = auth_data_len;
@ -562,9 +562,9 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
*/
if (auth_data != NULL) {
bzero(authp, block_size);
bcopy(&(auth_data[processed]),
authp, remainder);
memset(authp, 0, block_size);
memcpy(authp, &(auth_data[processed]),
remainder);
} else {
ASSERT0(remainder);
}
@ -1139,10 +1139,10 @@ gcm_simd_get_htab_size(boolean_t simd_mode)
static inline void
gcm_clear_ctx(gcm_ctx_t *ctx)
{
bzero(ctx->gcm_remainder, sizeof (ctx->gcm_remainder));
bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
bzero(ctx->gcm_J0, sizeof (ctx->gcm_J0));
bzero(ctx->gcm_tmp, sizeof (ctx->gcm_tmp));
memset(ctx->gcm_remainder, 0, sizeof (ctx->gcm_remainder));
memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
memset(ctx->gcm_J0, 0, sizeof (ctx->gcm_J0));
memset(ctx->gcm_tmp, 0, sizeof (ctx->gcm_tmp));
}
/* Increment the GCM counter block by n. */
@ -1187,8 +1187,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
need = block_size - ctx->gcm_remainder_len;
if (length < need) {
/* Accumulate bytes here and return. */
bcopy(datap, (uint8_t *)ctx->gcm_remainder +
ctx->gcm_remainder_len, length);
memcpy((uint8_t *)ctx->gcm_remainder +
ctx->gcm_remainder_len, datap, length);
ctx->gcm_remainder_len += length;
if (ctx->gcm_copy_to == NULL) {
@ -1197,8 +1197,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
return (CRYPTO_SUCCESS);
} else {
/* Complete incomplete block. */
bcopy(datap, (uint8_t *)ctx->gcm_remainder +
ctx->gcm_remainder_len, need);
memcpy((uint8_t *)ctx->gcm_remainder +
ctx->gcm_remainder_len, datap, need);
ctx->gcm_copy_to = NULL;
}
@ -1276,7 +1276,7 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
/* Less than GCM_AVX_MIN_ENCRYPT_BYTES remain, operate on blocks. */
while (bleft > 0) {
if (bleft < block_size) {
bcopy(datap, ctx->gcm_remainder, bleft);
memcpy(ctx->gcm_remainder, datap, bleft);
ctx->gcm_remainder_len = bleft;
ctx->gcm_copy_to = datap;
goto out;
@ -1335,7 +1335,7 @@ gcm_encrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
const uint32_t *cb = (uint32_t *)ctx->gcm_cb;
aes_encrypt_intel(keysched, aes_rounds, cb, (uint32_t *)tmp);
bzero(remainder + rem_len, block_size - rem_len);
memset(remainder + rem_len, 0, block_size - rem_len);
for (int i = 0; i < rem_len; i++) {
remainder[i] ^= tmp[i];
}
@ -1431,8 +1431,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
if (bleft < block_size) {
uint8_t *lastb = (uint8_t *)ctx->gcm_remainder;
bzero(lastb, block_size);
bcopy(datap, lastb, bleft);
memset(lastb, 0, block_size);
memcpy(lastb, datap, bleft);
/* The GCM processing. */
GHASH_AVX(ctx, lastb, block_size);
aes_encrypt_intel(key->encr_ks.ks32, key->nr, cb, tmp);
@ -1468,7 +1468,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
kfpu_end();
/* Compare the input authentication tag with what we calculated. */
if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
if (memcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
/* They don't match. */
return (CRYPTO_INVALID_MAC);
}
@ -1500,8 +1500,8 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
ASSERT(block_size == GCM_BLOCK_LEN);
/* Init H (encrypt zero block) and create the initial counter block. */
bzero(ctx->gcm_ghash, sizeof (ctx->gcm_ghash));
bzero(H, sizeof (ctx->gcm_H));
memset(ctx->gcm_ghash, 0, sizeof (ctx->gcm_ghash));
memset(H, 0, sizeof (ctx->gcm_H));
kfpu_begin();
aes_encrypt_intel(keysched, aes_rounds,
(const uint32_t *)H, (uint32_t *)H);
@ -1509,13 +1509,13 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
gcm_init_htab_avx(ctx->gcm_Htable, H);
if (iv_len == 12) {
bcopy(iv, cb, 12);
memcpy(cb, iv, 12);
cb[12] = 0;
cb[13] = 0;
cb[14] = 0;
cb[15] = 1;
/* We need the ICB later. */
bcopy(cb, ctx->gcm_J0, sizeof (ctx->gcm_J0));
memcpy(ctx->gcm_J0, cb, sizeof (ctx->gcm_J0));
} else {
/*
* Most consumers use 12 byte IVs, so it's OK to use the
@ -1553,8 +1553,8 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
/* Zero pad and hash incomplete last block. */
uint8_t *authp = (uint8_t *)ctx->gcm_tmp;
bzero(authp, block_size);
bcopy(datap, authp, incomp);
memset(authp, 0, block_size);
memcpy(authp, datap, incomp);
GHASH_AVX(ctx, authp, block_size);
}
}

View File

@ -155,7 +155,7 @@ crypto_free_mode_ctx(void *ctx)
#ifdef CAN_USE_GCM_ASM
if (((gcm_ctx_t *)ctx)->gcm_Htable != NULL) {
gcm_ctx_t *gcm_ctx = (gcm_ctx_t *)ctx;
bzero(gcm_ctx->gcm_Htable, gcm_ctx->gcm_htab_len);
memset(gcm_ctx->gcm_Htable, 0, gcm_ctx->gcm_htab_len);
kmem_free(gcm_ctx->gcm_Htable, gcm_ctx->gcm_htab_len);
}
#endif

View File

@ -190,7 +190,7 @@ SHA256Transform(SHA2_CTX *ctx, const uint8_t *blk)
#endif /* __sparc */
if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? */
bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
memcpy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
blk = (uint8_t *)ctx->buf_un.buf32;
}
@ -406,7 +406,7 @@ SHA512Transform(SHA2_CTX *ctx, const uint8_t *blk)
if ((uintptr_t)blk & 0x7) { /* not 8-byte aligned? */
bcopy(blk, ctx->buf_un.buf64, sizeof (ctx->buf_un.buf64));
memcpy(ctx->buf_un.buf64, blk, sizeof (ctx->buf_un.buf64));
blk = (uint8_t *)ctx->buf_un.buf64;
}
@ -823,14 +823,14 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
/*
* general optimization:
*
* only do initial bcopy() and SHA2Transform() if
* only do initial memcpy() and SHA2Transform() if
* buf_index != 0. if buf_index == 0, we're just
* wasting our time doing the bcopy() since there
* wasting our time doing the memcpy() since there
* wasn't any data left over from a previous call to
* SHA2Update().
*/
if (buf_index) {
bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
memcpy(&ctx->buf_un.buf8[buf_index], input, buf_len);
if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE)
SHA256Transform(ctx, ctx->buf_un.buf8);
else
@ -873,7 +873,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
* general optimization:
*
* if i and input_len are the same, return now instead
* of calling bcopy(), since the bcopy() in this case
* of calling memcpy(), since the memcpy() in this case
* will be an expensive noop.
*/
@ -884,7 +884,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
}
/* buffer remaining input */
bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
memcpy(&ctx->buf_un.buf8[buf_index], &input[i], input_len - i);
}
@ -936,7 +936,7 @@ SHA2Final(void *digest, SHA2_CTX *ctx)
*/
Encode64(digest, ctx->state.s64, sizeof (uint64_t) * 3);
Encode64(last, &ctx->state.s64[3], sizeof (uint64_t));
bcopy(last, (uint8_t *)digest + 24, 4);
memcpy((uint8_t *)digest + 24, last, 4);
} else if (algotype == SHA512_256_MECH_INFO_TYPE) {
Encode64(digest, ctx->state.s64, sizeof (uint64_t) * 4);
} else {
@ -946,7 +946,7 @@ SHA2Final(void *digest, SHA2_CTX *ctx)
}
/* zeroize sensitive information */
bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
}
#ifdef _KERNEL

View File

@ -26,16 +26,16 @@ Skein_256_Init(Skein_256_Ctxt_t *ctx, size_t hashBitLen)
switch (hashBitLen) { /* use pre-computed values, where available */
#ifndef SKEIN_NO_PRECOMP
case 256:
bcopy(SKEIN_256_IV_256, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN_256_IV_256, sizeof (ctx->X));
break;
case 224:
bcopy(SKEIN_256_IV_224, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN_256_IV_224, sizeof (ctx->X));
break;
case 160:
bcopy(SKEIN_256_IV_160, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN_256_IV_160, sizeof (ctx->X));
break;
case 128:
bcopy(SKEIN_256_IV_128, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN_256_IV_128, sizeof (ctx->X));
break;
#endif
default:
@ -53,11 +53,11 @@ Skein_256_Init(Skein_256_Ctxt_t *ctx, size_t hashBitLen)
cfg.w[1] = Skein_Swap64(hashBitLen);
cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
Skein_256_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
break;
}
@ -91,7 +91,7 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* compute the initial chaining values ctx->X[], based on key */
if (keyBytes == 0) { /* is there a key? */
/* no key: use all zeroes as key for config block */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
} else { /* here to pre-process a key */
Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
@ -101,13 +101,13 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* set tweaks: T0 = 0; T1 = KEY type */
Skein_Start_New_Type(ctx, KEY);
/* zero the initial chaining variables */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
/* hash the key */
(void) Skein_256_Update(ctx, key, keyBytes);
/* put result into cfg.b[] */
(void) Skein_256_Final_Pad(ctx, cfg.b);
/* copy over into ctx->X[] */
bcopy(cfg.b, ctx->X, sizeof (cfg.b));
memcpy(ctx->X, cfg.b, sizeof (cfg.b));
#if SKEIN_NEED_SWAP
{
uint_t i;
@ -124,7 +124,7 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
ctx->h.hashBitLen = hashBitLen; /* output hash bit count */
Skein_Start_New_Type(ctx, CFG_FINAL);
bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
memset(&cfg.w, 0, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
cfg.w[1] = Skein_Swap64(hashBitLen); /* hash result length in bits */
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
@ -161,7 +161,7 @@ Skein_256_Update(Skein_256_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
if (n) {
/* check on our logic here */
Skein_assert(n < msgByteCnt);
bcopy(msg, &ctx->b[ctx->h.bCnt], n);
memcpy(&ctx->b[ctx->h.bCnt], msg, n);
msgByteCnt -= n;
msg += n;
ctx->h.bCnt += n;
@ -189,7 +189,7 @@ Skein_256_Update(Skein_256_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
/* copy any remaining source message data bytes into b[] */
if (msgByteCnt) {
Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES);
bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
ctx->h.bCnt += msgByteCnt;
}
@ -209,7 +209,7 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN_256_BLOCK_BYTES)
bzero(&ctx->b[ctx->h.bCnt],
memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN_256_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
@ -221,13 +221,12 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
bzero(ctx->b, sizeof (ctx->b));
memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
bcopy(ctx->X, X, sizeof (X));
memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN_256_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
uint64_t tmp = Skein_Swap64((uint64_t)i);
bcopy(&tmp, ctx->b, sizeof (tmp));
*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein_256_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@ -240,7 +239,7 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(256, &ctx->h, n,
hashVal + i * SKEIN_256_BLOCK_BYTES);
/* restore the counter mode key for next time */
bcopy(X, ctx->X, sizeof (X));
memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@ -262,16 +261,16 @@ Skein_512_Init(Skein_512_Ctxt_t *ctx, size_t hashBitLen)
switch (hashBitLen) { /* use pre-computed values, where available */
#ifndef SKEIN_NO_PRECOMP
case 512:
bcopy(SKEIN_512_IV_512, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN_512_IV_512, sizeof (ctx->X));
break;
case 384:
bcopy(SKEIN_512_IV_384, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN_512_IV_384, sizeof (ctx->X));
break;
case 256:
bcopy(SKEIN_512_IV_256, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN_512_IV_256, sizeof (ctx->X));
break;
case 224:
bcopy(SKEIN_512_IV_224, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN_512_IV_224, sizeof (ctx->X));
break;
#endif
default:
@ -289,11 +288,11 @@ Skein_512_Init(Skein_512_Ctxt_t *ctx, size_t hashBitLen)
cfg.w[1] = Skein_Swap64(hashBitLen);
cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
Skein_512_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
break;
}
@ -328,7 +327,7 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* compute the initial chaining values ctx->X[], based on key */
if (keyBytes == 0) { /* is there a key? */
/* no key: use all zeroes as key for config block */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
} else { /* here to pre-process a key */
Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
@ -338,12 +337,12 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* set tweaks: T0 = 0; T1 = KEY type */
Skein_Start_New_Type(ctx, KEY);
/* zero the initial chaining variables */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
(void) Skein_512_Update(ctx, key, keyBytes); /* hash the key */
/* put result into cfg.b[] */
(void) Skein_512_Final_Pad(ctx, cfg.b);
/* copy over into ctx->X[] */
bcopy(cfg.b, ctx->X, sizeof (cfg.b));
memcpy(ctx->X, cfg.b, sizeof (cfg.b));
#if SKEIN_NEED_SWAP
{
uint_t i;
@ -360,7 +359,7 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
ctx->h.hashBitLen = hashBitLen; /* output hash bit count */
Skein_Start_New_Type(ctx, CFG_FINAL);
bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
memset(&cfg.w, 0, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
cfg.w[1] = Skein_Swap64(hashBitLen); /* hash result length in bits */
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
@ -397,7 +396,7 @@ Skein_512_Update(Skein_512_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
if (n) {
/* check on our logic here */
Skein_assert(n < msgByteCnt);
bcopy(msg, &ctx->b[ctx->h.bCnt], n);
memcpy(&ctx->b[ctx->h.bCnt], msg, n);
msgByteCnt -= n;
msg += n;
ctx->h.bCnt += n;
@ -425,7 +424,7 @@ Skein_512_Update(Skein_512_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
/* copy any remaining source message data bytes into b[] */
if (msgByteCnt) {
Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN_512_BLOCK_BYTES);
bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
ctx->h.bCnt += msgByteCnt;
}
@ -445,7 +444,7 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES)
bzero(&ctx->b[ctx->h.bCnt],
memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN_512_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
@ -457,13 +456,12 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
bzero(ctx->b, sizeof (ctx->b));
memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
bcopy(ctx->X, X, sizeof (X));
memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN_512_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
uint64_t tmp = Skein_Swap64((uint64_t)i);
bcopy(&tmp, ctx->b, sizeof (tmp));
*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein_512_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@ -476,7 +474,7 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(512, &ctx->h, n,
hashVal + i * SKEIN_512_BLOCK_BYTES);
/* restore the counter mode key for next time */
bcopy(X, ctx->X, sizeof (X));
memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@ -498,13 +496,13 @@ Skein1024_Init(Skein1024_Ctxt_t *ctx, size_t hashBitLen)
switch (hashBitLen) { /* use pre-computed values, where available */
#ifndef SKEIN_NO_PRECOMP
case 512:
bcopy(SKEIN1024_IV_512, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN1024_IV_512, sizeof (ctx->X));
break;
case 384:
bcopy(SKEIN1024_IV_384, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN1024_IV_384, sizeof (ctx->X));
break;
case 1024:
bcopy(SKEIN1024_IV_1024, ctx->X, sizeof (ctx->X));
memcpy(ctx->X, SKEIN1024_IV_1024, sizeof (ctx->X));
break;
#endif
default:
@ -522,11 +520,11 @@ Skein1024_Init(Skein1024_Ctxt_t *ctx, size_t hashBitLen)
cfg.w[1] = Skein_Swap64(hashBitLen);
cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
Skein1024_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
break;
}
@ -561,7 +559,7 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* compute the initial chaining values ctx->X[], based on key */
if (keyBytes == 0) { /* is there a key? */
/* no key: use all zeroes as key for config block */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
} else { /* here to pre-process a key */
Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
/* do a mini-Init right here */
@ -570,12 +568,12 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* set tweaks: T0 = 0; T1 = KEY type */
Skein_Start_New_Type(ctx, KEY);
/* zero the initial chaining variables */
bzero(ctx->X, sizeof (ctx->X));
memset(ctx->X, 0, sizeof (ctx->X));
(void) Skein1024_Update(ctx, key, keyBytes); /* hash the key */
/* put result into cfg.b[] */
(void) Skein1024_Final_Pad(ctx, cfg.b);
/* copy over into ctx->X[] */
bcopy(cfg.b, ctx->X, sizeof (cfg.b));
memcpy(ctx->X, cfg.b, sizeof (cfg.b));
#if SKEIN_NEED_SWAP
{
uint_t i;
@ -592,7 +590,7 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
ctx->h.hashBitLen = hashBitLen; /* output hash bit count */
Skein_Start_New_Type(ctx, CFG_FINAL);
bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
memset(&cfg.w, 0, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
/* hash result length in bits */
cfg.w[1] = Skein_Swap64(hashBitLen);
@ -630,7 +628,7 @@ Skein1024_Update(Skein1024_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
if (n) {
/* check on our logic here */
Skein_assert(n < msgByteCnt);
bcopy(msg, &ctx->b[ctx->h.bCnt], n);
memcpy(&ctx->b[ctx->h.bCnt], msg, n);
msgByteCnt -= n;
msg += n;
ctx->h.bCnt += n;
@ -658,7 +656,7 @@ Skein1024_Update(Skein1024_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
/* copy any remaining source message data bytes into b[] */
if (msgByteCnt) {
Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN1024_BLOCK_BYTES);
bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
ctx->h.bCnt += msgByteCnt;
}
@ -678,7 +676,7 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN1024_BLOCK_BYTES)
bzero(&ctx->b[ctx->h.bCnt],
memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN1024_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
@ -690,13 +688,12 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
bzero(ctx->b, sizeof (ctx->b));
memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
bcopy(ctx->X, X, sizeof (X));
memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN1024_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
uint64_t tmp = Skein_Swap64((uint64_t)i);
bcopy(&tmp, ctx->b, sizeof (tmp));
*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein1024_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@ -709,7 +706,7 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(1024, &ctx->h, n,
hashVal + i * SKEIN1024_BLOCK_BYTES);
/* restore the counter mode key for next time */
bcopy(X, ctx->X, sizeof (X));
memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@ -727,7 +724,7 @@ Skein_256_Final_Pad(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN_256_BLOCK_BYTES)
bzero(&ctx->b[ctx->h.bCnt],
memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN_256_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
Skein_256_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@ -748,7 +745,7 @@ Skein_512_Final_Pad(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES)
bzero(&ctx->b[ctx->h.bCnt],
memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN_512_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
Skein_512_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@ -770,7 +767,7 @@ Skein1024_Final_Pad(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL;
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN1024_BLOCK_BYTES)
bzero(&ctx->b[ctx->h.bCnt],
memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN1024_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
Skein1024_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@ -798,13 +795,12 @@ Skein_256_Output(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
bzero(ctx->b, sizeof (ctx->b));
memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
bcopy(ctx->X, X, sizeof (X));
memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN_256_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
uint64_t tmp = Skein_Swap64((uint64_t)i);
bcopy(&tmp, ctx->b, sizeof (tmp));
*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein_256_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@ -817,7 +813,7 @@ Skein_256_Output(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(256, &ctx->h, n,
hashVal + i * SKEIN_256_BLOCK_BYTES);
/* restore the counter mode key for next time */
bcopy(X, ctx->X, sizeof (X));
memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@ -838,13 +834,12 @@ Skein_512_Output(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
bzero(ctx->b, sizeof (ctx->b));
memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
bcopy(ctx->X, X, sizeof (X));
memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN_512_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
uint64_t tmp = Skein_Swap64((uint64_t)i);
bcopy(&tmp, ctx->b, sizeof (tmp));
*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein_512_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@ -857,7 +852,7 @@ Skein_512_Output(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(256, &ctx->h, n,
hashVal + i * SKEIN_512_BLOCK_BYTES);
/* restore the counter mode key for next time */
bcopy(X, ctx->X, sizeof (X));
memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@ -878,13 +873,12 @@ Skein1024_Output(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
bzero(ctx->b, sizeof (ctx->b));
memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
bcopy(ctx->X, X, sizeof (X));
memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN1024_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
uint64_t tmp = Skein_Swap64((uint64_t)i);
bcopy(&tmp, ctx->b, sizeof (tmp));
*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein1024_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@ -897,7 +891,7 @@ Skein1024_Output(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(256, &ctx->h, n,
hashVal + i * SKEIN1024_BLOCK_BYTES);
/* restore the counter mode key for next time */
bcopy(X, ctx->X, sizeof (X));
memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}

View File

@ -50,9 +50,9 @@
#else
/* here for x86 and x86-64 CPUs (and other detected little-endian CPUs) */
#define SKEIN_NEED_SWAP (0)
#define Skein_Put64_LSB_First(dst08, src64, bCnt) bcopy(src64, dst08, bCnt)
#define Skein_Put64_LSB_First(dst08, src64, bCnt) memcpy(dst08, src64, bCnt)
#define Skein_Get64_LSB_First(dst64, src08, wCnt) \
bcopy(src08, dst64, 8 * (wCnt))
memcpy(dst64, src08, 8 * (wCnt))
#endif
#endif /* ifndef SKEIN_NEED_SWAP */

View File

@ -138,7 +138,7 @@ crypto_destroy_ctx_template(crypto_ctx_template_t tmpl)
ASSERT(ctx_tmpl->ct_prov_tmpl != NULL);
bzero(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size);
memset(ctx_tmpl->ct_prov_tmpl, 0, ctx_tmpl->ct_size);
kmem_free(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size);
kmem_free(ctx_tmpl, sizeof (kcf_ctx_template_t));
}

View File

@ -250,7 +250,8 @@ kcf_add_mech_provider(short mech_indx,
/* allocate and initialize new kcf_prov_mech_desc */
prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
bcopy(mech_info, &prov_mech->pm_mech_info, sizeof (crypto_mech_info_t));
memcpy(&prov_mech->pm_mech_info, mech_info,
sizeof (crypto_mech_info_t));
prov_mech->pm_prov_desc = prov_desc;
prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
[KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;

View File

@ -70,7 +70,7 @@ crypto_uio_copy_to_data(crypto_data_t *data, uchar_t *buf, int len)
offset, length);
datap = (uchar_t *)(zfs_uio_iovbase(uiop, vec_idx) + offset);
bcopy(buf, datap, cur_len);
memcpy(datap, buf, cur_len);
buf += cur_len;
length -= cur_len;
@ -99,8 +99,8 @@ crypto_put_output_data(uchar_t *buf, crypto_data_t *output, int len)
output->cd_length = len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
bcopy(buf, (uchar_t *)(output->cd_raw.iov_base +
output->cd_offset), len);
memcpy((uchar_t *)(output->cd_raw.iov_base +
output->cd_offset), buf, len);
break;
case CRYPTO_DATA_UIO:

View File

@ -832,7 +832,7 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
crypto_spi_ctx_template_t template)
{
aes_ctx_t aes_ctx; /* on the stack */
aes_ctx_t aes_ctx = {{{{0}}}};
off_t saved_offset;
size_t saved_length;
size_t length_needed;
@ -858,8 +858,6 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS)
return (ret);
bzero(&aes_ctx, sizeof (aes_ctx_t));
ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
KM_SLEEP, B_TRUE);
if (ret != CRYPTO_SUCCESS)
@ -944,7 +942,7 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
out:
if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
}
#ifdef CAN_USE_GCM_ASM
@ -953,7 +951,7 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
}
#endif
@ -966,7 +964,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
crypto_spi_ctx_template_t template)
{
aes_ctx_t aes_ctx; /* on the stack */
aes_ctx_t aes_ctx = {{{{0}}}};
off_t saved_offset;
size_t saved_length;
size_t length_needed;
@ -992,8 +990,6 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS)
return (ret);
bzero(&aes_ctx, sizeof (aes_ctx_t));
ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
KM_SLEEP, B_FALSE);
if (ret != CRYPTO_SUCCESS)
@ -1096,7 +1092,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
out:
if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
}
@ -1113,7 +1109,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
if (((gcm_ctx_t *)&aes_ctx)->gcm_Htable != NULL) {
gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
}
#endif
@ -1150,7 +1146,7 @@ aes_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
* in the key.
*/
if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
bzero(keysched, size);
memset(keysched, 0, size);
kmem_free(keysched, size);
return (rv);
}
@ -1170,7 +1166,8 @@ aes_free_context(crypto_ctx_t *ctx)
if (aes_ctx != NULL) {
if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
ASSERT(aes_ctx->ac_keysched_len != 0);
bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
memset(aes_ctx->ac_keysched, 0,
aes_ctx->ac_keysched_len);
kmem_free(aes_ctx->ac_keysched,
aes_ctx->ac_keysched_len);
}
@ -1260,7 +1257,7 @@ aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
if (rv != CRYPTO_SUCCESS) {
if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(keysched, size);
memset(keysched, 0, size);
kmem_free(keysched, size);
}
}

View File

@ -46,7 +46,7 @@
(len) = (uint32_t)*((ulong_t *)(m)->cm_param); \
else { \
ulong_t tmp_ulong; \
bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \
memcpy(&tmp_ulong, (m)->cm_param, sizeof (ulong_t)); \
(len) = (uint32_t)tmp_ulong; \
} \
}
@ -309,9 +309,9 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
*/
SHA2Final(digest_scratch, sha2_ctx);
bcopy(digest_scratch, (uchar_t *)
memcpy((uchar_t *)
zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
digest_len);
digest_scratch, digest_len);
} else {
SHA2Final((uchar_t *)zfs_uio_iovbase(digest->
cd_uio, vec_idx) + offset,
@ -336,8 +336,9 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
cur_len =
MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) -
offset, length);
bcopy(digest_tmp + scratch_offset,
memcpy(
zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
digest_tmp + scratch_offset,
cur_len);
length -= cur_len;
@ -630,8 +631,8 @@ sha2_digest_atomic(crypto_mechanism_t *mechanism, crypto_data_t *data,
static void
sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
{
uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
int i, block_size, blocks_per_int64;
/* Determine the block size */
@ -643,12 +644,12 @@ sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
blocks_per_int64 = SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t);
}
(void) bzero(ipad, block_size);
(void) bzero(opad, block_size);
(void) memset(ipad, 0, block_size);
(void) memset(opad, 0, block_size);
if (keyval != NULL) {
(void) bcopy(keyval, ipad, length_in_bytes);
(void) bcopy(keyval, opad, length_in_bytes);
(void) memcpy(ipad, keyval, length_in_bytes);
(void) memcpy(opad, keyval, length_in_bytes);
} else {
ASSERT0(length_in_bytes);
}
@ -666,7 +667,6 @@ sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
/* perform SHA2 on opad */
SHA2Init(ctx->hc_mech_type, &ctx->hc_ocontext);
SHA2Update(&ctx->hc_ocontext, (uint8_t *)opad, block_size);
}
/*
@ -708,7 +708,7 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
if (ctx_template != NULL) {
/* reuse context template */
bcopy(ctx_template, PROV_SHA2_HMAC_CTX(ctx),
memcpy(PROV_SHA2_HMAC_CTX(ctx), ctx_template,
sizeof (sha2_hmac_ctx_t));
} else {
/* no context template, compute context */
@ -746,7 +746,7 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
}
if (ret != CRYPTO_SUCCESS) {
bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
ctx->cc_provider_private = NULL;
}
@ -850,8 +850,8 @@ sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
*/
SHA2Final(digest,
&PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset, digest_len);
memcpy((unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset, digest, digest_len);
} else {
SHA2Final((unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset,
@ -872,7 +872,7 @@ sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
else
mac->cd_length = 0;
bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
ctx->cc_provider_private = NULL;
@ -928,7 +928,7 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
if (ctx_template != NULL) {
/* reuse context template */
bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
} else {
sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
/* no context template, initialize context */
@ -1001,8 +1001,8 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
* the user only what was requested.
*/
SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset, digest_len);
memcpy((unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset, digest, digest_len);
} else {
SHA2Final((unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset, &sha2_hmac_ctx.hc_ocontext);
@ -1021,7 +1021,7 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
return (CRYPTO_SUCCESS);
}
bail:
bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
mac->cd_length = 0;
return (ret);
}
@ -1060,7 +1060,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
if (ctx_template != NULL) {
/* reuse context template */
bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
} else {
sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
/* no context template, initialize context */
@ -1137,7 +1137,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
switch (mac->cd_format) {
case CRYPTO_DATA_RAW:
if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
if (memcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset, digest_len) != 0)
ret = CRYPTO_INVALID_MAC;
break;
@ -1170,7 +1170,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) -
offset, length);
if (bcmp(digest + scratch_offset,
if (memcmp(digest + scratch_offset,
zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset,
cur_len) != 0) {
ret = CRYPTO_INVALID_MAC;
@ -1191,7 +1191,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
return (ret);
bail:
bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
mac->cd_length = 0;
return (ret);
}
@ -1282,7 +1282,7 @@ sha2_free_context(crypto_ctx_t *ctx)
else
ctx_len = sizeof (sha2_hmac_ctx_t);
bzero(ctx->cc_provider_private, ctx_len);
memset(ctx->cc_provider_private, 0, ctx_len);
kmem_free(ctx->cc_provider_private, ctx_len);
ctx->cc_provider_private = NULL;

View File

@ -292,8 +292,8 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest)
while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset,
length);
bcopy(digest_tmp + scratch_offset,
zfs_uio_iovbase(uio, vec_idx) + offset, cur_len);
memcpy(zfs_uio_iovbase(uio, vec_idx) + offset,
digest_tmp + scratch_offset, cur_len);
length -= cur_len;
vec_idx++;
@ -349,7 +349,7 @@ skein_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism)
return (CRYPTO_SUCCESS);
errout:
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
return (error);
@ -376,7 +376,7 @@ skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest)
error = skein_update(ctx, data);
if (error != CRYPTO_SUCCESS) {
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
digest->cd_length = 0;
@ -452,7 +452,7 @@ skein_final(crypto_ctx_t *ctx, crypto_data_t *digest)
else
digest->cd_length = 0;
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*(SKEIN_CTX(ctx))));
SKEIN_CTX_LVALUE(ctx) = NULL;
@ -494,7 +494,7 @@ skein_digest_atomic(crypto_mechanism_t *mechanism, crypto_data_t *data,
CRYPTO_BITS2BYTES(skein_ctx.sc_digest_bitlen);
else
digest->cd_length = 0;
bzero(&skein_ctx, sizeof (skein_ctx));
memset(&skein_ctx, 0, sizeof (skein_ctx));
return (error);
}
@ -543,7 +543,7 @@ skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
return (CRYPTO_HOST_MEMORY);
if (ctx_template != NULL) {
bcopy(ctx_template, SKEIN_CTX(ctx),
memcpy(SKEIN_CTX(ctx), ctx_template,
sizeof (*SKEIN_CTX(ctx)));
} else {
error = skein_mac_ctx_build(SKEIN_CTX(ctx), mechanism, key);
@ -553,7 +553,7 @@ skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
return (CRYPTO_SUCCESS);
errout:
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
return (error);
}
@ -573,13 +573,13 @@ skein_mac_atomic(crypto_mechanism_t *mechanism,
crypto_spi_ctx_template_t ctx_template)
{
/* faux crypto context just for skein_digest_{update,final} */
int error;
int error;
crypto_ctx_t ctx;
skein_ctx_t skein_ctx;
SKEIN_CTX_LVALUE(&ctx) = &skein_ctx;
if (ctx_template != NULL) {
bcopy(ctx_template, &skein_ctx, sizeof (skein_ctx));
memcpy(&skein_ctx, ctx_template, sizeof (skein_ctx));
} else {
error = skein_mac_ctx_build(&skein_ctx, mechanism, key);
if (error != CRYPTO_SUCCESS)
@ -593,7 +593,7 @@ skein_mac_atomic(crypto_mechanism_t *mechanism,
return (CRYPTO_SUCCESS);
errout:
bzero(&skein_ctx, sizeof (skein_ctx));
memset(&skein_ctx, 0, sizeof (skein_ctx));
return (error);
}
@ -624,7 +624,7 @@ skein_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
return (CRYPTO_SUCCESS);
errout:
bzero(ctx_tmpl, sizeof (*ctx_tmpl));
memset(ctx_tmpl, 0, sizeof (*ctx_tmpl));
kmem_free(ctx_tmpl, sizeof (*ctx_tmpl));
return (error);
}
@ -636,7 +636,7 @@ static int
skein_free_context(crypto_ctx_t *ctx)
{
if (SKEIN_CTX(ctx) != NULL) {
bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
}

View File

@ -203,7 +203,7 @@ nv_mem_zalloc(nvpriv_t *nvp, size_t size)
void *buf;
if ((buf = nva->nva_ops->nv_ao_alloc(nva, size)) != NULL)
bzero(buf, size);
memset(buf, 0, size);
return (buf);
}
@ -219,7 +219,7 @@ nv_mem_free(nvpriv_t *nvp, void *buf, size_t size)
static void
nv_priv_init(nvpriv_t *priv, nv_alloc_t *nva, uint32_t stat)
{
bzero(priv, sizeof (nvpriv_t));
memset(priv, 0, sizeof (nvpriv_t));
priv->nvp_nva = nva;
priv->nvp_stat = stat;
@ -1203,7 +1203,7 @@ nvlist_add_common(nvlist_t *nvl, const char *name,
nvp->nvp_name_sz = name_sz;
nvp->nvp_value_elem = nelem;
nvp->nvp_type = type;
bcopy(name, NVP_NAME(nvp), name_sz);
memcpy(NVP_NAME(nvp), name, name_sz);
switch (type) {
case DATA_TYPE_BOOLEAN:
@ -1217,7 +1217,7 @@ nvlist_add_common(nvlist_t *nvl, const char *name,
buf += nelem * sizeof (uint64_t);
for (i = 0; i < nelem; i++) {
int slen = strlen(strs[i]) + 1;
bcopy(strs[i], buf, slen);
memcpy(buf, strs[i], slen);
cstrs[i] = buf;
buf += slen;
}
@ -1255,7 +1255,7 @@ nvlist_add_common(nvlist_t *nvl, const char *name,
break;
}
default:
bcopy(data, NVP_VALUE(nvp), value_sz);
memcpy(NVP_VALUE(nvp), data, value_sz);
}
/* if unique name, remove before add */
@ -1588,7 +1588,7 @@ nvpair_value_common(const nvpair_t *nvp, data_type_t type, uint_t *nelem,
return (EINVAL);
if ((value_sz = i_get_value_size(type, NULL, 1)) < 0)
return (EINVAL);
bcopy(NVP_VALUE(nvp), data, (size_t)value_sz);
memcpy(data, NVP_VALUE(nvp), (size_t)value_sz);
if (nelem != NULL)
*nelem = 1;
break;
@ -2540,7 +2540,7 @@ nvs_embedded_nvl_array(nvstream_t *nvs, nvpair_t *nvp, size_t *size)
size_t len = nelem * sizeof (uint64_t);
nvlist_t *embedded = (nvlist_t *)((uintptr_t)nvlp + len);
bzero(nvlp, len); /* don't trust packed data */
memset(nvlp, 0, len); /* don't trust packed data */
for (i = 0; i < nelem; i++) {
if (nvs_embedded(nvs, embedded) != 0) {
nvpair_free(nvp);
@ -2820,15 +2820,15 @@ native_cp(nvstream_t *nvs, void *buf, size_t size)
return (EFAULT);
/*
* The bcopy() below eliminates alignment requirement
* The memcpy() below eliminates alignment requirement
* on the buffer (stream) and is preferred over direct access.
*/
switch (nvs->nvs_op) {
case NVS_OP_ENCODE:
bcopy(buf, native->n_curr, size);
memcpy(native->n_curr, buf, size);
break;
case NVS_OP_DECODE:
bcopy(native->n_curr, buf, size);
memcpy(buf, native->n_curr, size);
break;
default:
return (EINVAL);
@ -2895,7 +2895,7 @@ nvs_native_nvl_fini(nvstream_t *nvs)
if (native->n_curr + sizeof (int) > native->n_end)
return (EFAULT);
bzero(native->n_curr, sizeof (int));
memset(native->n_curr, 0, sizeof (int));
native->n_curr += sizeof (int);
}
@ -2912,10 +2912,10 @@ nvpair_native_embedded(nvstream_t *nvs, nvpair_t *nvp)
/*
* Null out the pointer that is meaningless in the packed
* structure. The address may not be aligned, so we have
* to use bzero.
* to use memset.
*/
bzero((char *)packed + offsetof(nvlist_t, nvl_priv),
sizeof (uint64_t));
memset((char *)packed + offsetof(nvlist_t, nvl_priv),
0, sizeof (uint64_t));
}
return (nvs_embedded(nvs, EMBEDDED_NVL(nvp)));
@ -2933,18 +2933,18 @@ nvpair_native_embedded_array(nvstream_t *nvs, nvpair_t *nvp)
/*
* Null out pointers that are meaningless in the packed
* structure. The addresses may not be aligned, so we have
* to use bzero.
* to use memset.
*/
bzero(value, len);
memset(value, 0, len);
for (i = 0; i < NVP_NELEM(nvp); i++, packed++)
/*
* Null out the pointer that is meaningless in the
* packed structure. The address may not be aligned,
* so we have to use bzero.
* so we have to use memset.
*/
bzero((char *)packed + offsetof(nvlist_t, nvl_priv),
sizeof (uint64_t));
memset((char *)packed + offsetof(nvlist_t, nvl_priv),
0, sizeof (uint64_t));
}
return (nvs_embedded_nvl_array(nvs, nvp, NULL));
@ -2961,9 +2961,9 @@ nvpair_native_string_array(nvstream_t *nvs, nvpair_t *nvp)
/*
* Null out pointers that are meaningless in the packed
* structure. The addresses may not be aligned, so we have
* to use bzero.
* to use memset.
*/
bzero(strp, NVP_NELEM(nvp) * sizeof (uint64_t));
memset(strp, 0, NVP_NELEM(nvp) * sizeof (uint64_t));
break;
}
case NVS_OP_DECODE: {
@ -2988,9 +2988,9 @@ nvs_native_nvp_op(nvstream_t *nvs, nvpair_t *nvp)
int ret = 0;
/*
* We do the initial bcopy of the data before we look at
* We do the initial memcpy of the data before we look at
* the nvpair type, because when we're decoding, we won't
* have the correct values for the pair until we do the bcopy.
* have the correct values for the pair until we do the memcpy.
*/
switch (nvs->nvs_op) {
case NVS_OP_ENCODE:
@ -3086,7 +3086,7 @@ nvs_native_nvpair(nvstream_t *nvs, nvpair_t *nvp, size_t *size)
/* try to read the size value from the stream */
if (native->n_curr + sizeof (int32_t) > native->n_end)
return (EFAULT);
bcopy(native->n_curr, &decode_len, sizeof (int32_t));
memcpy(&decode_len, native->n_curr, sizeof (int32_t));
/* sanity check the size value */
if (decode_len < 0 ||
@ -3451,7 +3451,7 @@ nvs_xdr_nvp_op(nvstream_t *nvs, nvpair_t *nvp)
int i;
if (nvs->nvs_op == NVS_OP_DECODE)
bzero(buf, len); /* don't trust packed data */
memset(buf, 0, len); /* don't trust packed data */
for (i = 0; i < nelem; i++) {
if (buflen <= len)

View File

@ -738,7 +738,7 @@ ace_mask_to_mode(uint32_t mask, o_mode_t *modep, boolean_t isdir)
static void
acevals_init(acevals_t *vals, uid_t key)
{
bzero(vals, sizeof (*vals));
memset(vals, 0, sizeof (*vals));
vals->allowed = ACE_MASK_UNDEFINED;
vals->denied = ACE_MASK_UNDEFINED;
vals->mask = ACE_MASK_UNDEFINED;

View File

@ -301,7 +301,7 @@ SHA256_Final(unsigned char digest[static SHA256_DIGEST_LENGTH], SHA256_CTX *ctx)
be32enc_vect(digest, ctx->state, SHA256_DIGEST_LENGTH);
/* Clear the context state */
explicit_bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
}
/* SHA-224: ******************************************************* */
@ -351,7 +351,7 @@ SHA224_Final(unsigned char digest[static SHA224_DIGEST_LENGTH], SHA224_CTX *ctx)
be32enc_vect(digest, ctx->state, SHA224_DIGEST_LENGTH);
/* Clear the context state */
explicit_bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
}
#ifdef WEAK_REFS

View File

@ -333,7 +333,7 @@ SHA512_Final(unsigned char digest[static SHA512_DIGEST_LENGTH], SHA512_CTX *ctx)
be64enc_vect(digest, ctx->state, SHA512_DIGEST_LENGTH);
/* Clear the context state */
explicit_bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
}
/* SHA-512t: ******************************************************** */
@ -377,7 +377,7 @@ SHA512_224_Final(unsigned char digest[static SHA512_224_DIGEST_LENGTH],
be64enc_vect(digest, ctx->state, SHA512_224_DIGEST_LENGTH);
/* Clear the context state */
explicit_bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
}
void
@ -417,7 +417,7 @@ SHA512_256_Final(unsigned char digest[static SHA512_256_DIGEST_LENGTH],
be64enc_vect(digest, ctx->state, SHA512_256_DIGEST_LENGTH);
/* Clear the context state */
explicit_bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
}
/* ** SHA-384: ******************************************************** */
@ -467,7 +467,7 @@ SHA384_Final(unsigned char digest[static SHA384_DIGEST_LENGTH], SHA384_CTX *ctx)
be64enc_vect(digest, ctx->state, SHA384_DIGEST_LENGTH);
/* Clear the context state */
explicit_bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
}
#if 0

View File

@ -40,7 +40,7 @@ struct zfs2bsd {
int zb_bsd;
};
struct zfs2bsd perms[] = {{ACE_READ_DATA, ACL_READ_DATA},
static const struct zfs2bsd perms[] = {{ACE_READ_DATA, ACL_READ_DATA},
{ACE_WRITE_DATA, ACL_WRITE_DATA},
{ACE_EXECUTE, ACL_EXECUTE},
{ACE_APPEND_DATA, ACL_APPEND_DATA},
@ -56,7 +56,7 @@ struct zfs2bsd perms[] = {{ACE_READ_DATA, ACL_READ_DATA},
{ACE_SYNCHRONIZE, ACL_SYNCHRONIZE},
{0, 0}};
struct zfs2bsd flags[] = {{ACE_FILE_INHERIT_ACE,
static const struct zfs2bsd flags[] = {{ACE_FILE_INHERIT_ACE,
ACL_ENTRY_FILE_INHERIT},
{ACE_DIRECTORY_INHERIT_ACE,
ACL_ENTRY_DIRECTORY_INHERIT},
@ -122,7 +122,7 @@ acl_from_aces(struct acl *aclp, const ace_t *aces, int nentries)
return (EINVAL);
}
bzero(aclp, sizeof (*aclp));
memset(aclp, 0, sizeof (*aclp));
aclp->acl_maxcnt = ACL_MAX_ENTRIES;
aclp->acl_cnt = nentries;
@ -177,7 +177,7 @@ aces_from_acl(ace_t *aces, int *nentries, const struct acl *aclp)
const struct acl_entry *entry;
ace_t *ace;
bzero(aces, sizeof (*aces) * aclp->acl_cnt);
memset(aces, 0, sizeof (*aces) * aclp->acl_cnt);
*nentries = aclp->acl_cnt;

View File

@ -85,7 +85,7 @@ vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg,
} else {
opt->len = strlen(arg) + 1;
opt->value = malloc(opt->len, M_MOUNT, M_WAITOK);
bcopy(arg, opt->value, opt->len);
memcpy(opt->value, arg, opt->len);
}
MNT_ILOCK(vfsp);

View File

@ -141,10 +141,9 @@ int
z_compress_level(void *dest, size_t *destLen, const void *source,
size_t sourceLen, int level)
{
z_stream stream;
z_stream stream = {0};
int err;
bzero(&stream, sizeof (stream));
stream.next_in = (Byte *)source;
stream.avail_in = (uInt)sourceLen;
stream.next_out = dest;
@ -196,11 +195,9 @@ z_compress_level(void *dest, size_t *destLen, const void *source,
int
z_uncompress(void *dest, size_t *destLen, const void *source, size_t sourceLen)
{
z_stream stream;
z_stream stream = {0};
int err;
bzero(&stream, sizeof (stream));
stream.next_in = (Byte *)source;
stream.avail_in = (uInt)sourceLen;
stream.next_out = dest;

View File

@ -184,7 +184,7 @@ zone_dataset_visible(const char *dataset, int *write)
LIST_FOREACH(zd, head, zd_next) {
len = strlen(zd->zd_dataset);
if (strlen(dataset) >= len &&
bcmp(dataset, zd->zd_dataset, len) == 0 &&
memcmp(dataset, zd->zd_dataset, len) == 0 &&
(dataset[len] == '\0' || dataset[len] == '/' ||
dataset[len] == '@')) {
if (write)
@ -206,7 +206,7 @@ zone_dataset_visible(const char *dataset, int *write)
if (dataset[len - 1] == '/')
len--; /* Ignore trailing slash */
if (len < strlen(zd->zd_dataset) &&
bcmp(dataset, zd->zd_dataset, len) == 0 &&
memcmp(dataset, zd->zd_dataset, len) == 0 &&
zd->zd_dataset[len] == '/') {
if (write)
*write = 0;

View File

@ -250,7 +250,7 @@ abd_alloc_zero_scatter(void)
n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
abd_zero_buf = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
bzero(abd_zero_buf, PAGE_SIZE);
memset(abd_zero_buf, 0, PAGE_SIZE);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER | ABD_FLAG_ZEROS;

View File

@ -69,11 +69,11 @@ crypto_mac_init(struct hmac_ctx *ctx, const crypto_key_t *c_key)
/*
* This code is based on the similar code in geom/eli/g_eli_hmac.c
*/
explicit_bzero(key, sizeof (key));
memset(key, 0, sizeof (key));
if (c_key->ck_length == 0)
/* do nothing */;
else if (cl_bytes <= SHA512_HMAC_BLOCK_SIZE)
bcopy(c_key->ck_data, key, cl_bytes);
memcpy(key, c_key->ck_data, cl_bytes);
else {
/*
* If key is longer than 128 bytes reset it to
@ -89,16 +89,16 @@ crypto_mac_init(struct hmac_ctx *ctx, const crypto_key_t *c_key)
k_ipad[i] = key[i] ^ 0x36;
k_opad[i] = key[i] ^ 0x5c;
}
explicit_bzero(key, sizeof (key));
memset(key, 0, sizeof (key));
/* Start inner SHA512. */
SHA512_Init(&ctx->innerctx);
SHA512_Update(&ctx->innerctx, k_ipad, sizeof (k_ipad));
explicit_bzero(k_ipad, sizeof (k_ipad));
memset(k_ipad, 0, sizeof (k_ipad));
/* Start outer SHA512. */
SHA512_Init(&ctx->outerctx);
SHA512_Update(&ctx->outerctx, k_opad, sizeof (k_opad));
explicit_bzero(k_opad, sizeof (k_opad));
memset(k_opad, 0, sizeof (k_opad));
}
void
@ -119,12 +119,12 @@ crypto_mac_final(struct hmac_ctx *ctx, void *md, size_t mdsize)
SHA512_Update(&ctx->outerctx, digest, sizeof (digest));
SHA512_Final(digest, &ctx->outerctx);
explicit_bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
/* mdsize == 0 means "Give me the whole hash!" */
if (mdsize == 0)
mdsize = SHA512_DIGEST_LENGTH;
bcopy(digest, md, mdsize);
explicit_bzero(digest, sizeof (digest));
memcpy(md, digest, mdsize);
memset(digest, 0, sizeof (digest));
}
void
@ -156,7 +156,7 @@ freebsd_crypt_freesession(freebsd_crypt_session_t *sess)
{
mtx_destroy(&sess->fs_lock);
crypto_freesession(sess->fs_sid);
explicit_bzero(sess, sizeof (*sess));
memset(sess, 0, sizeof (*sess));
}
static int
@ -243,7 +243,7 @@ int
freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
const struct zio_crypt_info *c_info, crypto_key_t *key)
{
struct crypto_session_params csp;
struct crypto_session_params csp = {0};
int error = 0;
#ifdef FCRYPTO_DEBUG
@ -259,7 +259,6 @@ freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
}
printf("}\n");
#endif
bzero(&csp, sizeof (csp));
csp.csp_mode = CSP_MODE_AEAD;
csp.csp_cipher_key = key->ck_data;
csp.csp_cipher_klen = key->ck_length / 8;
@ -364,7 +363,7 @@ freebsd_crypt_uio(boolean_t encrypt,
crp->crp_payload_length = datalen;
crp->crp_digest_start = auth_len + datalen;
bcopy(ivbuf, crp->crp_iv, ZIO_DATA_IV_LEN);
memcpy(crp->crp_iv, ivbuf, ZIO_DATA_IV_LEN);
error = zfs_crypto_dispatch(session, crp);
crypto_freereq(crp);
out:
@ -384,7 +383,7 @@ int
freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
const struct zio_crypt_info *c_info, crypto_key_t *key)
{
struct cryptoini cria, crie, *crip;
struct cryptoini cria = {0}, crie = {0}, *crip;
struct enc_xform *xform;
struct auth_hash *xauth;
int error = 0;
@ -452,9 +451,6 @@ freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
xauth->name, xauth->keysize);
#endif
bzero(&crie, sizeof (crie));
bzero(&cria, sizeof (cria));
crie.cri_alg = xform->type;
crie.cri_key = key->ck_data;
crie.cri_klen = key->ck_length;
@ -466,7 +462,7 @@ freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
cria.cri_next = &crie;
crie.cri_next = NULL;
crip = &cria;
// Everything else is bzero'd
// Everything else is zero-initialised
error = crypto_newsession(&sid, crip,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
@ -595,7 +591,7 @@ freebsd_crypt_uio(boolean_t encrypt,
enc_desc->crd_inject = auth_len;
enc_desc->crd_alg = xform->type;
enc_desc->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
bcopy(ivbuf, enc_desc->crd_iv, ZIO_DATA_IV_LEN);
memcpy(enc_desc->crd_iv, ivbuf, ZIO_DATA_IV_LEN);
enc_desc->crd_next = NULL;
#ifdef FCRYPTO_DEBUG

View File

@ -119,7 +119,7 @@ dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
db->db_offset + bufoff);
thiscpy = MIN(PAGESIZE, tocpy - copied);
va = zfs_map_page(*ma, &sf);
bcopy(va, (char *)db->db_data + bufoff, thiscpy);
memcpy((char *)db->db_data + bufoff, va, thiscpy);
zfs_unmap_page(sf);
ma += 1;
bufoff += PAGESIZE;
@ -189,7 +189,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
ASSERT3U(db->db_size, >, PAGE_SIZE);
bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
va = zfs_map_page(m, &sf);
bcopy((char *)db->db_data + bufoff, va, PAGESIZE);
memcpy(va, (char *)db->db_data + bufoff, PAGESIZE);
zfs_unmap_page(sf);
vm_page_valid(m);
dmu_page_lock(m);
@ -231,7 +231,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
tocpy = MIN(db->db_size - bufoff, PAGESIZE - pgoff);
ASSERT3S(tocpy, >=, 0);
if (m != bogus_page)
bcopy((char *)db->db_data + bufoff, va + pgoff, tocpy);
memcpy(va + pgoff, (char *)db->db_data + bufoff, tocpy);
pgoff += tocpy;
ASSERT3S(pgoff, >=, 0);
@ -287,7 +287,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
#endif
if (pgoff != 0) {
ASSERT3P(m, !=, bogus_page);
bzero(va + pgoff, PAGESIZE - pgoff);
memset(va + pgoff, 0, PAGESIZE - pgoff);
zfs_unmap_page(sf);
vm_page_valid(m);
}
@ -309,11 +309,11 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
tocpy = MIN(db->db_size - bufoff, PAGESIZE);
va = zfs_map_page(m, &sf);
bcopy((char *)db->db_data + bufoff, va, tocpy);
memcpy(va, (char *)db->db_data + bufoff, tocpy);
if (tocpy < PAGESIZE) {
ASSERT3S(i, ==, *rahead - 1);
ASSERT3U((db->db_size & PAGE_MASK), !=, 0);
bzero(va + tocpy, PAGESIZE - tocpy);
memset(va + tocpy, 0, PAGESIZE - tocpy);
}
zfs_unmap_page(sf);
vm_page_valid(m);

View File

@ -63,7 +63,7 @@ hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len,
crypto_mac_update(&ctx, info, info_len);
crypto_mac_update(&ctx, &c, 1);
crypto_mac_final(&ctx, T, SHA512_DIGEST_LENGTH);
bcopy(T, out_buf + pos,
memcpy(out_buf + pos, T,
(i != N) ? SHA512_DIGEST_LENGTH : (out_len - pos));
pos += SHA512_DIGEST_LENGTH;
}

View File

@ -689,10 +689,10 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
memcpy(zobjacep->z_object_type, aceobjp->a_obj_type,
sizeof (aceobjp->a_obj_type));
bcopy(aceobjp->a_inherit_obj_type,
zobjacep->z_inherit_type,
memcpy(zobjacep->z_inherit_type,
aceobjp->a_inherit_obj_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
@ -739,11 +739,11 @@ zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
bcopy(zobjacep->z_object_type,
objacep->a_obj_type,
memcpy(objacep->a_obj_type,
zobjacep->z_object_type,
sizeof (zobjacep->z_object_type));
bcopy(zobjacep->z_inherit_type,
objacep->a_inherit_obj_type,
memcpy(objacep->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
@ -1094,7 +1094,7 @@ zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
bcopy(znode_acl.z_ace_data, aclnode->z_acldata,
memcpy(aclnode->z_acldata, znode_acl.z_ace_data,
aclnode->z_size);
}
} else {
@ -1282,7 +1282,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
bcopy(aclnode->z_acldata, start,
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
@ -1564,7 +1564,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
data2sz = aclp->z_ops->ace_data(acep, &data2);
VERIFY3U(data2sz, ==, data1sz);
bcopy(data1, data2, data2sz);
memcpy(data2, data1, data2sz);
}
aclp->z_acl_count++;
@ -1633,7 +1633,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
} else
ASSERT3P(dzp->z_vnode, ==, NULL);
bzero(acl_ids, sizeof (zfs_acl_ids_t));
memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
if (vsecp)
@ -1847,7 +1847,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
bcopy(aclnode->z_acldata, start,
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}

View File

@ -721,7 +721,7 @@ zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= sizeof (dotzfs_name);
bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name));
memcpy(ap->a_buf + *ap->a_buflen, dotzfs_name, sizeof (dotzfs_name));
return (0);
}
@ -1214,7 +1214,7 @@ zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= len;
bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len);
memcpy(ap->a_buf + *ap->a_buflen, node->sn_name, len);
}
vfs_unbusy(mp);
#if __FreeBSD_version >= 1300045

View File

@ -539,7 +539,7 @@ mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
bzero(va + bytes, PAGESIZE - bytes);
memset(va + bytes, 0, PAGESIZE - bytes);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300081
@ -5273,7 +5273,7 @@ zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
{
const char *namespace, *prefix, *suffix;
bzero(attrname, size);
memset(attrname, 0, size);
switch (attrnamespace) {
case EXTATTR_NAMESPACE_USER:
@ -6142,7 +6142,7 @@ zfs_vptocnp(struct vop_vptocnp_args *ap)
}
if (error == 0) {
*ap->a_buflen -= len;
bcopy(name, ap->a_buf + *ap->a_buflen, len);
memcpy(ap->a_buf + *ap->a_buflen, name, len);
*ap->a_vpp = ZTOV(dzp);
}
ZFS_EXIT(zfsvfs);

View File

@ -1975,7 +1975,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
complen = strlen(component);
path -= complen;
ASSERT3P(path, >=, buf);
bcopy(component, path, complen);
memcpy(path, component, complen);
obj = pobj;
if (sa_hdl != hdl) {

View File

@ -211,10 +211,10 @@ zio_crypt_key_destroy_early(zio_crypt_key_t *key)
rw_destroy(&key->zk_salt_lock);
/* free crypto templates */
bzero(&key->zk_session, sizeof (key->zk_session));
memset(&key->zk_session, 0, sizeof (key->zk_session));
/* zero out sensitive data */
bzero(key, sizeof (zio_crypt_key_t));
memset(key, 0, sizeof (zio_crypt_key_t));
}
void
@ -242,7 +242,7 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
return (ENOTSUP);
keydata_len = zio_crypt_table[crypt].ci_keylen;
bzero(key, sizeof (zio_crypt_key_t));
memset(key, 0, sizeof (zio_crypt_key_t));
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
/* fill keydata buffers and salt with random data */
@ -324,7 +324,7 @@ zio_crypt_key_change_salt(zio_crypt_key_t *key)
goto out_unlock;
/* assign the salt and reset the usage count */
bcopy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
freebsd_crypt_freesession(&key->zk_session);
@ -352,7 +352,7 @@ zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
rw_enter(&key->zk_salt_lock, RW_READER);
bcopy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
@ -450,9 +450,8 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
* the plain text (source) to the cipher buffer (dest).
* We set iovecs[0] -- the authentication data -- below.
*/
bcopy((void*)key->zk_master_keydata, keydata_out, keydata_len);
bcopy((void*)key->zk_hmac_keydata, hmac_keydata_out,
SHA512_HMAC_KEYLEN);
memcpy(keydata_out, key->zk_master_keydata, keydata_len);
memcpy(hmac_keydata_out, key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = keydata_out;
iovecs[1].iov_len = keydata_len;
iovecs[2].iov_base = hmac_keydata_out;
@ -529,12 +528,11 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
*/
dst = key->zk_master_keydata;
src = keydata;
bcopy(src, dst, keydata_len);
memcpy(dst, src, keydata_len);
dst = key->zk_hmac_keydata;
src = hmac_keydata;
bcopy(src, dst, SHA512_HMAC_KEYLEN);
memcpy(dst, src, SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = key->zk_master_keydata;
iovecs[1].iov_len = keydata_len;
@ -618,7 +616,7 @@ zio_crypt_generate_iv(uint8_t *ivbuf)
return (0);
error:
bzero(ivbuf, ZIO_DATA_IV_LEN);
memset(ivbuf, 0, ZIO_DATA_IV_LEN);
return (ret);
}
@ -633,7 +631,7 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
crypto_mac(&key->zk_hmac_key, data, datalen,
raw_digestbuf, SHA512_DIGEST_LENGTH);
bcopy(raw_digestbuf, digestbuf, digestlen);
memcpy(digestbuf, raw_digestbuf, digestlen);
return (0);
}
@ -650,8 +648,8 @@ zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
if (ret != 0)
return (ret);
bcopy(digestbuf, salt, ZIO_DATA_SALT_LEN);
bcopy(digestbuf + ZIO_DATA_SALT_LEN, ivbuf, ZIO_DATA_IV_LEN);
memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN);
memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);
return (0);
}
@ -674,18 +672,18 @@ zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
bcopy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
memcpy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
memcpy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
bcopy(salt, &val64, sizeof (uint64_t));
memcpy(&val64, salt, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
bcopy(iv, &val64, sizeof (uint64_t));
memcpy(&val64, iv, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
@ -700,26 +698,26 @@ zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
bzero(salt, ZIO_DATA_SALT_LEN);
bzero(iv, ZIO_DATA_IV_LEN);
memset(salt, 0, ZIO_DATA_SALT_LEN);
memset(iv, 0, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
bcopy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
memcpy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
memcpy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
bcopy(&val64, salt, sizeof (uint64_t));
memcpy(salt, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
bcopy(&val64, iv, sizeof (uint64_t));
memcpy(iv, &val64, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
}
}
@ -732,14 +730,14 @@ zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
memcpy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
memcpy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
bcopy(mac, &val64, sizeof (uint64_t));
memcpy(&val64, mac, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
bcopy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
memcpy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
@ -753,20 +751,20 @@ zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
bzero(mac, ZIO_DATA_MAC_LEN);
memset(mac, 0, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
memcpy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
bcopy(&val64, mac, sizeof (uint64_t));
memcpy(mac, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
bcopy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
}
}
@ -775,8 +773,8 @@ zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
bcopy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
memcpy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
memcpy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
@ -790,8 +788,8 @@ zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
*/
const zil_chain_t *zilc = data;
bcopy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
memcpy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
@ -818,7 +816,7 @@ zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]),
memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
}
@ -946,7 +944,7 @@ zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
bcopy(&bab, *aadp, bab_len);
memcpy(*aadp, &bab, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
@ -961,7 +959,7 @@ zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
uint8_t tmp_dncore[offsetof(dnode_phys_t, dn_blkptr)];
/* authenticate the core dnode (masking out non-portable bits) */
bcopy(dnp, tmp_dncore, sizeof (tmp_dncore));
memcpy(tmp_dncore, dnp, sizeof (tmp_dncore));
adnp = (dnode_phys_t *)tmp_dncore;
if (le_bswap) {
adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
@ -1057,7 +1055,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
crypto_mac_final(ctx, raw_portable_mac, SHA512_DIGEST_LENGTH);
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
memcpy(portable_mac, raw_portable_mac, ZIO_OBJSET_MAC_LEN);
/*
* This is necessary here as we check next whether
@ -1086,7 +1084,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (0);
}
@ -1129,13 +1127,13 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
crypto_mac_final(ctx, raw_local_mac, SHA512_DIGEST_LENGTH);
bcopy(raw_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
memcpy(local_mac, raw_local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
bzero(portable_mac, ZIO_OBJSET_MAC_LEN);
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
memset(portable_mac, 0, ZIO_OBJSET_MAC_LEN);
memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (ret);
}
@ -1172,11 +1170,11 @@ zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
SHA2Final(digestbuf, &ctx);
if (generate) {
bcopy(digestbuf, cksum, ZIO_DATA_MAC_LEN);
memcpy(cksum, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
if (bcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) {
if (memcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) {
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Setting ECKSUM\n", __FUNCTION__, __LINE__);
#endif
@ -1264,7 +1262,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
memcpy(dst, src, datalen);
/* Find the start and end record of the log block. */
zilc = (zil_chain_t *)src;
@ -1303,7 +1301,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
bcopy(src, aadp, sizeof (zil_chain_t) - sizeof (zio_eck_t));
memcpy(aadp, src, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
@ -1329,8 +1327,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
}
/* copy the common lr_t */
bcopy(slrp, dlrp, sizeof (lr_t));
bcopy(slrp, aadp, sizeof (lr_t));
memcpy(dlrp, slrp, sizeof (lr_t));
memcpy(aadp, slrp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
@ -1347,11 +1345,12 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
dst_iovecs[vec].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
memcpy(dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
memcpy(aadp,
slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
aadp, sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
vec++;
@ -1419,7 +1418,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
memcpy(dst, src, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
@ -1462,10 +1461,11 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
bcopy(dnp, &ddnp[i], (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
memcpy(&ddnp[i], dnp,
(uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
bcopy(DN_SPILL_BLKPTR(dnp), DN_SPILL_BLKPTR(&ddnp[i]),
memcpy(DN_SPILL_BLKPTR(&ddnp[i]), DN_SPILL_BLKPTR(dnp),
sizeof (blkptr_t));
}
@ -1480,7 +1480,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
bcopy(dnp, aadp, crypt_len);
memcpy(aadp, dnp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
@ -1517,8 +1517,8 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
vec++;
total_len += crypt_len;
} else {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]), crypt_len);
bcopy(DN_BONUS(dnp), aadp, crypt_len);
memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), crypt_len);
memcpy(aadp, DN_BONUS(dnp), crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
@ -1561,7 +1561,7 @@ zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
ret = SET_ERROR(ENOMEM);
goto error;
}
bzero(cipher_iovecs, nr_cipher * sizeof (iovec_t));
memset(cipher_iovecs, 0, nr_cipher * sizeof (iovec_t));
if (encrypt) {
src = plainbuf;
@ -1570,7 +1570,7 @@ zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
memcpy(dst, src, datalen);
cipher_iovecs[0].iov_base = dst;
cipher_iovecs[0].iov_len = datalen;
@ -1678,8 +1678,8 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
zfs_uio_init(&puio, &puio_s);
zfs_uio_init(&cuio, &cuio_s);
bzero(GET_UIO_STRUCT(&puio), sizeof (struct uio));
bzero(GET_UIO_STRUCT(&cuio), sizeof (struct uio));
memset(GET_UIO_STRUCT(&puio), 0, sizeof (struct uio));
memset(GET_UIO_STRUCT(&cuio), 0, sizeof (struct uio));
#ifdef FCRYPTO_DEBUG
printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n",
@ -1710,7 +1710,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
if (bcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
if (memcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = &key->zk_session;
} else {
@ -1741,7 +1741,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
@ -1753,14 +1753,14 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
kmem_free(failed_decrypt_buf, failed_decrypt_size);
failed_decrypt_buf = kmem_alloc(datalen, KM_SLEEP);
failed_decrypt_size = datalen;
bcopy(cipherbuf, failed_decrypt_buf, datalen);
memcpy(failed_decrypt_buf, cipherbuf, datalen);
}
if (locked)
rw_exit(&key->zk_salt_lock);
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (SET_ERROR(ret));

View File

@ -771,7 +771,6 @@ spl_init(void)
{
int rc = 0;
bzero(&p0, sizeof (proc_t));
spl_random_init();
if ((rc = spl_kvmem_init()))

View File

@ -367,7 +367,7 @@ qat_crypt(qat_encrypt_dir_t dir, uint8_t *src_buf, uint8_t *dst_buf,
aad_len);
if (status != CPA_STATUS_SUCCESS)
goto fail;
bcopy(aad_buf, op_data.pAdditionalAuthData, aad_len);
memcpy(op_data.pAdditionalAuthData, aad_buf, aad_len);
}
bytes_left = enc_len;
@ -413,10 +413,10 @@ qat_crypt(qat_encrypt_dir_t dir, uint8_t *src_buf, uint8_t *dst_buf,
op_data.messageLenToHashInBytes = 0;
op_data.messageLenToCipherInBytes = enc_len;
op_data.ivLenInBytes = ZIO_DATA_IV_LEN;
bcopy(iv_buf, op_data.pIv, ZIO_DATA_IV_LEN);
memcpy(op_data.pIv, iv_buf, ZIO_DATA_IV_LEN);
/* if dir is QAT_DECRYPT, copy digest_buf to pDigestResult */
if (dir == QAT_DECRYPT)
bcopy(digest_buf, op_data.pDigestResult, ZIO_DATA_MAC_LEN);
memcpy(op_data.pDigestResult, digest_buf, ZIO_DATA_MAC_LEN);
cb.verify_result = CPA_FALSE;
init_completion(&cb.complete);
@ -435,7 +435,7 @@ qat_crypt(qat_encrypt_dir_t dir, uint8_t *src_buf, uint8_t *dst_buf,
if (dir == QAT_ENCRYPT) {
/* if dir is QAT_ENCRYPT, save pDigestResult to digest_buf */
bcopy(op_data.pDigestResult, digest_buf, ZIO_DATA_MAC_LEN);
memcpy(digest_buf, op_data.pDigestResult, ZIO_DATA_MAC_LEN);
QAT_STAT_INCR(encrypt_total_out_bytes, enc_len);
} else {
QAT_STAT_INCR(decrypt_total_out_bytes, enc_len);
@ -557,7 +557,7 @@ qat_checksum(uint64_t cksum, uint8_t *buf, uint64_t size, zio_cksum_t *zcp)
goto fail;
}
bcopy(digest_buffer, zcp, sizeof (zio_cksum_t));
memcpy(zcp, digest_buffer, sizeof (zio_cksum_t));
fail:
if (status != CPA_STATUS_SUCCESS)

View File

@ -687,10 +687,10 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp,
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
memcpy(zobjacep->z_object_type, aceobjp->a_obj_type,
sizeof (aceobjp->a_obj_type));
bcopy(aceobjp->a_inherit_obj_type,
zobjacep->z_inherit_type,
memcpy(zobjacep->z_inherit_type,
aceobjp->a_inherit_obj_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
@ -737,11 +737,11 @@ zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
bcopy(zobjacep->z_object_type,
objacep->a_obj_type,
memcpy(objacep->a_obj_type,
zobjacep->z_object_type,
sizeof (zobjacep->z_object_type));
bcopy(zobjacep->z_inherit_type,
objacep->a_inherit_obj_type,
memcpy(objacep->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
@ -1102,7 +1102,7 @@ zfs_acl_node_read(struct znode *zp, boolean_t have_lock, zfs_acl_t **aclpp,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
bcopy(znode_acl.z_ace_data, aclnode->z_acldata,
memcpy(aclnode->z_acldata, znode_acl.z_ace_data,
aclnode->z_size);
}
} else {
@ -1447,7 +1447,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
bcopy(aclnode->z_acldata, start,
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
@ -1727,7 +1727,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t va_mode, zfs_acl_t *paclp,
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
VERIFY((data2sz = aclp->z_ops->ace_data(acep,
&data2)) == data1sz);
bcopy(data1, data2, data2sz);
memcpy(data2, data1, data2sz);
}
aclp->z_acl_count++;
@ -1791,7 +1791,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
bzero(acl_ids, sizeof (zfs_acl_ids_t));
memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = vap->va_mode;
if (vsecp)
@ -2016,7 +2016,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
bcopy(aclnode->z_acldata, start,
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}

View File

@ -297,7 +297,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name,
*/
dl->dl_namesize = strlen(dl->dl_name) + 1;
name = kmem_alloc(dl->dl_namesize, KM_SLEEP);
bcopy(dl->dl_name, name, dl->dl_namesize);
memcpy(name, dl->dl_name, dl->dl_namesize);
dl->dl_name = name;
}
@ -625,7 +625,7 @@ zfs_purgedir(znode_t *dzp)
skipped += 1;
continue;
}
bzero(&dl, sizeof (dl));
memset(&dl, 0, sizeof (dl));
dl.dl_dzp = dzp;
dl.dl_name = zap.za_name;

View File

@ -103,9 +103,9 @@ zfs_uiomove_iov(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
break;
case UIO_SYSSPACE:
if (rw == UIO_READ)
bcopy(p, iov->iov_base + skip, cnt);
memcpy(iov->iov_base + skip, p, cnt);
else
bcopy(iov->iov_base + skip, p, cnt);
memcpy(p, iov->iov_base + skip, cnt);
break;
default:
ASSERT(0);
@ -138,9 +138,9 @@ zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
paddr = zfs_kmap_atomic(bv->bv_page);
if (rw == UIO_READ)
bcopy(p, paddr + bv->bv_offset + skip, cnt);
memcpy(paddr + bv->bv_offset + skip, p, cnt);
else
bcopy(paddr + bv->bv_offset + skip, p, cnt);
memcpy(p, paddr + bv->bv_offset + skip, cnt);
zfs_kunmap_atomic(paddr);
skip += cnt;
@ -275,7 +275,7 @@ zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
zfs_uio_t uio_copy;
int ret;
bcopy(uio, &uio_copy, sizeof (zfs_uio_t));
memcpy(&uio_copy, uio, sizeof (zfs_uio_t));
if (uio->uio_segflg == UIO_BVEC)
ret = zfs_uiomove_bvec(p, n, rw, &uio_copy);

View File

@ -1137,7 +1137,7 @@ zfs_statvfs(struct inode *ip, struct kstatfs *statp)
* We have all of 40 characters to stuff a string here.
* Is there anything useful we could/should provide?
*/
bzero(statp->f_spare, sizeof (statp->f_spare));
memset(statp->f_spare, 0, sizeof (statp->f_spare));
if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
dmu_objset_projectquota_present(zfsvfs->z_os)) {

View File

@ -1581,7 +1581,7 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
flush_dcache_page(pp);
pb = kmap(pp);
bzero(pb + off, len);
memset(pb + off, 0, len);
kunmap(pp);
if (mapping_writably_mapped(mp))
@ -2153,7 +2153,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
component[0] = '/';
if (is_xattrdir) {
(void) sprintf(component + 1, "<xattrdir>");
strcpy(component + 1, "<xattrdir>");
} else {
error = zap_value_search(osp, pobj, obj,
ZFS_DIRENT_OBJ(-1ULL), component + 1);
@ -2164,7 +2164,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
complen = strlen(component);
path -= complen;
ASSERT(path >= buf);
bcopy(component, path, complen);
memcpy(path, component, complen);
obj = pobj;
if (sa_hdl != hdl) {

View File

@ -216,7 +216,7 @@ zio_crypt_key_destroy(zio_crypt_key_t *key)
crypto_destroy_ctx_template(key->zk_hmac_tmpl);
/* zero out sensitive data */
bzero(key, sizeof (zio_crypt_key_t));
memset(key, 0, sizeof (zio_crypt_key_t));
}
int
@ -230,7 +230,7 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
keydata_len = zio_crypt_table[crypt].ci_keylen;
bzero(key, sizeof (zio_crypt_key_t));
memset(key, 0, sizeof (zio_crypt_key_t));
/* fill keydata buffers and salt with random data */
ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
@ -317,7 +317,7 @@ zio_crypt_key_change_salt(zio_crypt_key_t *key)
goto out_unlock;
/* assign the salt and reset the usage count */
bcopy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
/* destroy the old context template and create the new one */
@ -346,7 +346,7 @@ zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
rw_enter(&key->zk_salt_lock, RW_READER);
bcopy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
@ -652,7 +652,7 @@ zio_crypt_generate_iv(uint8_t *ivbuf)
return (0);
error:
bzero(ivbuf, ZIO_DATA_IV_LEN);
memset(ivbuf, 0, ZIO_DATA_IV_LEN);
return (ret);
}
@ -693,12 +693,12 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
goto error;
}
bcopy(raw_digestbuf, digestbuf, digestlen);
memcpy(digestbuf, raw_digestbuf, digestlen);
return (0);
error:
bzero(digestbuf, digestlen);
memset(digestbuf, 0, digestlen);
return (ret);
}
@ -714,8 +714,8 @@ zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
if (ret != 0)
return (ret);
bcopy(digestbuf, salt, ZIO_DATA_SALT_LEN);
bcopy(digestbuf + ZIO_DATA_SALT_LEN, ivbuf, ZIO_DATA_IV_LEN);
memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN);
memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);
return (0);
}
@ -738,18 +738,18 @@ zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
bcopy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
memcpy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
memcpy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
bcopy(salt, &val64, sizeof (uint64_t));
memcpy(&val64, salt, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
bcopy(iv, &val64, sizeof (uint64_t));
memcpy(&val64, iv, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
@ -764,26 +764,26 @@ zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
bzero(salt, ZIO_DATA_SALT_LEN);
bzero(iv, ZIO_DATA_IV_LEN);
memset(salt, 0, ZIO_DATA_SALT_LEN);
memset(iv, 0, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
bcopy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
memcpy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
memcpy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
bcopy(&val64, salt, sizeof (uint64_t));
memcpy(salt, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
bcopy(&val64, iv, sizeof (uint64_t));
memcpy(iv, &val64, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
}
}
@ -796,14 +796,14 @@ zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
memcpy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
memcpy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
bcopy(mac, &val64, sizeof (uint64_t));
memcpy(&val64, mac, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
bcopy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
memcpy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
@ -817,20 +817,20 @@ zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
bzero(mac, ZIO_DATA_MAC_LEN);
memset(mac, 0, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
memcpy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
bcopy(&val64, mac, sizeof (uint64_t));
memcpy(mac, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
bcopy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
}
}
@ -839,8 +839,8 @@ zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
bcopy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
memcpy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
memcpy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
@ -854,8 +854,8 @@ zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
*/
const zil_chain_t *zilc = data;
bcopy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
memcpy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
memcpy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
@ -882,7 +882,7 @@ zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]),
memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
}
@ -1024,7 +1024,7 @@ zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
bcopy(&bab, *aadp, bab_len);
memcpy(*aadp, &bab, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
@ -1048,7 +1048,7 @@ zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
* of copying 512-64 unneeded bytes. The compiler seems to be fine
* with that.
*/
bcopy(dnp, &tmp_dncore, dn_core_size);
memcpy(&tmp_dncore, dnp, dn_core_size);
adnp = &tmp_dncore;
if (le_bswap) {
@ -1190,7 +1190,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
goto error;
}
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
memcpy(portable_mac, raw_portable_mac, ZIO_OBJSET_MAC_LEN);
/*
* This is necessary here as we check next whether
@ -1219,7 +1219,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (0);
}
@ -1282,13 +1282,13 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
goto error;
}
bcopy(raw_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
memcpy(local_mac, raw_local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
bzero(portable_mac, ZIO_OBJSET_MAC_LEN);
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
memset(portable_mac, 0, ZIO_OBJSET_MAC_LEN);
memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (ret);
}
@ -1324,11 +1324,11 @@ zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
SHA2Final(digestbuf, &ctx);
if (generate) {
bcopy(digestbuf, cksum, ZIO_DATA_MAC_LEN);
memcpy(cksum, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
if (bcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0)
if (memcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0)
return (SET_ERROR(ECKSUM));
return (0);
@ -1409,7 +1409,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
nr_src = 1;
nr_dst = 0;
}
bzero(dst, datalen);
memset(dst, 0, datalen);
/* find the start and end record of the log block */
zilc = (zil_chain_t *)src;
@ -1460,8 +1460,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
bcopy(src, dst, sizeof (zil_chain_t));
bcopy(src, aadp, sizeof (zil_chain_t) - sizeof (zio_eck_t));
memcpy(dst, src, sizeof (zil_chain_t));
memcpy(aadp, src, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
@ -1482,8 +1482,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
}
/* copy the common lr_t */
bcopy(slrp, dlrp, sizeof (lr_t));
bcopy(slrp, aadp, sizeof (lr_t));
memcpy(dlrp, slrp, sizeof (lr_t));
memcpy(aadp, slrp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
@ -1504,11 +1504,12 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
dst_iovecs[nr_iovecs].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
memcpy(dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
memcpy(aadp,
slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
aadp, sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
nr_iovecs++;
@ -1655,10 +1656,11 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
bcopy(dnp, &ddnp[i], (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
memcpy(&ddnp[i], dnp,
(uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
bcopy(DN_SPILL_BLKPTR(dnp), DN_SPILL_BLKPTR(&ddnp[i]),
memcpy(DN_SPILL_BLKPTR(&ddnp[i]), DN_SPILL_BLKPTR(dnp),
sizeof (blkptr_t));
}
@ -1673,7 +1675,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
bcopy(dnp, aadp, crypt_len);
memcpy(aadp, dnp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
@ -1716,8 +1718,8 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
nr_iovecs++;
total_len += crypt_len;
} else {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]), crypt_len);
bcopy(DN_BONUS(dnp), aadp, crypt_len);
memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), crypt_len);
memcpy(aadp, DN_BONUS(dnp), crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
@ -1898,7 +1900,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
if (bcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
if (memcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = key->zk_current_tmpl;
} else {
@ -1948,8 +1950,8 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
/* If the hardware implementation fails fall back to software */
}
bzero(&puio, sizeof (zfs_uio_t));
bzero(&cuio, sizeof (zfs_uio_t));
memset(&puio, 0, sizeof (puio));
memset(&cuio, 0, sizeof (cuio));
/* create uios for encryption */
ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
@ -1972,7 +1974,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
@ -1984,7 +1986,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);

View File

@ -126,8 +126,8 @@
* which has been filled either by:
*
* 1. a compression step, which will be mostly cached, or
* 2. a bcopy() or copyin(), which will be uncached (because the
* copy is cache-bypassing).
* 2. a memcpy() or copyin(), which will be uncached
* (because the copy is cache-bypassing).
*
* For both cached and uncached data, both fletcher checksums are much faster
* than sha-256, and slower than 'off', which doesn't touch the data at all.

View File

@ -52,7 +52,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_aarch64_neon_init(fletcher_4_ctx_t *ctx)
{
bzero(ctx->aarch64_neon, 4 * sizeof (zfs_fletcher_aarch64_neon_t));
memset(ctx->aarch64_neon, 0, 4 * sizeof (zfs_fletcher_aarch64_neon_t));
}
ZFS_NO_SANITIZE_UNDEFINED

View File

@ -39,7 +39,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_avx512f_init(fletcher_4_ctx_t *ctx)
{
bzero(ctx->avx512, 4 * sizeof (zfs_fletcher_avx512_t));
memset(ctx->avx512, 0, 4 * sizeof (zfs_fletcher_avx512_t));
}
ZFS_NO_SANITIZE_UNDEFINED

View File

@ -51,7 +51,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_avx2_init(fletcher_4_ctx_t *ctx)
{
bzero(ctx->avx, 4 * sizeof (zfs_fletcher_avx_t));
memset(ctx->avx, 0, 4 * sizeof (zfs_fletcher_avx_t));
}
ZFS_NO_SANITIZE_UNDEFINED

View File

@ -53,7 +53,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_sse2_init(fletcher_4_ctx_t *ctx)
{
bzero(ctx->sse, 4 * sizeof (zfs_fletcher_sse_t));
memset(ctx->sse, 0, 4 * sizeof (zfs_fletcher_sse_t));
}
ZFS_NO_SANITIZE_UNDEFINED

View File

@ -51,7 +51,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_superscalar_init(fletcher_4_ctx_t *ctx)
{
bzero(ctx->superscalar, 4 * sizeof (zfs_fletcher_superscalar_t));
memset(ctx->superscalar, 0, 4 * sizeof (zfs_fletcher_superscalar_t));
}
ZFS_NO_SANITIZE_UNDEFINED

View File

@ -51,7 +51,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_superscalar4_init(fletcher_4_ctx_t *ctx)
{
bzero(ctx->superscalar, 4 * sizeof (zfs_fletcher_superscalar_t));
memset(ctx->superscalar, 0, 4 * sizeof (zfs_fletcher_superscalar_t));
}
ZFS_NO_SANITIZE_UNDEFINED

View File

@ -87,7 +87,7 @@ static uint_t aggsum_borrow_shift = 4;
void
aggsum_init(aggsum_t *as, uint64_t value)
{
bzero(as, sizeof (*as));
memset(as, 0, sizeof (*as));
as->as_lower_bound = as->as_upper_bound = value;
mutex_init(&as->as_lock, NULL, MUTEX_DEFAULT, NULL);
/*

View File

@ -250,7 +250,7 @@
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will bcopy the transformed on-disk block into
* with the transformed data and will memcpy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
@ -1132,7 +1132,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_FULL_SIZE);
memset(hdr, 0, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
@ -1152,7 +1152,7 @@ hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr));
memset(&hdr->b_crypt_hdr, 0, sizeof (hdr->b_crypt_hdr));
arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
return (0);
@ -1164,7 +1164,7 @@ hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_L2ONLY_SIZE);
memset(hdr, 0, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
@ -1176,7 +1176,7 @@ buf_cons(void *vbuf, void *unused, int kmflag)
(void) unused, (void) kmflag;
arc_buf_t *buf = vbuf;
bzero(buf, sizeof (arc_buf_t));
memset(buf, 0, sizeof (arc_buf_t));
mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
@ -1332,9 +1332,9 @@ arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
ASSERT(HDR_PROTECTED(hdr));
bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
memcpy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
memcpy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
memcpy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
@ -1692,7 +1692,7 @@ arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
}
if (!ARC_BUF_COMPRESSED(from)) {
bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
@ -3349,7 +3349,7 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
memcpy(nhdr, hdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
@ -3512,7 +3512,7 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
}
/* unset all members of the original hdr */
bzero(&hdr->b_dva, sizeof (dva_t));
memset(&hdr->b_dva, 0, sizeof (dva_t));
hdr->b_birth = 0;
hdr->b_type = ARC_BUFC_INVALID;
hdr->b_flags = 0;
@ -3537,9 +3537,9 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
hdr->b_crypt_hdr.b_ot = DMU_OT_NONE;
hdr->b_crypt_hdr.b_ebufcnt = 0;
hdr->b_crypt_hdr.b_dsobj = 0;
bzero(hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bzero(hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bzero(hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
memset(hdr->b_crypt_hdr.b_salt, 0, ZIO_DATA_SALT_LEN);
memset(hdr->b_crypt_hdr.b_iv, 0, ZIO_DATA_IV_LEN);
memset(hdr->b_crypt_hdr.b_mac, 0, ZIO_DATA_MAC_LEN);
}
buf_discard_identity(hdr);
@ -3577,11 +3577,11 @@ arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
arc_cksum_free(hdr);
if (salt != NULL)
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
}
/*
@ -3657,9 +3657,9 @@ arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
@ -5643,7 +5643,7 @@ arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
if (buf == NULL)
return;
bcopy(buf->b_data, arg, arc_buf_size(buf));
memcpy(arg, buf->b_data, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
@ -7106,11 +7106,11 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt,
memcpy(localprop.zp_salt, hdr->b_crypt_hdr.b_salt,
ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv,
memcpy(localprop.zp_iv, hdr->b_crypt_hdr.b_iv,
ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac,
memcpy(localprop.zp_mac, hdr->b_crypt_hdr.b_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
@ -8722,14 +8722,15 @@ l2arc_write_done(zio_t *zio)
* block pointer in the header.
*/
if (i == 0) {
bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
memset(l2dhdr, 0,
dev->l2ad_dev_hdr_asize);
} else {
bzero(&l2dhdr->dh_start_lbps[i],
memset(&l2dhdr->dh_start_lbps[i], 0,
sizeof (l2arc_log_blkptr_t));
}
break;
}
bcopy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[i],
memcpy(&l2dhdr->dh_start_lbps[i], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
@ -9353,7 +9354,7 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
}
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
bzero((char *)tmp + psize, asize - psize);
memset((char *)tmp + psize, 0, asize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, asize);
to_write = cabd;
@ -9388,7 +9389,7 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
@ -9897,7 +9898,7 @@ l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
if (l2arc_trim_ahead > 0) {
dev->l2ad_trim_all = B_TRUE;
} else {
bzero(l2dhdr, l2dhdr_asize);
memset(l2dhdr, 0, l2dhdr_asize);
l2arc_dev_hdr_update(dev);
}
}
@ -10218,7 +10219,7 @@ l2arc_rebuild(l2arc_dev_t *dev)
goto out;
/* Prepare the rebuild process */
bcopy(l2dhdr->dh_start_lbps, lbps, sizeof (lbps));
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
@ -10264,7 +10265,7 @@ l2arc_rebuild(l2arc_dev_t *dev)
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
bcopy(&lbps[0], lb_ptr_buf->lb_ptr,
memcpy(lb_ptr_buf->lb_ptr, &lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
@ -10362,7 +10363,7 @@ l2arc_rebuild(l2arc_dev_t *dev)
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err == ECANCELED) {
/*
@ -10853,13 +10854,13 @@ l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
bzero(tmpbuf + psize, asize - psize);
memset(tmpbuf + psize, 0, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
bcopy(lb, tmpbuf, sizeof (*lb));
memcpy(tmpbuf, lb, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
@ -10885,7 +10886,7 @@ l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
bcopy(&l2dhdr->dh_start_lbps[0], lb_ptr_buf->lb_ptr,
memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
@ -10974,7 +10975,7 @@ l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
bzero(le, sizeof (*le));
memset(le, 0, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;

View File

@ -58,7 +58,7 @@ encode_embedded_bp_compressed(blkptr_t *bp, void *data,
ASSERT3U(comp, >=, ZIO_COMPRESS_OFF);
ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
bzero(bp, sizeof (*bp));
memset(bp, 0, sizeof (*bp));
BP_SET_EMBEDDED(bp, B_TRUE);
BP_SET_COMPRESS(bp, comp);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

View File

@ -156,7 +156,7 @@ bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
if (err)
return (err);
bzero(bpo, sizeof (*bpo));
memset(bpo, 0, sizeof (*bpo));
mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
ASSERT(bpo->bpo_dbuf == NULL);
@ -805,12 +805,12 @@ bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, boolean_t bp_freed,
* set of BP's stored, and bpobj_iterate() wouldn't visit
* all the space accounted for in the bpobj.
*/
bzero(&stored_bp, sizeof (stored_bp));
memset(&stored_bp, 0, sizeof (stored_bp));
stored_bp.blk_prop = bp->blk_prop;
stored_bp.blk_birth = bp->blk_birth;
} else if (!BP_GET_DEDUP(bp)) {
/* The bpobj will compress better without the checksum */
bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));
memset(&stored_bp.blk_cksum, 0, sizeof (stored_bp.blk_cksum));
}
stored_bp.blk_fill = 0;

View File

@ -159,7 +159,7 @@ zfs_btree_create(zfs_btree_t *tree, int (*compar) (const void *, const void *),
*/
ASSERT3U(size, <=, (BTREE_LEAF_SIZE - sizeof (zfs_btree_hdr_t)) / 4);
bzero(tree, sizeof (*tree));
memset(tree, 0, sizeof (*tree));
tree->bt_compar = compar;
tree->bt_elem_size = size;
tree->bt_height = -1;

View File

@ -123,7 +123,7 @@ dataset_kstats_create(dataset_kstats_t *dk, objset_t *objset)
dataset_kstat_values_t *dk_kstats =
kmem_alloc(sizeof (empty_dataset_kstats), KM_SLEEP);
bcopy(&empty_dataset_kstats, dk_kstats,
memcpy(dk_kstats, &empty_dataset_kstats,
sizeof (empty_dataset_kstats));
char *ds_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);

View File

@ -280,7 +280,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dmu_buf_impl_t *db = vdb;
bzero(db, sizeof (dmu_buf_impl_t));
memset(db, 0, sizeof (dmu_buf_impl_t));
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
@ -1235,7 +1235,7 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)
mutex_exit(&db->db_mtx);
abuf = arc_loan_buf(spa, B_FALSE, blksz);
bcopy(db->db.db_data, abuf->b_data, blksz);
memcpy(abuf->b_data, db->db.db_data, blksz);
} else {
abuf = db->db_buf;
arc_loan_inuse_buf(abuf, db);
@ -1356,7 +1356,7 @@ dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
/* freed in flight */
ASSERT(zio == NULL || zio->io_error == 0);
arc_release(buf, db);
bzero(buf->b_data, db->db.db_size);
memset(buf->b_data, 0, db->db.db_size);
arc_buf_freeze(buf);
db->db_freed_in_flight = FALSE;
dbuf_set_data(db, buf);
@ -1395,9 +1395,9 @@ dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
if (bonuslen < max_bonuslen)
bzero(db->db.db_data, max_bonuslen);
memset(db->db.db_data, 0, max_bonuslen);
if (bonuslen)
bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "bonus buffer filled");
return (0);
@ -1446,7 +1446,7 @@ dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn)
if (is_hole) {
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
bzero(db->db.db_data, db->db.db_size);
memset(db->db.db_data, 0, db->db.db_size);
if (db->db_blkptr != NULL && db->db_level > 0 &&
BP_IS_HOLE(db->db_blkptr) &&
@ -1657,7 +1657,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
dnode_t *dn = DB_DNODE(db);
int size = arc_buf_size(db->db_buf);
@ -1687,7 +1687,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
} else {
dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
}
bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
@ -1985,7 +1985,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
ASSERT(db->db.db_data != NULL);
arc_release(db->db_buf, db);
rw_enter(&db->db_rwlock, RW_WRITER);
bzero(db->db.db_data, db->db.db_size);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
arc_buf_freeze(db->db_buf);
}
@ -2022,10 +2022,10 @@ dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
/* copy old block data to the new block */
old_buf = db->db_buf;
bcopy(old_buf->b_data, buf->b_data, MIN(osize, size));
memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
/* zero the remainder */
if (size > osize)
bzero((uint8_t *)buf->b_data + osize, size - osize);
memset((uint8_t *)buf->b_data + osize, 0, size - osize);
mutex_enter(&db->db_mtx);
dbuf_set_data(db, buf);
@ -2655,9 +2655,9 @@ dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
dr->dt.dl.dr_has_raw_params = B_TRUE;
dr->dt.dl.dr_byteorder = byteorder;
bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
}
static void
@ -2690,7 +2690,7 @@ dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
bzero(db->db.db_data, db->db.db_size);
memset(db->db.db_data, 0, db->db.db_size);
db->db_freed_in_flight = FALSE;
DTRACE_SET_STATE(db,
"fill done handling freed in flight");
@ -2802,7 +2802,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
bcopy(buf->b_data, db->db.db_data, db->db.db_size);
memcpy(db->db.db_data, buf->b_data, db->db.db_size);
arc_buf_destroy(buf, db);
return;
}
@ -3516,7 +3516,7 @@ dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
}
rw_enter(&db->db_rwlock, RW_WRITER);
bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
rw_exit(&db->db_rwlock);
}
@ -4040,7 +4040,7 @@ dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
dnode_t *dn = dr->dr_dnode;
ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
bcopy(data, DN_BONUS(dn->dn_phys), DN_MAX_BONUS_LEN(dn->dn_phys));
memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
dbuf_sync_leaf_verify_bonus_dnode(dr);
@ -4460,7 +4460,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
} else {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
}
bcopy(db->db.db_data, (*datap)->b_data, psize);
memcpy((*datap)->b_data, db->db.db_data, psize);
}
db->db_data_pending = dr;
@ -4640,7 +4640,7 @@ dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
* zero out.
*/
rw_enter(&db->db_rwlock, RW_WRITER);
bzero(db->db.db_data, db->db.db_size);
memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
}
DB_DNODE_EXIT(db);

View File

@ -46,11 +46,11 @@ static kmem_cache_t *ddt_entry_cache;
*/
int zfs_dedup_prefetch = 0;
static const ddt_ops_t *ddt_ops[DDT_TYPES] = {
static const ddt_ops_t *const ddt_ops[DDT_TYPES] = {
&ddt_zap_ops,
};
static const char *ddt_class_name[DDT_CLASSES] = {
static const char *const ddt_class_name[DDT_CLASSES] = {
"ditto",
"duplicate",
"unique",
@ -99,7 +99,7 @@ ddt_object_destroy(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
VERIFY(zap_remove(os, DMU_POOL_DIRECTORY_OBJECT, name, tx) == 0);
VERIFY(zap_remove(os, spa->spa_ddt_stat_object, name, tx) == 0);
VERIFY(ddt_ops[type]->ddt_op_destroy(os, *objectp, tx) == 0);
bzero(&ddt->ddt_object_stats[type][class], sizeof (ddt_object_t));
memset(&ddt->ddt_object_stats[type][class], 0, sizeof (ddt_object_t));
*objectp = 0;
}
@ -322,7 +322,7 @@ ddt_phys_fill(ddt_phys_t *ddp, const blkptr_t *bp)
void
ddt_phys_clear(ddt_phys_t *ddp)
{
bzero(ddp, sizeof (*ddp));
memset(ddp, 0, sizeof (*ddp));
}
void
@ -390,7 +390,7 @@ ddt_stat_generate(ddt_t *ddt, ddt_entry_t *dde, ddt_stat_t *dds)
uint64_t lsize = DDK_GET_LSIZE(ddk);
uint64_t psize = DDK_GET_PSIZE(ddk);
bzero(dds, sizeof (*dds));
memset(dds, 0, sizeof (*dds));
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
uint64_t dsize = 0;
@ -454,7 +454,7 @@ ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src)
void
ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh)
{
bzero(dds, sizeof (*dds));
memset(dds, 0, sizeof (*dds));
for (int h = 0; h < 64; h++)
ddt_stat_add(dds, &ddh->ddh_stat[h], 0);
@ -532,7 +532,7 @@ ddt_get_dedup_dspace(spa_t *spa)
if (spa->spa_dedup_dspace != ~0ULL)
return (spa->spa_dedup_dspace);
bzero(&dds_total, sizeof (ddt_stat_t));
memset(&dds_total, 0, sizeof (ddt_stat_t));
/* Calculate and cache the stats */
ddt_get_dedup_stats(spa, &dds_total);
@ -566,7 +566,7 @@ ddt_compress(void *src, uchar_t *dst, size_t s_len, size_t d_len)
if (c_len == s_len) {
cpfunc = ZIO_COMPRESS_OFF;
bcopy(src, dst, s_len);
memcpy(dst, src, s_len);
}
*version = cpfunc;
@ -586,7 +586,7 @@ ddt_decompress(uchar_t *src, void *dst, size_t s_len, size_t d_len)
if (ci->ci_decompress != NULL)
(void) ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
else
bcopy(src, dst, d_len);
memcpy(dst, src, d_len);
if (((version & DDT_COMPRESS_BYTEORDER_MASK) != 0) !=
(ZFS_HOST_BYTEORDER != 0))
@ -633,7 +633,7 @@ ddt_alloc(const ddt_key_t *ddk)
ddt_entry_t *dde;
dde = kmem_cache_alloc(ddt_entry_cache, KM_SLEEP);
bzero(dde, sizeof (ddt_entry_t));
memset(dde, 0, sizeof (ddt_entry_t));
cv_init(&dde->dde_cv, NULL, CV_DEFAULT, NULL);
dde->dde_key = *ddk;
@ -785,7 +785,7 @@ ddt_table_alloc(spa_t *spa, enum zio_checksum c)
ddt_t *ddt;
ddt = kmem_cache_alloc(ddt_cache, KM_SLEEP);
bzero(ddt, sizeof (ddt_t));
memset(ddt, 0, sizeof (ddt_t));
mutex_init(&ddt->ddt_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&ddt->ddt_tree, ddt_entry_compare,
@ -847,7 +847,7 @@ ddt_load(spa_t *spa)
/*
* Seed the cached histograms.
*/
bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
memcpy(&ddt->ddt_histogram_cache, ddt->ddt_histogram,
sizeof (ddt->ddt_histogram));
spa->spa_dedup_dspace = ~0ULL;
}
@ -919,7 +919,7 @@ ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
}
}
bzero(dde->dde_phys, sizeof (dde->dde_phys));
memset(dde->dde_phys, 0, sizeof (dde->dde_phys));
return (dde);
}
@ -964,7 +964,7 @@ ddt_repair_entry(ddt_t *ddt, ddt_entry_t *dde, ddt_entry_t *rdde, zio_t *rio)
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth != rddp->ddp_phys_birth ||
bcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
memcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
zio_nowait(zio_rewrite(zio, zio->io_spa, 0, &blk,
@ -1108,7 +1108,7 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
}
}
bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
memcpy(&ddt->ddt_histogram_cache, ddt->ddt_histogram,
sizeof (ddt->ddt_histogram));
spa->spa_dedup_dspace = ~0ULL;
}

View File

@ -1012,7 +1012,7 @@ dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
if (dn->dn_maxblkid == 0) {
uint64_t newsz = offset > dn->dn_datablksz ? 0 :
MIN(size, dn->dn_datablksz - offset);
bzero((char *)buf + newsz, size - newsz);
memset((char *)buf + newsz, 0, size - newsz);
size = newsz;
}
@ -2077,9 +2077,9 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
zp->zp_nopwrite = nopwrite;
zp->zp_encrypt = encrypt;
zp->zp_byteorder = ZFS_HOST_BYTEORDER;
bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN);
memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN);
memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN);
zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
os->os_zpl_special_smallblock : 0;

View File

@ -516,8 +516,8 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
if (arc_buf_size(os->os_phys_buf) < size) {
arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
bzero(buf->b_data, size);
bcopy(os->os_phys_buf->b_data, buf->b_data,
memset(buf->b_data, 0, size);
memcpy(buf->b_data, os->os_phys_buf->b_data,
arc_buf_size(os->os_phys_buf));
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
os->os_phys_buf = buf;
@ -531,7 +531,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
os->os_phys = os->os_phys_buf->b_data;
bzero(os->os_phys, size);
memset(os->os_phys, 0, size);
}
/*
* These properties will be filled in by the logic in zfs_get_zplprop()

View File

@ -1148,7 +1148,7 @@ dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
dmu_recv_begin_arg_t drba = { 0 };
int err;
bzero(drc, sizeof (dmu_recv_cookie_t));
memset(drc, 0, sizeof (dmu_recv_cookie_t));
drc->drc_drr_begin = drr_begin;
drc->drc_drrb = &drr_begin->drr_u.drr_begin;
drc->drc_tosnap = tosnap;
@ -1211,7 +1211,6 @@ dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
} else {
/*
* For non-raw, non-incremental, non-resuming receives the
* user can specify encryption parameters on the command line
@ -1808,7 +1807,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));
memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));
/*
* Raw bonus buffers have their byteorder determined by the
@ -1949,11 +1948,11 @@ flush_write_batch_impl(struct receive_writer_arg *rwa)
zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
rwa->byteswap;
bcopy(drrw->drr_salt, zp.zp_salt,
memcpy(zp.zp_salt, drrw->drr_salt,
ZIO_DATA_SALT_LEN);
bcopy(drrw->drr_iv, zp.zp_iv,
memcpy(zp.zp_iv, drrw->drr_iv,
ZIO_DATA_IV_LEN);
bcopy(drrw->drr_mac, zp.zp_mac,
memcpy(zp.zp_mac, drrw->drr_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
zp.zp_nopwrite = B_FALSE;
@ -2218,7 +2217,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
}
}
bcopy(abd_to_buf(abd), abuf->b_data, DRR_SPILL_PAYLOAD_SIZE(drrs));
memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
abd_free(abd);
dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
@ -2291,9 +2290,9 @@ receive_object_range(struct receive_writer_arg *rwa,
rwa->or_crypt_params_present = B_TRUE;
rwa->or_firstobj = drror->drr_firstobj;
rwa->or_numslots = drror->drr_numslots;
bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
rwa->or_byteorder = byteorder;
return (0);

View File

@ -379,7 +379,7 @@ dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
}
}
/* create a FREE record and make it pending */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
@ -438,7 +438,7 @@ dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
}
}
/* create a REDACT record and make it pending */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_REDACT;
drrr->drr_object = object;
drrr->drr_offset = offset;
@ -480,7 +480,7 @@ dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a WRITE record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
@ -571,7 +571,7 @@ dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
ASSERT(BP_IS_EMBEDDED(bp));
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
drrw->drr_object = object;
drrw->drr_offset = offset;
@ -604,7 +604,7 @@ dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
}
/* write a SPILL record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
@ -686,7 +686,7 @@ dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
}
/* write a FREEOBJECTS record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
@ -727,7 +727,7 @@ dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
}
/* write an OBJECT record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
@ -801,7 +801,7 @@ dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
struct send_range record;
blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
bzero(&record, sizeof (struct send_range));
memset(&record, 0, sizeof (struct send_range));
record.type = DATA;
record.object = object;
record.eos_marker = B_FALSE;
@ -841,7 +841,7 @@ dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
dscp->dsc_pending_op = PENDING_NONE;
}
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
@ -1136,7 +1136,7 @@ send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
record->sru.object.bp = *bp;
size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
bcopy(dnp, record->sru.object.dnp, size);
memcpy(record->sru.object.dnp, dnp, size);
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
@ -2597,7 +2597,7 @@ dmu_send_impl(struct dmu_send_params *dspp)
* the receive side that the stream is incomplete.
*/
if (!dspp->savedok) {
bzero(drr, sizeof (dmu_replay_record_t));
memset(drr, 0, sizeof (dmu_replay_record_t));
drr->drr_type = DRR_END;
drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
@ -2698,7 +2698,7 @@ dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
uint64_t size = dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
bcopy(fromredact, dspp.fromredactsnaps, size);
memcpy(dspp.fromredactsnaps, fromredact, size);
}
boolean_t is_before =
@ -2883,7 +2883,7 @@ dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size,
KM_SLEEP);
bcopy(fromredact, dspp.fromredactsnaps,
memcpy(dspp.fromredactsnaps, fromredact,
size);
}
if (!dsl_dataset_is_before(dspp.to_ds, fromds,

View File

@ -168,8 +168,8 @@ resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
* If we found the block we're trying to resume from, zero
* the bookmark out to indicate that we have resumed.
*/
if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
bzero(td->td_resume, sizeof (*zb));
if (memcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
memset(td->td_resume, 0, sizeof (*zb));
if (td->td_flags & TRAVERSE_POST)
return (RESUME_SKIP_CHILDREN);
}

View File

@ -128,15 +128,15 @@ dnode_cons(void *arg, void *unused, int kmflag)
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);
bzero(&dn->dn_next_type[0], sizeof (dn->dn_next_type));
bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
bzero(&dn->dn_next_maxblkid[0], sizeof (dn->dn_next_maxblkid));
memset(dn->dn_next_type, 0, sizeof (dn->dn_next_type));
memset(dn->dn_next_nblkptr, 0, sizeof (dn->dn_next_nblkptr));
memset(dn->dn_next_nlevels, 0, sizeof (dn->dn_next_nlevels));
memset(dn->dn_next_indblkshift, 0, sizeof (dn->dn_next_indblkshift));
memset(dn->dn_next_bonustype, 0, sizeof (dn->dn_next_bonustype));
memset(dn->dn_rm_spillblk, 0, sizeof (dn->dn_rm_spillblk));
memset(dn->dn_next_bonuslen, 0, sizeof (dn->dn_next_bonuslen));
memset(dn->dn_next_blksz, 0, sizeof (dn->dn_next_blksz));
memset(dn->dn_next_maxblkid, 0, sizeof (dn->dn_next_maxblkid));
for (int i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
@ -317,7 +317,7 @@ dnode_byteswap(dnode_phys_t *dnp)
int i;
if (dnp->dn_type == DMU_OT_NONE) {
bzero(dnp, sizeof (dnode_phys_t));
memset(dnp, 0, sizeof (dnode_phys_t));
return;
}
@ -395,7 +395,7 @@ dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
/* clear any data after the end of the new size */
size_t diff = dn->dn_bonuslen - newsize;
char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize;
bzero(data_end, diff);
memset(data_end, 0, diff);
}
dn->dn_bonuslen = newsize;
@ -596,7 +596,7 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
ASSERT0(memcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
ASSERT(ot != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(ot));
@ -749,8 +749,6 @@ dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
int i;
ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
@ -774,29 +772,29 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ndn->dn_datablksz = odn->dn_datablksz;
ndn->dn_maxblkid = odn->dn_maxblkid;
ndn->dn_num_slots = odn->dn_num_slots;
bcopy(&odn->dn_next_type[0], &ndn->dn_next_type[0],
memcpy(ndn->dn_next_type, odn->dn_next_type,
sizeof (odn->dn_next_type));
bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
memcpy(ndn->dn_next_nblkptr, odn->dn_next_nblkptr,
sizeof (odn->dn_next_nblkptr));
bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
memcpy(ndn->dn_next_nlevels, odn->dn_next_nlevels,
sizeof (odn->dn_next_nlevels));
bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
memcpy(ndn->dn_next_indblkshift, odn->dn_next_indblkshift,
sizeof (odn->dn_next_indblkshift));
bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
memcpy(ndn->dn_next_bonustype, odn->dn_next_bonustype,
sizeof (odn->dn_next_bonustype));
bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
memcpy(ndn->dn_rm_spillblk, odn->dn_rm_spillblk,
sizeof (odn->dn_rm_spillblk));
bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
memcpy(ndn->dn_next_bonuslen, odn->dn_next_bonuslen,
sizeof (odn->dn_next_bonuslen));
bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
memcpy(ndn->dn_next_blksz, odn->dn_next_blksz,
sizeof (odn->dn_next_blksz));
bcopy(&odn->dn_next_maxblkid[0], &ndn->dn_next_maxblkid[0],
memcpy(ndn->dn_next_maxblkid, odn->dn_next_maxblkid,
sizeof (odn->dn_next_maxblkid));
for (i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++) {
list_move_tail(&ndn->dn_dirty_records[i],
&odn->dn_dirty_records[i]);
}
bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
memcpy(ndn->dn_free_ranges, odn->dn_free_ranges,
sizeof (odn->dn_free_ranges));
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
@ -850,7 +848,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
/*
* Satisfy the destructor.
*/
for (i = 0; i < TXG_SIZE; i++) {
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&odn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
@ -2081,7 +2079,7 @@ dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
dmu_buf_will_dirty(&db->db, tx);
data = db->db.db_data;
bzero(data + blkoff, len);
memset(data + blkoff, 0, len);
}
dbuf_rele(db, FTAG);
}

View File

@ -82,7 +82,7 @@ dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
ASSERT(db->db.db_data);
ASSERT(arc_released(db->db_buf));
ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
memcpy(db->db.db_data, dn->dn_phys->dn_blkptr,
sizeof (blkptr_t) * nblkptr);
arc_buf_freeze(db->db_buf);
@ -119,7 +119,7 @@ dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
mutex_exit(&child->db_mtx);
}
bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);
memset(dn->dn_phys->dn_blkptr, 0, sizeof (blkptr_t) * nblkptr);
rw_exit(&db->db_rwlock);
if (dn->dn_dbuf != NULL)
@ -158,7 +158,7 @@ free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
dmu_object_type_t type = BP_GET_TYPE(bp);
uint64_t lvl = BP_GET_LEVEL(bp);
bzero(bp, sizeof (blkptr_t));
memset(bp, 0, sizeof (blkptr_t));
if (spa_feature_is_active(dn->dn_objset->os_spa,
SPA_FEATURE_HOLE_BIRTH)) {
@ -347,7 +347,7 @@ free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
rw_enter(&db->db_rwlock, RW_WRITER);
for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
ASSERT(BP_IS_HOLE(bp));
bzero(db->db.db_data, db->db.db_size);
memset(db->db.db_data, 0, db->db.db_size);
free_blocks(dn, db->db_blkptr, 1, tx);
rw_exit(&db->db_rwlock);
}
@ -597,7 +597,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
ASSERT(dn->dn_free_txg > 0);
if (dn->dn_allocated_txg != dn->dn_free_txg)
dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
memset(dn->dn_phys, 0, sizeof (dnode_phys_t) * dn->dn_num_slots);
dnode_free_interior_slots(dn);
mutex_enter(&dn->dn_mtx);
@ -634,7 +634,7 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
ASSERT(dnp->dn_type != DMU_OT_NONE ||
bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
memcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
DNODE_VERIFY(dn);
ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));
@ -827,7 +827,7 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
ASSERT(dn->dn_allocated_txg == tx->tx_txg);
if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
/* zero the new blkptrs we are gaining */
bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
memset(dnp->dn_blkptr + dnp->dn_nblkptr, 0,
sizeof (blkptr_t) *
(dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG

View File

@ -82,7 +82,7 @@ dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
* Zero out the bookmark in case the one stored on disk
* is in an older, shorter format.
*/
bzero(bmark_phys, sizeof (*bmark_phys));
memset(bmark_phys, 0, sizeof (*bmark_phys));
err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
@ -381,7 +381,7 @@ dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
&zbm->zbm_uncompressed_freed_before_next_snap);
dsl_dataset_rele(nextds, FTAG);
} else {
bzero(&zbm->zbm_flags,
memset(&zbm->zbm_flags, 0,
sizeof (zfs_bookmark_phys_t) -
offsetof(zfs_bookmark_phys_t, zbm_flags));
}
@ -426,8 +426,8 @@ dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
}
__attribute__((unused)) zfs_bookmark_phys_t zero_phys = { 0 };
ASSERT0(bcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
zfs_bookmark_phys_t zero_phys = { 0 };
ASSERT0(memcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
&zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
@ -482,7 +482,7 @@ dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
sizeof (redaction_list_phys_t) + num_redact_snaps *
sizeof (uint64_t));
dmu_buf_will_dirty(local_rl->rl_dbuf, tx);
bcopy(redact_snaps, local_rl->rl_phys->rlp_snaps,
memcpy(local_rl->rl_phys->rlp_snaps, redact_snaps,
sizeof (uint64_t) * num_redact_snaps);
local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
if (bookmark_redacted) {

View File

@ -97,7 +97,7 @@ dsl_wrapping_key_free(dsl_wrapping_key_t *wkey)
ASSERT0(zfs_refcount_count(&wkey->wk_refcnt));
if (wkey->wk_key.ck_data) {
bzero(wkey->wk_key.ck_data,
memset(wkey->wk_key.ck_data, 0,
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
kmem_free(wkey->wk_key.ck_data,
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
@ -120,7 +120,7 @@ dsl_wrapping_key_create(uint8_t *wkeydata, zfs_keyformat_t keyformat,
wkey->wk_key.ck_data = kmem_alloc(WRAPPING_KEY_LEN, KM_SLEEP);
wkey->wk_key.ck_length = CRYPTO_BYTES2BITS(WRAPPING_KEY_LEN);
bcopy(wkeydata, wkey->wk_key.ck_data, WRAPPING_KEY_LEN);
memcpy(wkey->wk_key.ck_data, wkeydata, WRAPPING_KEY_LEN);
/* initialize the rest of the struct */
zfs_refcount_create(&wkey->wk_refcnt);
@ -591,7 +591,7 @@ dsl_crypto_key_open(objset_t *mos, dsl_wrapping_key_t *wkey,
error:
if (dck != NULL) {
bzero(dck, sizeof (dsl_crypto_key_t));
memset(dck, 0, sizeof (dsl_crypto_key_t));
kmem_free(dck, sizeof (dsl_crypto_key_t));
}
@ -2095,8 +2095,8 @@ dsl_crypto_recv_raw_objset_sync(dsl_dataset_t *ds, dmu_objset_type_t ostype,
* written out raw next time.
*/
arc_release(os->os_phys_buf, &os->os_phys_buf);
bcopy(portable_mac, os->os_phys->os_portable_mac, ZIO_OBJSET_MAC_LEN);
bzero(os->os_phys->os_local_mac, ZIO_OBJSET_MAC_LEN);
memcpy(os->os_phys->os_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
memset(os->os_phys->os_local_mac, 0, ZIO_OBJSET_MAC_LEN);
os->os_flags &= ~OBJSET_FLAG_USERACCOUNTING_COMPLETE;
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
@ -2547,7 +2547,7 @@ dsl_crypto_key_create_sync(uint64_t crypt, dsl_wrapping_key_t *wkey,
DSL_CRYPTO_KEY_VERSION, sizeof (uint64_t), 1, &version, tx));
zio_crypt_key_destroy(&dck.dck_key);
bzero(&dck.dck_key, sizeof (zio_crypt_key_t));
memset(&dck.dck_key, 0, sizeof (zio_crypt_key_t));
return (dck.dck_obj);
}
@ -2687,14 +2687,15 @@ spa_do_crypt_objset_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj,
/* if we are generating encode the HMACs in the objset_phys_t */
if (generate) {
bcopy(portable_mac, osp->os_portable_mac, ZIO_OBJSET_MAC_LEN);
bcopy(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN);
memcpy(osp->os_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
memcpy(osp->os_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
abd_return_buf_copy(abd, buf, datalen);
return (0);
}
if (bcmp(portable_mac, osp->os_portable_mac, ZIO_OBJSET_MAC_LEN) != 0 ||
bcmp(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN) != 0) {
if (memcmp(portable_mac, osp->os_portable_mac,
ZIO_OBJSET_MAC_LEN) != 0 ||
memcmp(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN) != 0) {
abd_return_buf(abd, buf, datalen);
return (SET_ERROR(ECKSUM));
}
@ -2738,11 +2739,11 @@ spa_do_crypt_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj, abd_t *abd,
* Otherwise verify that the MAC matched what we expected.
*/
if (generate) {
bcopy(digestbuf, mac, ZIO_DATA_MAC_LEN);
memcpy(mac, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
if (bcmp(digestbuf, mac, ZIO_DATA_MAC_LEN) != 0)
if (memcmp(digestbuf, mac, ZIO_DATA_MAC_LEN) != 0)
return (SET_ERROR(ECKSUM));
return (0);
@ -2841,9 +2842,9 @@ spa_do_crypt_abd(boolean_t encrypt, spa_t *spa, const zbookmark_phys_t *zb,
error:
if (encrypt) {
/* zero out any state we might have changed while encrypting */
bzero(salt, ZIO_DATA_SALT_LEN);
bzero(iv, ZIO_DATA_IV_LEN);
bzero(mac, ZIO_DATA_MAC_LEN);
memset(salt, 0, ZIO_DATA_SALT_LEN);
memset(iv, 0, ZIO_DATA_IV_LEN);
memset(mac, 0, ZIO_DATA_MAC_LEN);
abd_return_buf(pabd, plainbuf, datalen);
abd_return_buf_copy(cabd, cipherbuf, datalen);
} else {

View File

@ -1148,7 +1148,7 @@ dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
bzero(dsphys, sizeof (dsl_dataset_phys_t));
memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = dd->dd_object;
dsphys->ds_flags = flags;
dsphys->ds_fsid_guid = unique_create();
@ -1248,11 +1248,11 @@ dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
if (bcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
if (memcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
dsl_pool_t *dp = ds->ds_dir->dd_pool;
zio_t *zio;
bzero(&os->os_zil_header, sizeof (os->os_zil_header));
memset(&os->os_zil_header, 0, sizeof (os->os_zil_header));
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
@ -1696,7 +1696,7 @@ dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
*/
ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
dmu_objset_from_ds(ds, &os) != 0 ||
bcmp(&os->os_phys->os_zil_header, &zero_zil,
memcmp(&os->os_phys->os_zil_header, &zero_zil,
sizeof (zero_zil)) == 0);
/* Should not snapshot a dirty dataset. */
@ -1718,7 +1718,7 @@ dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
bzero(dsphys, sizeof (dsl_dataset_phys_t));
memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = ds->ds_dir->dd_object;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
@ -2895,7 +2895,7 @@ dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
return (B_TRUE);
if (dmu_objset_from_ds(snap, &os_snap) != 0)
return (B_TRUE);
return (bcmp(&os->os_phys->os_meta_dnode,
return (memcmp(&os->os_phys->os_meta_dnode,
&os_snap->os_phys->os_meta_dnode,
sizeof (os->os_phys->os_meta_dnode)) != 0);
}
@ -4916,7 +4916,7 @@ dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
if (num_redact_snaps > 0) {
ftuaa->array = kmem_alloc(num_redact_snaps * sizeof (uint64_t),
KM_SLEEP);
bcopy(redact_snaps, ftuaa->array, num_redact_snaps *
memcpy(ftuaa->array, redact_snaps, num_redact_snaps *
sizeof (uint64_t));
}
dsl_dataset_activate_feature(dsobj, SPA_FEATURE_REDACTED_DATASETS,

View File

@ -855,7 +855,7 @@ dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
dlp = bonus->db_data;
dmu_buf_will_dirty(bonus, tx);
bzero(dlp, sizeof (*dlp));
memset(dlp, 0, sizeof (*dlp));
dmu_buf_rele(bonus, FTAG);
mutex_exit(&dl->dl_lock);
}

View File

@ -393,7 +393,7 @@ dsl_scan_resilvering(dsl_pool_t *dp)
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
bzero(bp, sizeof (*bp));
memset(bp, 0, sizeof (*bp));
bp->blk_prop = sio->sio_blk_prop;
bp->blk_phys_birth = sio->sio_phys_birth;
bp->blk_birth = sio->sio_birth;
@ -403,7 +403,7 @@ sio2bp(const scan_io_t *sio, blkptr_t *bp)
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
bcopy(sio->sio_dva, bp->blk_dva, sio->sio_nr_dvas * sizeof (dva_t));
memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
@ -508,7 +508,7 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
return (EOVERFLOW);
}
bcopy(zaptmp, &scn->scn_phys,
memcpy(&scn->scn_phys, zaptmp,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
@ -567,7 +567,7 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
}
}
bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
@ -689,7 +689,7 @@ dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
bcopy(&scn->scn_phys, &scn->scn_phys_cached,
memcpy(&scn->scn_phys_cached, &scn->scn_phys,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
@ -730,7 +730,7 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
bzero(&scn->scn_phys, sizeof (scn->scn_phys));
memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
@ -798,7 +798,8 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
mutex_init(&dp->dp_blkstats->zab_lock, NULL,
MUTEX_DEFAULT, NULL);
}
bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));
memset(&dp->dp_blkstats->zab_type, 0,
sizeof (dp->dp_blkstats->zab_type));
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
@ -806,7 +807,7 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
@ -1792,14 +1793,15 @@ dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
* indicate that it's OK to start checking for suspending
* again.
*/
if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
if (memcmp(zb, &scn->scn_phys.scn_bookmark,
sizeof (*zb)) == 0 ||
zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
}
}
return (B_FALSE);
@ -2651,12 +2653,10 @@ static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
ddt_entry_t dde;
ddt_entry_t dde = {{{{0}}}};
int error;
uint64_t n = 0;
bzero(&dde, sizeof (ddt_entry_t));
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
ddt_t *ddt;
@ -2749,7 +2749,7 @@ dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the

View File

@ -53,10 +53,10 @@ abd_checksum_edonr_native(abd_t *abd, uint64_t size,
EdonRState ctx;
ASSERT(ctx_template != NULL);
bcopy(ctx_template, &ctx, sizeof (ctx));
memcpy(&ctx, ctx_template, sizeof (ctx));
(void) abd_iterate_func(abd, 0, size, edonr_incremental, &ctx);
EdonRFinal(&ctx, digest);
bcopy(digest, zcp->zc_word, sizeof (zcp->zc_word));
memcpy(zcp->zc_word, digest, sizeof (zcp->zc_word));
}
/*
@ -108,8 +108,8 @@ abd_checksum_edonr_tmpl_init(const zio_cksum_salt_t *salt)
void
abd_checksum_edonr_tmpl_free(void *ctx_template)
{
EdonRState *ctx = ctx_template;
EdonRState *ctx = ctx_template;
bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
kmem_free(ctx, sizeof (*ctx));
}

View File

@ -66,7 +66,7 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
if (d_len != s_len)
return (s_len);
bcopy(s_start, d_start, s_len);
memcpy(d_start, s_start, s_len);
return (s_len);
}
/* if hardware compression fails, do it again with software */
@ -76,7 +76,7 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
if (d_len != s_len)
return (s_len);
bcopy(s_start, d_start, s_len);
memcpy(d_start, s_start, s_len);
return (s_len);
}

View File

@ -132,7 +132,7 @@ hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len,
if (ret != CRYPTO_SUCCESS)
return (SET_ERROR(EIO));
bcopy(T, out_buf + pos,
memcpy(out_buf + pos, T,
(i != N) ? SHA512_DIGEST_LENGTH : (out_len - pos));
pos += SHA512_DIGEST_LENGTH;
}

View File

@ -1952,9 +1952,9 @@ metaslab_aux_histograms_clear(metaslab_t *msp)
*/
ASSERT(msp->ms_loaded);
bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
for (int t = 0; t < TXG_DEFER_SIZE; t++)
bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
}
static void
@ -2044,13 +2044,13 @@ metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
*/
uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
if (defer_allowed) {
bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
sizeof (msp->ms_synchist));
} else {
bzero(msp->ms_deferhist[hist_index],
memset(msp->ms_deferhist[hist_index], 0,
sizeof (msp->ms_deferhist[hist_index]));
}
bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
}
/*
@ -5296,7 +5296,7 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
goto top;
}
bzero(&dva[d], sizeof (dva_t));
memset(&dva[d], 0, sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
return (SET_ERROR(ENOSPC));
@ -5809,7 +5809,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags,
allocator, B_FALSE);
bzero(&dva[d], sizeof (dva_t));
memset(&dva[d], 0, sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);

View File

@ -78,7 +78,7 @@
static inline void
rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
{
ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
ASSERT3U(rt->rt_type, <, RANGE_SEG_NUM_TYPES);
size_t size = 0;
switch (rt->rt_type) {
case RANGE_SEG32:
@ -91,9 +91,9 @@ rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
size = sizeof (range_seg_gap_t);
break;
default:
VERIFY(0);
__builtin_unreachable();
}
bcopy(src, dest, size);
memcpy(dest, src, size);
}
void
@ -701,7 +701,7 @@ range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
zfs_btree_clear(&rt->rt_root);
}
bzero(rt->rt_histogram, sizeof (rt->rt_histogram));
memset(rt->rt_histogram, 0, sizeof (rt->rt_histogram));
rt->rt_space = 0;
}

View File

@ -160,7 +160,7 @@ do { \
*(uint64_t *)((uintptr_t)t + 8) = \
*(uint64_t *)((uintptr_t)s + 8); \
} else { \
bcopy(s, t, l); \
memcpy(t, s, l); \
} \
} else { \
sa_copy_data(f, s, t, l); \
@ -414,7 +414,7 @@ sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count,
tb->lot_attr_count = attr_count;
tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
KM_SLEEP);
bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
memcpy(tb->lot_attrs, attrs, sizeof (sa_attr_type_t) * attr_count);
tb->lot_num = lot_num;
tb->lot_hash = hash;
tb->lot_instance = 0;
@ -511,7 +511,7 @@ static void
sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
{
if (func == NULL) {
bcopy(datastart, target, buflen);
memcpy(target, datastart, buflen);
} else {
boolean_t start;
int bytes;
@ -523,7 +523,7 @@ sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
bytes = 0;
while (bytes < buflen) {
func(&dataptr, &length, buflen, start, datastart);
bcopy(dataptr, saptr, length);
memcpy(saptr, dataptr, length);
saptr = (void *)((caddr_t)saptr + length);
bytes += length;
start = B_FALSE;
@ -1664,8 +1664,9 @@ sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
&xattr, 8);
if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
scanstamp, AV_SCANSTAMP_SZ);
memcpy(scanstamp,
(caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
AV_SCANSTAMP_SZ);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
scanstamp, AV_SCANSTAMP_SZ);
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
@ -1873,7 +1874,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
if (dn->dn_bonuslen != 0) {
bonus_data_size = hdl->sa_bonus->db_size;
old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
bcopy(hdl->sa_bonus->db_data, old_data[0],
memcpy(old_data[0], hdl->sa_bonus->db_data,
hdl->sa_bonus->db_size);
bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
} else {
@ -1886,7 +1887,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
if ((error = sa_get_spill(hdl)) == 0) {
spill_data_size = hdl->sa_spill->db_size;
old_data[1] = vmem_alloc(spill_data_size, KM_SLEEP);
bcopy(hdl->sa_spill->db_data, old_data[1],
memcpy(old_data[1], hdl->sa_spill->db_data,
hdl->sa_spill->db_size);
spill_attr_count =
hdl->sa_spill_tab->sa_layout->lot_attr_count;

View File

@ -45,13 +45,13 @@ void
abd_checksum_skein_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
Skein_512_Ctxt_t ctx;
Skein_512_Ctxt_t ctx;
ASSERT(ctx_template != NULL);
bcopy(ctx_template, &ctx, sizeof (ctx));
memcpy(&ctx, ctx_template, sizeof (ctx));
(void) abd_iterate_func(abd, 0, size, skein_incremental, &ctx);
(void) Skein_512_Final(&ctx, (uint8_t *)zcp);
bzero(&ctx, sizeof (ctx));
memset(&ctx, 0, sizeof (ctx));
}
/*
@ -79,9 +79,8 @@ abd_checksum_skein_byteswap(abd_t *abd, uint64_t size,
void *
abd_checksum_skein_tmpl_init(const zio_cksum_salt_t *salt)
{
Skein_512_Ctxt_t *ctx;
Skein_512_Ctxt_t *ctx = kmem_zalloc(sizeof (*ctx), KM_SLEEP);
ctx = kmem_zalloc(sizeof (*ctx), KM_SLEEP);
(void) Skein_512_InitExt(ctx, sizeof (zio_cksum_t) * 8, 0,
salt->zcs_bytes, sizeof (salt->zcs_bytes));
return (ctx);
@ -94,8 +93,8 @@ abd_checksum_skein_tmpl_init(const zio_cksum_salt_t *salt)
void
abd_checksum_skein_tmpl_free(void *ctx_template)
{
Skein_512_Ctxt_t *ctx = ctx_template;
Skein_512_Ctxt_t *ctx = ctx_template;
bzero(ctx, sizeof (*ctx));
memset(ctx, 0, sizeof (*ctx));
kmem_free(ctx, sizeof (*ctx));
}

View File

@ -947,8 +947,8 @@ spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
@ -8495,7 +8495,7 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
bzero(packed + nvsize, bufsize - nvsize);
memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);

View File

@ -166,7 +166,7 @@ spa_checkpoint_get_stats(spa_t *spa, pool_checkpoint_stat_t *pcs)
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_NO_CHECKPOINT));
bzero(pcs, sizeof (pool_checkpoint_stat_t));
memset(pcs, 0, sizeof (pool_checkpoint_stat_t));
int error = zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT);

Some files were not shown because too many files have changed in this diff Show More