env: Rename spdk_malloc/zmalloc/realloc/free to spdk_dma_(func)
- rename spdk_malloc_socket to spdk_dma_malloc_socket - rename spdk_malloc to spdk_dma_malloc - rename spdk_zmalloc to spdk_dma_zmalloc - rename spdk_realloc to spdk_dma_realloc - rename spdk_free to spdk_dma_free Change-Id: I52a11b7a4243281f9c56f503e826fd7c4a1fd883 Signed-off-by: John Meneghini <johnm@netapp.com> Reviewed-on: https://review.gerrithub.io/362604 Reviewed-by: Jim Harris <james.r.harris@intel.com> Tested-by: SPDK Automated Test System <sys_sgsw@intel.com> Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
parent
ad476cae97
commit
8a44220b1a
@ -125,7 +125,7 @@ ioat_exit(void)
|
||||
if (dev->ioat) {
|
||||
spdk_ioat_detach(dev->ioat);
|
||||
}
|
||||
spdk_free(dev);
|
||||
spdk_dma_free(dev);
|
||||
}
|
||||
}
|
||||
|
||||
@ -221,7 +221,7 @@ attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_ioat_chan *
|
||||
return;
|
||||
}
|
||||
|
||||
dev = spdk_zmalloc(sizeof(*dev), 0, NULL);
|
||||
dev = spdk_dma_zmalloc(sizeof(*dev), 0, NULL);
|
||||
if (dev == NULL) {
|
||||
printf("Failed to allocate device struct\n");
|
||||
return;
|
||||
|
@ -359,7 +359,7 @@ init_src_buffer(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
g_src = spdk_zmalloc(SRC_BUFFER_SIZE, 512, NULL);
|
||||
g_src = spdk_dma_zmalloc(SRC_BUFFER_SIZE, 512, NULL);
|
||||
if (g_src == NULL) {
|
||||
fprintf(stderr, "Allocate src buffer failed\n");
|
||||
return -1;
|
||||
@ -476,7 +476,7 @@ main(int argc, char **argv)
|
||||
rc = dump_result(threads, RTE_MAX_LCORE);
|
||||
|
||||
cleanup:
|
||||
spdk_free(g_src);
|
||||
spdk_dma_free(g_src);
|
||||
ioat_exit();
|
||||
|
||||
return rc;
|
||||
|
@ -297,9 +297,9 @@ static void
|
||||
task_ctor(struct rte_mempool *mp, void *arg, void *__task, unsigned id)
|
||||
{
|
||||
struct arb_task *task = __task;
|
||||
task->buf = spdk_zmalloc(g_arbitration.io_size_bytes, 0x200, NULL);
|
||||
task->buf = spdk_dma_zmalloc(g_arbitration.io_size_bytes, 0x200, NULL);
|
||||
if (task->buf == NULL) {
|
||||
fprintf(stderr, "task->buf spdk_zmalloc failed\n");
|
||||
fprintf(stderr, "task->buf spdk_dma_zmalloc failed\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
@ -441,7 +441,7 @@ cleanup(void)
|
||||
};
|
||||
|
||||
if (rte_mempool_get(task_pool, (void **)&task) == 0) {
|
||||
spdk_free(task->buf);
|
||||
spdk_dma_free(task->buf);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -309,13 +309,13 @@ static int spdk_fio_close(struct thread_data *td, struct fio_file *f)
|
||||
|
||||
static int spdk_fio_iomem_alloc(struct thread_data *td, size_t total_mem)
|
||||
{
|
||||
td->orig_buffer = spdk_zmalloc(total_mem, NVME_IO_ALIGN, NULL);
|
||||
td->orig_buffer = spdk_dma_zmalloc(total_mem, NVME_IO_ALIGN, NULL);
|
||||
return td->orig_buffer == NULL;
|
||||
}
|
||||
|
||||
static void spdk_fio_iomem_free(struct thread_data *td)
|
||||
{
|
||||
spdk_free(td->orig_buffer);
|
||||
spdk_dma_free(td->orig_buffer);
|
||||
}
|
||||
|
||||
static int spdk_fio_io_u_init(struct thread_data *td, struct io_u *io_u)
|
||||
|
@ -108,7 +108,7 @@ read_complete(void *arg, const struct spdk_nvme_cpl *completion)
|
||||
* to exit its polling loop.
|
||||
*/
|
||||
printf("%s", sequence->buf);
|
||||
spdk_free(sequence->buf);
|
||||
spdk_dma_free(sequence->buf);
|
||||
sequence->is_completed = 1;
|
||||
}
|
||||
|
||||
@ -124,8 +124,8 @@ write_complete(void *arg, const struct spdk_nvme_cpl *completion)
|
||||
* the write I/O and allocate a new zeroed buffer for reading
|
||||
* the data back from the NVMe namespace.
|
||||
*/
|
||||
spdk_free(sequence->buf);
|
||||
sequence->buf = spdk_zmalloc(0x1000, 0x1000, NULL);
|
||||
spdk_dma_free(sequence->buf);
|
||||
sequence->buf = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
|
||||
|
||||
rc = spdk_nvme_ns_cmd_read(ns_entry->ns, ns_entry->qpair, sequence->buf,
|
||||
0, /* LBA start */
|
||||
@ -165,11 +165,11 @@ hello_world(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* Use spdk_zmalloc to allocate a 4KB zeroed buffer. This memory
|
||||
* Use spdk_dma_zmalloc to allocate a 4KB zeroed buffer. This memory
|
||||
* will be pinned, which is required for data buffers used for SPDK NVMe
|
||||
* I/O operations.
|
||||
*/
|
||||
sequence.buf = spdk_zmalloc(0x1000, 0x1000, NULL);
|
||||
sequence.buf = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
|
||||
sequence.is_completed = 0;
|
||||
sequence.ns_entry = ns_entry;
|
||||
|
||||
|
@ -147,7 +147,7 @@ unregister_dev(struct dev_ctx *dev)
|
||||
static void task_ctor(struct rte_mempool *mp, void *arg, void *__task, unsigned id)
|
||||
{
|
||||
struct perf_task *task = __task;
|
||||
task->buf = spdk_zmalloc(g_io_size_bytes, 0x200, NULL);
|
||||
task->buf = spdk_dma_zmalloc(g_io_size_bytes, 0x200, NULL);
|
||||
if (task->buf == NULL) {
|
||||
fprintf(stderr, "task->buf rte_malloc failed\n");
|
||||
exit(1);
|
||||
|
@ -80,7 +80,7 @@ identify_common_ns_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
|
||||
|
||||
if (cpl->status.sc != SPDK_NVME_SC_SUCCESS) {
|
||||
/* Identify Namespace for NSID = FFFFFFFFh is optional, so failure is not fatal. */
|
||||
spdk_free(dev->common_ns_data);
|
||||
spdk_dma_free(dev->common_ns_data);
|
||||
dev->common_ns_data = NULL;
|
||||
}
|
||||
|
||||
@ -102,7 +102,7 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
|
||||
/* Retrieve controller data */
|
||||
dev->cdata = spdk_nvme_ctrlr_get_data(dev->ctrlr);
|
||||
|
||||
dev->common_ns_data = spdk_zmalloc(sizeof(struct spdk_nvme_ns_data), 4096, NULL);
|
||||
dev->common_ns_data = spdk_dma_zmalloc(sizeof(struct spdk_nvme_ns_data), 4096, NULL);
|
||||
if (dev->common_ns_data == NULL) {
|
||||
fprintf(stderr, "common_ns_data allocation failure\n");
|
||||
return;
|
||||
@ -118,7 +118,7 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
|
||||
if (spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, dev->common_ns_data,
|
||||
sizeof(struct spdk_nvme_ns_data), identify_common_ns_cb, dev) != 0) {
|
||||
dev->outstanding_admin_cmds--;
|
||||
spdk_free(dev->common_ns_data);
|
||||
spdk_dma_free(dev->common_ns_data);
|
||||
dev->common_ns_data = NULL;
|
||||
}
|
||||
|
||||
@ -357,7 +357,7 @@ get_allocated_nsid(struct dev *dev)
|
||||
struct spdk_nvme_ns_list *ns_list;
|
||||
struct spdk_nvme_cmd cmd = {0};
|
||||
|
||||
ns_list = spdk_zmalloc(sizeof(*ns_list), 4096, NULL);
|
||||
ns_list = spdk_dma_zmalloc(sizeof(*ns_list), 4096, NULL);
|
||||
if (ns_list == NULL) {
|
||||
printf("Allocation error\n");
|
||||
return 0;
|
||||
@ -371,7 +371,7 @@ get_allocated_nsid(struct dev *dev)
|
||||
if (spdk_nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, ns_list, sizeof(*ns_list),
|
||||
identify_allocated_ns_cb, dev)) {
|
||||
printf("Identify command failed\n");
|
||||
spdk_free(ns_list);
|
||||
spdk_dma_free(ns_list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -387,7 +387,7 @@ get_allocated_nsid(struct dev *dev)
|
||||
printf("%u\n", ns_list->ns_list[i]);
|
||||
}
|
||||
|
||||
spdk_free(ns_list);
|
||||
spdk_dma_free(ns_list);
|
||||
|
||||
printf("Please Input Namespace ID: \n");
|
||||
if (!scanf("%u", &nsid)) {
|
||||
@ -404,8 +404,8 @@ ns_attach(struct dev *device, int attachment_op, int ctrlr_id, int ns_id)
|
||||
int ret = 0;
|
||||
struct spdk_nvme_ctrlr_list *ctrlr_list;
|
||||
|
||||
ctrlr_list = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_list),
|
||||
4096, NULL);
|
||||
ctrlr_list = spdk_dma_zmalloc(sizeof(struct spdk_nvme_ctrlr_list),
|
||||
4096, NULL);
|
||||
if (ctrlr_list == NULL) {
|
||||
printf("Allocation error (controller list)\n");
|
||||
exit(1);
|
||||
@ -424,7 +424,7 @@ ns_attach(struct dev *device, int attachment_op, int ctrlr_id, int ns_id)
|
||||
fprintf(stdout, "ns attach: Failed\n");
|
||||
}
|
||||
|
||||
spdk_free(ctrlr_list);
|
||||
spdk_dma_free(ctrlr_list);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -434,7 +434,7 @@ ns_manage_add(struct dev *device, uint64_t ns_size, uint64_t ns_capacity, int ns
|
||||
uint32_t nsid;
|
||||
struct spdk_nvme_ns_data *ndata;
|
||||
|
||||
ndata = spdk_zmalloc(sizeof(struct spdk_nvme_ns_data), 4096, NULL);
|
||||
ndata = spdk_dma_zmalloc(sizeof(struct spdk_nvme_ns_data), 4096, NULL);
|
||||
if (ndata == NULL) {
|
||||
printf("Allocation error (namespace data)\n");
|
||||
exit(1);
|
||||
@ -455,7 +455,7 @@ ns_manage_add(struct dev *device, uint64_t ns_size, uint64_t ns_capacity, int ns
|
||||
printf("Created namespace ID %u\n", nsid);
|
||||
}
|
||||
|
||||
spdk_free(ndata);
|
||||
spdk_dma_free(ndata);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -795,7 +795,7 @@ update_firmware_image(void)
|
||||
|
||||
size = fw_stat.st_size;
|
||||
|
||||
fw_image = spdk_zmalloc(size, 4096, NULL);
|
||||
fw_image = spdk_dma_zmalloc(size, 4096, NULL);
|
||||
if (fw_image == NULL) {
|
||||
printf("Allocation error\n");
|
||||
close(fd);
|
||||
@ -805,7 +805,7 @@ update_firmware_image(void)
|
||||
if (read(fd, fw_image, size) != ((ssize_t)(size))) {
|
||||
printf("Read firmware image failed\n");
|
||||
close(fd);
|
||||
spdk_free(fw_image);
|
||||
spdk_dma_free(fw_image);
|
||||
return;
|
||||
}
|
||||
close(fd);
|
||||
@ -813,7 +813,7 @@ update_firmware_image(void)
|
||||
printf("Please Input Slot(0 - 7): \n");
|
||||
if (!scanf("%d", &slot)) {
|
||||
printf("Invalid Slot\n");
|
||||
spdk_free(fw_image);
|
||||
spdk_dma_free(fw_image);
|
||||
while (getchar() != '\n');
|
||||
return;
|
||||
}
|
||||
@ -824,7 +824,7 @@ update_firmware_image(void)
|
||||
} else {
|
||||
printf("spdk_nvme_ctrlr_update_firmware success\n");
|
||||
}
|
||||
spdk_free(fw_image);
|
||||
spdk_dma_free(fw_image);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
|
@ -375,8 +375,8 @@ register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
entry->latency_page = spdk_zmalloc(sizeof(struct spdk_nvme_intel_rw_latency_page),
|
||||
4096, NULL);
|
||||
entry->latency_page = spdk_dma_zmalloc(sizeof(struct spdk_nvme_intel_rw_latency_page),
|
||||
4096, NULL);
|
||||
if (entry->latency_page == NULL) {
|
||||
printf("Allocation error (latency page)\n");
|
||||
exit(1);
|
||||
@ -516,9 +516,9 @@ aio_check_io(struct ns_worker_ctx *ns_ctx)
|
||||
static void task_ctor(struct rte_mempool *mp, void *arg, void *__task, unsigned id)
|
||||
{
|
||||
struct perf_task *task = __task;
|
||||
task->buf = spdk_zmalloc(g_io_size_bytes, g_io_align, NULL);
|
||||
task->buf = spdk_dma_zmalloc(g_io_size_bytes, g_io_align, NULL);
|
||||
if (task->buf == NULL) {
|
||||
fprintf(stderr, "task->buf spdk_zmalloc failed\n");
|
||||
fprintf(stderr, "task->buf spdk_dma_zmalloc failed\n");
|
||||
exit(1);
|
||||
}
|
||||
memset(task->buf, id % 8, g_io_size_bytes);
|
||||
@ -1314,7 +1314,7 @@ unregister_controllers(void)
|
||||
|
||||
while (entry) {
|
||||
struct ctrlr_entry *next = entry->next;
|
||||
spdk_free(entry->latency_page);
|
||||
spdk_dma_free(entry->latency_page);
|
||||
if (g_latency_ssd_tracking_enable &&
|
||||
spdk_nvme_ctrlr_is_feature_supported(entry->ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING))
|
||||
set_latency_tracking_feature(entry->ctrlr, false);
|
||||
|
@ -76,37 +76,37 @@ void spdk_env_init(const struct spdk_env_opts *opts);
|
||||
* Allocate a pinned, physically contiguous memory buffer with the
|
||||
* given size and alignment.
|
||||
*/
|
||||
void *spdk_malloc(size_t size, size_t align, uint64_t *phys_addr);
|
||||
void *spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr);
|
||||
|
||||
/**
|
||||
* Allocate a pinned, physically contiguous memory buffer with the
|
||||
* given size, alignment and socket id.
|
||||
*/
|
||||
void *spdk_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id);
|
||||
void *spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id);
|
||||
|
||||
/**
|
||||
* Allocate a pinned, physically contiguous memory buffer with the
|
||||
* given size and alignment. The buffer will be zeroed.
|
||||
*/
|
||||
void *spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr);
|
||||
void *spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr);
|
||||
|
||||
/**
|
||||
* Allocate a pinned, physically contiguous memory buffer with the
|
||||
* given size, alignment and socket id. The buffer will be zeroed.
|
||||
*/
|
||||
void *spdk_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id);
|
||||
void *spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id);
|
||||
|
||||
/**
|
||||
* Resize the allocated and pinned memory buffer with the given
|
||||
* new size and alignment. Existing contents are preserved.
|
||||
*/
|
||||
void *spdk_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr);
|
||||
void *spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr);
|
||||
|
||||
/**
|
||||
* Free a memory buffer previously allocated with spdk_zmalloc.
|
||||
* Free a memory buffer previously allocated with spdk_dma_zmalloc.
|
||||
* This call is never made from the performance path.
|
||||
*/
|
||||
void spdk_free(void *buf);
|
||||
void spdk_dma_free(void *buf);
|
||||
|
||||
/**
|
||||
* Reserve a named, process shared memory zone with the given size,
|
||||
|
@ -129,8 +129,8 @@ blockdev_malloc_destruct(void *ctx)
|
||||
{
|
||||
struct malloc_disk *malloc_disk = ctx;
|
||||
blockdev_malloc_delete_from_list(malloc_disk);
|
||||
spdk_free(malloc_disk->malloc_buf);
|
||||
spdk_free(malloc_disk);
|
||||
spdk_dma_free(malloc_disk->malloc_buf);
|
||||
spdk_dma_free(malloc_disk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -376,7 +376,7 @@ struct spdk_bdev *create_malloc_disk(uint64_t num_blocks, uint32_t block_size)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
mdisk = spdk_zmalloc(sizeof(*mdisk), 0, NULL);
|
||||
mdisk = spdk_dma_zmalloc(sizeof(*mdisk), 0, NULL);
|
||||
if (!mdisk) {
|
||||
perror("mdisk");
|
||||
return NULL;
|
||||
@ -388,10 +388,10 @@ struct spdk_bdev *create_malloc_disk(uint64_t num_blocks, uint32_t block_size)
|
||||
* TODO: need to pass a hint so we know which socket to allocate
|
||||
* from on multi-socket systems.
|
||||
*/
|
||||
mdisk->malloc_buf = spdk_zmalloc(num_blocks * block_size, 2 * 1024 * 1024, NULL);
|
||||
mdisk->malloc_buf = spdk_dma_zmalloc(num_blocks * block_size, 2 * 1024 * 1024, NULL);
|
||||
if (!mdisk->malloc_buf) {
|
||||
SPDK_ERRLOG("spdk_zmalloc failed\n");
|
||||
spdk_free(mdisk);
|
||||
SPDK_ERRLOG("spdk_dma_zmalloc failed\n");
|
||||
spdk_dma_free(mdisk);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -417,8 +417,8 @@ struct spdk_bdev *create_malloc_disk(uint64_t num_blocks, uint32_t block_size)
|
||||
|
||||
static void free_malloc_disk(struct malloc_disk *mdisk)
|
||||
{
|
||||
spdk_free(mdisk->malloc_buf);
|
||||
spdk_free(mdisk);
|
||||
spdk_dma_free(mdisk->malloc_buf);
|
||||
spdk_dma_free(mdisk);
|
||||
}
|
||||
|
||||
static int blockdev_malloc_initialize(void)
|
||||
|
@ -63,7 +63,7 @@ blockdev_null_destruct(void *ctx)
|
||||
struct null_bdev *bdev = ctx;
|
||||
|
||||
TAILQ_REMOVE(&g_null_bdev_head, bdev, tailq);
|
||||
spdk_free(bdev);
|
||||
spdk_dma_free(bdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -135,7 +135,7 @@ create_null_bdev(const char *name, uint64_t num_blocks, uint32_t block_size)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bdev = spdk_zmalloc(sizeof(*bdev), 0, NULL);
|
||||
bdev = spdk_dma_zmalloc(sizeof(*bdev), 0, NULL);
|
||||
if (!bdev) {
|
||||
SPDK_ERRLOG("could not allocate null_bdev\n");
|
||||
return NULL;
|
||||
@ -186,7 +186,7 @@ blockdev_null_initialize(void)
|
||||
* Instead of using a real rbuf from the bdev pool, just always point to
|
||||
* this same zeroed buffer.
|
||||
*/
|
||||
g_null_read_buf = spdk_zmalloc(SPDK_BDEV_LARGE_BUF_MAX_SIZE, 0, NULL);
|
||||
g_null_read_buf = spdk_dma_zmalloc(SPDK_BDEV_LARGE_BUF_MAX_SIZE, 0, NULL);
|
||||
|
||||
/*
|
||||
* We need to pick some unique address as our "io device" - so just use the
|
||||
@ -256,7 +256,7 @@ blockdev_null_finish(void)
|
||||
|
||||
TAILQ_FOREACH_SAFE(bdev, &g_null_bdev_head, tailq, tmp) {
|
||||
TAILQ_REMOVE(&g_null_bdev_head, bdev, tailq);
|
||||
spdk_free(bdev);
|
||||
spdk_dma_free(bdev);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -296,16 +296,16 @@ _spdk_blob_serialize_add_page(const struct spdk_blob *blob,
|
||||
if (*page_count == 0) {
|
||||
assert(*pages == NULL);
|
||||
*page_count = 1;
|
||||
*pages = spdk_malloc(sizeof(struct spdk_blob_md_page),
|
||||
sizeof(struct spdk_blob_md_page),
|
||||
NULL);
|
||||
*pages = spdk_dma_malloc(sizeof(struct spdk_blob_md_page),
|
||||
sizeof(struct spdk_blob_md_page),
|
||||
NULL);
|
||||
} else {
|
||||
assert(*pages != NULL);
|
||||
(*page_count)++;
|
||||
*pages = spdk_realloc(*pages,
|
||||
sizeof(struct spdk_blob_md_page) * (*page_count),
|
||||
sizeof(struct spdk_blob_md_page),
|
||||
NULL);
|
||||
*pages = spdk_dma_realloc(*pages,
|
||||
sizeof(struct spdk_blob_md_page) * (*page_count),
|
||||
sizeof(struct spdk_blob_md_page),
|
||||
NULL);
|
||||
}
|
||||
|
||||
if (*pages == NULL) {
|
||||
@ -456,7 +456,7 @@ _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pa
|
||||
rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
|
||||
&cur_page);
|
||||
if (rc < 0) {
|
||||
spdk_free(*pages);
|
||||
spdk_dma_free(*pages);
|
||||
*pages = NULL;
|
||||
*page_count = 0;
|
||||
return rc;
|
||||
@ -472,7 +472,7 @@ _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pa
|
||||
&required_sz);
|
||||
|
||||
if (rc < 0) {
|
||||
spdk_free(*pages);
|
||||
spdk_dma_free(*pages);
|
||||
*pages = NULL;
|
||||
*page_count = 0;
|
||||
return -1;
|
||||
@ -535,8 +535,8 @@ _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
|
||||
/* Read the next page */
|
||||
ctx->num_pages++;
|
||||
ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
|
||||
sizeof(*page), NULL);
|
||||
ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
|
||||
sizeof(*page), NULL);
|
||||
if (ctx->pages == NULL) {
|
||||
ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM);
|
||||
free(ctx);
|
||||
@ -558,7 +558,7 @@ _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
ctx->cb_fn(seq, ctx->cb_arg, rc);
|
||||
|
||||
/* Free the memory */
|
||||
spdk_free(ctx->pages);
|
||||
spdk_dma_free(ctx->pages);
|
||||
free(ctx);
|
||||
}
|
||||
|
||||
@ -585,8 +585,8 @@ _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
|
||||
}
|
||||
|
||||
ctx->blob = blob;
|
||||
ctx->pages = spdk_realloc(ctx->pages, sizeof(struct spdk_blob_md_page),
|
||||
sizeof(struct spdk_blob_md_page), NULL);
|
||||
ctx->pages = spdk_dma_realloc(ctx->pages, sizeof(struct spdk_blob_md_page),
|
||||
sizeof(struct spdk_blob_md_page), NULL);
|
||||
if (!ctx->pages) {
|
||||
free(ctx);
|
||||
cb_fn(seq, cb_arg, -ENOMEM);
|
||||
@ -631,7 +631,7 @@ _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
ctx->cb_fn(seq, ctx->cb_arg, bserrno);
|
||||
|
||||
/* Free the memory */
|
||||
spdk_free(ctx->pages);
|
||||
spdk_dma_free(ctx->pages);
|
||||
free(ctx);
|
||||
}
|
||||
|
||||
@ -984,7 +984,7 @@ _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
|
||||
for (i = 1; i < blob->active.num_pages; i++) {
|
||||
page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
|
||||
if (page_num >= spdk_bit_array_capacity(bs->used_md_pages)) {
|
||||
spdk_free(ctx->pages);
|
||||
spdk_dma_free(ctx->pages);
|
||||
free(ctx);
|
||||
blob->state = SPDK_BLOB_STATE_DIRTY;
|
||||
cb_fn(seq, cb_arg, -ENOMEM);
|
||||
@ -1234,8 +1234,8 @@ _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
|
||||
|
||||
rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
|
||||
if (rc < 0) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_free(ctx->mask);
|
||||
spdk_dma_free(ctx->super);
|
||||
spdk_dma_free(ctx->mask);
|
||||
_spdk_bs_free(ctx->bs);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -ENOMEM);
|
||||
@ -1255,8 +1255,8 @@ _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
|
||||
}
|
||||
}
|
||||
|
||||
spdk_free(ctx->super);
|
||||
spdk_free(ctx->mask);
|
||||
spdk_dma_free(ctx->super);
|
||||
spdk_dma_free(ctx->mask);
|
||||
free(ctx);
|
||||
|
||||
spdk_bs_sequence_finish(seq, bserrno);
|
||||
@ -1266,7 +1266,7 @@ static void
|
||||
_spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = cb_arg;
|
||||
uint64_t lba, lba_count;
|
||||
uint64_t lba, lba_count, mask_size;
|
||||
uint32_t i, j;
|
||||
int rc;
|
||||
|
||||
@ -1280,8 +1280,8 @@ _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
|
||||
rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
|
||||
if (rc < 0) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_free(ctx->mask);
|
||||
spdk_dma_free(ctx->super);
|
||||
spdk_dma_free(ctx->mask);
|
||||
_spdk_bs_free(ctx->bs);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -ENOMEM);
|
||||
@ -1297,13 +1297,13 @@ _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
segment >>= 1U;
|
||||
}
|
||||
}
|
||||
spdk_free(ctx->mask);
|
||||
spdk_dma_free(ctx->mask);
|
||||
|
||||
/* Read the used clusters mask */
|
||||
ctx->mask = spdk_zmalloc(ctx->super->used_cluster_mask_len * sizeof(struct spdk_blob_md_page),
|
||||
0x1000, NULL);
|
||||
mask_size = ctx->super->used_cluster_mask_len * sizeof(struct spdk_blob_md_page);
|
||||
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
|
||||
if (!ctx->mask) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
_spdk_bs_free(ctx->bs);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -ENOMEM);
|
||||
@ -1319,10 +1319,10 @@ static void
|
||||
_spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
{
|
||||
struct spdk_bs_load_ctx *ctx = cb_arg;
|
||||
uint64_t lba, lba_count;
|
||||
uint64_t lba, lba_count, mask_size;
|
||||
|
||||
if (ctx->super->version != SPDK_BS_VERSION) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
_spdk_bs_free(ctx->bs);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -EILSEQ);
|
||||
@ -1331,7 +1331,7 @@ _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
|
||||
if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
|
||||
sizeof(ctx->super->signature)) != 0) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
_spdk_bs_free(ctx->bs);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -EILSEQ);
|
||||
@ -1344,7 +1344,7 @@ _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
* on disk - the code just has not been written yet.
|
||||
*/
|
||||
assert(false);
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
_spdk_bs_free(ctx->bs);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -EILSEQ);
|
||||
@ -1360,10 +1360,10 @@ _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
|
||||
ctx->bs->md_len = ctx->super->md_len;
|
||||
|
||||
/* Read the used pages mask */
|
||||
ctx->mask = spdk_zmalloc(ctx->super->used_page_mask_len * sizeof(struct spdk_blob_md_page), 0x1000,
|
||||
NULL);
|
||||
mask_size = ctx->super->used_page_mask_len * sizeof(struct spdk_blob_md_page);
|
||||
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
|
||||
if (!ctx->mask) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
_spdk_bs_free(ctx->bs);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -ENOMEM);
|
||||
@ -1405,7 +1405,7 @@ spdk_bs_load(struct spdk_bs_dev *dev,
|
||||
ctx->bs = bs;
|
||||
|
||||
/* Allocate memory for the super block */
|
||||
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
|
||||
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
|
||||
if (!ctx->super) {
|
||||
free(ctx);
|
||||
_spdk_bs_free(bs);
|
||||
@ -1419,7 +1419,7 @@ spdk_bs_load(struct spdk_bs_dev *dev,
|
||||
|
||||
seq = spdk_bs_sequence_start(bs->md_target.md_channel, &cpl);
|
||||
if (!seq) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
free(ctx);
|
||||
_spdk_bs_free(bs);
|
||||
cb_fn(cb_arg, NULL, -ENOMEM);
|
||||
@ -1446,7 +1446,7 @@ _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
|
||||
{
|
||||
struct spdk_bs_init_ctx *ctx = cb_arg;
|
||||
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
free(ctx);
|
||||
|
||||
spdk_bs_sequence_finish(seq, bserrno);
|
||||
@ -1519,7 +1519,7 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
|
||||
ctx->bs = bs;
|
||||
|
||||
/* Allocate memory for the super block */
|
||||
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
|
||||
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
|
||||
if (!ctx->super) {
|
||||
free(ctx);
|
||||
_spdk_bs_free(bs);
|
||||
@ -1575,7 +1575,7 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
|
||||
|
||||
seq = spdk_bs_sequence_start(bs->md_target.md_channel, &cpl);
|
||||
if (!seq) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
free(ctx);
|
||||
_spdk_bs_free(bs);
|
||||
cb_fn(cb_arg, NULL, -ENOMEM);
|
||||
@ -1602,7 +1602,7 @@ _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
|
||||
{
|
||||
struct spdk_bs_unload_ctx *ctx = cb_arg;
|
||||
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
|
||||
spdk_bs_sequence_finish(seq, bserrno);
|
||||
|
||||
@ -1615,7 +1615,7 @@ _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, i
|
||||
{
|
||||
struct spdk_bs_unload_ctx *ctx = cb_arg;
|
||||
|
||||
spdk_free(ctx->mask);
|
||||
spdk_dma_free(ctx->mask);
|
||||
|
||||
/* Update the values in the super block */
|
||||
ctx->super->super_blob = ctx->bs->super_blob;
|
||||
@ -1631,15 +1631,15 @@ _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int
|
||||
{
|
||||
struct spdk_bs_unload_ctx *ctx = cb_arg;
|
||||
uint32_t i;
|
||||
uint64_t lba, lba_count;
|
||||
uint64_t lba, lba_count, mask_size;
|
||||
|
||||
spdk_free(ctx->mask);
|
||||
spdk_dma_free(ctx->mask);
|
||||
|
||||
/* Write out the used clusters mask */
|
||||
ctx->mask = spdk_zmalloc(ctx->super->used_cluster_mask_len * sizeof(struct spdk_blob_md_page),
|
||||
0x1000, NULL);
|
||||
mask_size = ctx->super->used_cluster_mask_len * sizeof(struct spdk_blob_md_page);
|
||||
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
|
||||
if (!ctx->mask) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -ENOMEM);
|
||||
return;
|
||||
@ -1670,13 +1670,13 @@ _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrn
|
||||
{
|
||||
struct spdk_bs_unload_ctx *ctx = cb_arg;
|
||||
uint32_t i;
|
||||
uint64_t lba, lba_count;
|
||||
uint64_t lba, lba_count, mask_size;
|
||||
|
||||
/* Write out the used page mask */
|
||||
ctx->mask = spdk_zmalloc(ctx->super->used_page_mask_len * sizeof(struct spdk_blob_md_page),
|
||||
0x1000, NULL);
|
||||
mask_size = ctx->super->used_page_mask_len * sizeof(struct spdk_blob_md_page);
|
||||
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
|
||||
if (!ctx->mask) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
free(ctx);
|
||||
spdk_bs_sequence_finish(seq, -ENOMEM);
|
||||
return;
|
||||
@ -1719,7 +1719,7 @@ spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_a
|
||||
|
||||
ctx->bs = bs;
|
||||
|
||||
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
|
||||
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
|
||||
if (!ctx->super) {
|
||||
free(ctx);
|
||||
cb_fn(cb_arg, -ENOMEM);
|
||||
@ -1732,7 +1732,7 @@ spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_a
|
||||
|
||||
seq = spdk_bs_sequence_start(bs->md_target.md_channel, &cpl);
|
||||
if (!seq) {
|
||||
spdk_free(ctx->super);
|
||||
spdk_dma_free(ctx->super);
|
||||
free(ctx);
|
||||
cb_fn(cb_arg, -ENOMEM);
|
||||
return;
|
||||
|
@ -1319,7 +1319,7 @@ __rw_done(void *ctx, int bserrno)
|
||||
struct spdk_fs_request *req = ctx;
|
||||
struct spdk_fs_cb_args *args = &req->args;
|
||||
|
||||
spdk_free(args->op.rw.pin_buf);
|
||||
spdk_dma_free(args->op.rw.pin_buf);
|
||||
args->fn.file_op(args->arg, bserrno);
|
||||
free_fs_request(req);
|
||||
}
|
||||
@ -1404,7 +1404,7 @@ __readwrite(struct spdk_file *file, struct spdk_io_channel *_channel,
|
||||
|
||||
__get_page_parameters(file, offset, length, &start_page, &page_size, &num_pages);
|
||||
pin_buf_length = num_pages * page_size;
|
||||
args->op.rw.pin_buf = spdk_malloc(pin_buf_length, 4096, NULL);
|
||||
args->op.rw.pin_buf = spdk_dma_malloc(pin_buf_length, 4096, NULL);
|
||||
|
||||
args->op.rw.start_page = start_page;
|
||||
args->op.rw.num_pages = num_pages;
|
||||
|
@ -127,7 +127,7 @@ copy_engine_ioat_exit(void)
|
||||
TAILQ_REMOVE(&g_devices, dev, tailq);
|
||||
spdk_ioat_detach(dev->ioat);
|
||||
ioat_free_device(dev);
|
||||
spdk_free(dev);
|
||||
spdk_dma_free(dev);
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -260,7 +260,7 @@ attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_ioat_chan *
|
||||
{
|
||||
struct ioat_device *dev;
|
||||
|
||||
dev = spdk_zmalloc(sizeof(*dev), 0, NULL);
|
||||
dev = spdk_dma_zmalloc(sizeof(*dev), 0, NULL);
|
||||
if (dev == NULL) {
|
||||
SPDK_ERRLOG("Failed to allocate device struct\n");
|
||||
return;
|
||||
|
@ -43,7 +43,7 @@
|
||||
#include <rte_version.h>
|
||||
|
||||
void *
|
||||
spdk_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
|
||||
spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
|
||||
{
|
||||
void *buf = rte_malloc_socket(NULL, size, align, socket_id);
|
||||
if (buf && phys_addr) {
|
||||
@ -53,9 +53,9 @@ spdk_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id
|
||||
}
|
||||
|
||||
void *
|
||||
spdk_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
|
||||
spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
|
||||
{
|
||||
void *buf = spdk_malloc_socket(size, align, phys_addr, socket_id);
|
||||
void *buf = spdk_dma_malloc_socket(size, align, phys_addr, socket_id);
|
||||
if (buf) {
|
||||
memset(buf, 0, size);
|
||||
}
|
||||
@ -63,19 +63,19 @@ spdk_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_i
|
||||
}
|
||||
|
||||
void *
|
||||
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
|
||||
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
|
||||
{
|
||||
return spdk_malloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
|
||||
return spdk_dma_malloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
|
||||
}
|
||||
|
||||
void *
|
||||
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
|
||||
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
|
||||
{
|
||||
return spdk_zmalloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
|
||||
return spdk_dma_zmalloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
|
||||
}
|
||||
|
||||
void *
|
||||
spdk_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
|
||||
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
|
||||
{
|
||||
void *new_buf = rte_realloc(buf, size, align);
|
||||
if (new_buf && phys_addr) {
|
||||
@ -85,7 +85,7 @@ spdk_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
|
||||
}
|
||||
|
||||
void
|
||||
spdk_free(void *buf)
|
||||
spdk_dma_free(void *buf)
|
||||
{
|
||||
rte_free(buf);
|
||||
}
|
||||
|
@ -354,11 +354,11 @@ ioat_channel_destruct(struct spdk_ioat_chan *ioat)
|
||||
}
|
||||
|
||||
if (ioat->hw_ring) {
|
||||
spdk_free(ioat->hw_ring);
|
||||
spdk_dma_free(ioat->hw_ring);
|
||||
}
|
||||
|
||||
if (ioat->comp_update) {
|
||||
spdk_free((void *)ioat->comp_update);
|
||||
spdk_dma_free((void *)ioat->comp_update);
|
||||
ioat->comp_update = NULL;
|
||||
}
|
||||
|
||||
@ -404,8 +404,8 @@ ioat_channel_start(struct spdk_ioat_chan *ioat)
|
||||
ioat->max_xfer_size = 1U << xfercap;
|
||||
}
|
||||
|
||||
ioat->comp_update = spdk_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
|
||||
&comp_update_bus_addr);
|
||||
ioat->comp_update = spdk_dma_zmalloc(sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
|
||||
&comp_update_bus_addr);
|
||||
if (ioat->comp_update == NULL) {
|
||||
return -1;
|
||||
}
|
||||
@ -419,8 +419,8 @@ ioat_channel_start(struct spdk_ioat_chan *ioat)
|
||||
return -1;
|
||||
}
|
||||
|
||||
ioat->hw_ring = spdk_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
|
||||
&ioat->hw_ring_phys_addr);
|
||||
ioat->hw_ring = spdk_dma_zmalloc(num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
|
||||
&ioat->hw_ring_phys_addr);
|
||||
if (!ioat->hw_ring) {
|
||||
return -1;
|
||||
}
|
||||
|
@ -617,7 +617,7 @@ spdk_iscsi_app_read_parameters(void)
|
||||
g_spdk_iscsi.MaxSessions = MaxSessions;
|
||||
SPDK_TRACELOG(SPDK_TRACE_DEBUG, "MaxSessions %d\n", g_spdk_iscsi.MaxSessions);
|
||||
|
||||
g_spdk_iscsi.session = spdk_zmalloc(sizeof(void *) * g_spdk_iscsi.MaxSessions, 0, NULL);
|
||||
g_spdk_iscsi.session = spdk_dma_zmalloc(sizeof(void *) * g_spdk_iscsi.MaxSessions, 0, NULL);
|
||||
if (!g_spdk_iscsi.session) {
|
||||
perror("Unable to allocate session pointer array\n");
|
||||
return -1;
|
||||
|
@ -142,7 +142,7 @@ nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
|
||||
memcpy(req->user_buffer, req->payload.u.contig, req->payload_size);
|
||||
}
|
||||
|
||||
spdk_free(req->payload.u.contig);
|
||||
spdk_dma_free(req->payload.u.contig);
|
||||
}
|
||||
|
||||
/* Call the user's original callback now that the buffer has been copied */
|
||||
@ -165,7 +165,7 @@ nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
|
||||
uint64_t phys_addr;
|
||||
|
||||
if (buffer && payload_size) {
|
||||
contig_buffer = spdk_zmalloc(payload_size, 4096, &phys_addr);
|
||||
contig_buffer = spdk_dma_zmalloc(payload_size, 4096, &phys_addr);
|
||||
if (!contig_buffer) {
|
||||
return NULL;
|
||||
}
|
||||
@ -178,7 +178,7 @@ nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
|
||||
req = nvme_allocate_request_contig(qpair, contig_buffer, payload_size, nvme_user_copy_cmd_complete,
|
||||
NULL);
|
||||
if (!req) {
|
||||
spdk_free(contig_buffer);
|
||||
spdk_dma_free(contig_buffer);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -230,7 +230,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
|
||||
TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
|
||||
spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
|
||||
|
||||
spdk_free(qpair->req_buf);
|
||||
spdk_dma_free(qpair->req_buf);
|
||||
|
||||
if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
|
||||
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
|
||||
@ -280,8 +280,8 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
|
||||
struct nvme_completion_poll_status status;
|
||||
struct spdk_nvme_intel_log_page_directory *log_page_directory;
|
||||
|
||||
log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
|
||||
64, &phys_addr);
|
||||
log_page_directory = spdk_dma_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
|
||||
64, &phys_addr);
|
||||
if (log_page_directory == NULL) {
|
||||
SPDK_ERRLOG("could not allocate log_page_directory\n");
|
||||
return -ENXIO;
|
||||
@ -296,13 +296,13 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
|
||||
spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
|
||||
}
|
||||
if (spdk_nvme_cpl_is_error(&status.cpl)) {
|
||||
spdk_free(log_page_directory);
|
||||
spdk_dma_free(log_page_directory);
|
||||
SPDK_ERRLOG("nvme_ctrlr_cmd_get_log_page failed!\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
|
||||
spdk_free(log_page_directory);
|
||||
spdk_dma_free(log_page_directory);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -745,13 +745,13 @@ nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
|
||||
nvme_ns_destruct(&ctrlr->ns[i]);
|
||||
}
|
||||
|
||||
spdk_free(ctrlr->ns);
|
||||
spdk_dma_free(ctrlr->ns);
|
||||
ctrlr->ns = NULL;
|
||||
ctrlr->num_ns = 0;
|
||||
}
|
||||
|
||||
if (ctrlr->nsdata) {
|
||||
spdk_free(ctrlr->nsdata);
|
||||
spdk_dma_free(ctrlr->nsdata);
|
||||
ctrlr->nsdata = NULL;
|
||||
}
|
||||
}
|
||||
@ -773,14 +773,14 @@ nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
|
||||
if (nn != ctrlr->num_ns) {
|
||||
nvme_ctrlr_destruct_namespaces(ctrlr);
|
||||
|
||||
ctrlr->ns = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64,
|
||||
&phys_addr);
|
||||
ctrlr->ns = spdk_dma_zmalloc(nn * sizeof(struct spdk_nvme_ns), 64,
|
||||
&phys_addr);
|
||||
if (ctrlr->ns == NULL) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ctrlr->nsdata = spdk_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
|
||||
&phys_addr);
|
||||
ctrlr->nsdata = spdk_dma_zmalloc(nn * sizeof(struct spdk_nvme_ns_data), 64,
|
||||
&phys_addr);
|
||||
if (ctrlr->nsdata == NULL) {
|
||||
goto fail;
|
||||
}
|
||||
@ -914,7 +914,7 @@ nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
|
||||
}
|
||||
|
||||
/* Initialize the per process properties for this ctrlr */
|
||||
ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process), 64, NULL);
|
||||
ctrlr_proc = spdk_dma_zmalloc(sizeof(struct spdk_nvme_ctrlr_process), 64, NULL);
|
||||
if (ctrlr_proc == NULL) {
|
||||
SPDK_ERRLOG("failed to allocate memory to track the process props\n");
|
||||
|
||||
@ -951,7 +951,7 @@ nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
|
||||
|
||||
TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
|
||||
|
||||
spdk_free(proc);
|
||||
spdk_dma_free(proc);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -986,7 +986,7 @@ nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
|
||||
spdk_nvme_ctrlr_free_io_qpair(qpair);
|
||||
}
|
||||
|
||||
spdk_free(proc);
|
||||
spdk_dma_free(proc);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1005,7 +1005,7 @@ nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
|
||||
|
||||
assert(STAILQ_EMPTY(&active_proc->active_reqs));
|
||||
|
||||
spdk_free(active_proc);
|
||||
spdk_dma_free(active_proc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -556,7 +556,7 @@ nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
|
||||
struct nvme_pcie_qpair *pqpair;
|
||||
int rc;
|
||||
|
||||
pqpair = spdk_zmalloc(sizeof(*pqpair), 64, NULL);
|
||||
pqpair = spdk_dma_zmalloc(sizeof(*pqpair), 64, NULL);
|
||||
if (pqpair == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -672,7 +672,7 @@ struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(const struct spdk_nvme_transpo
|
||||
int rc;
|
||||
struct spdk_pci_id pci_id;
|
||||
|
||||
pctrlr = spdk_zmalloc(sizeof(struct nvme_pcie_ctrlr), 64, NULL);
|
||||
pctrlr = spdk_dma_zmalloc(sizeof(struct nvme_pcie_ctrlr), 64, NULL);
|
||||
if (pctrlr == NULL) {
|
||||
SPDK_ERRLOG("could not allocate ctrlr\n");
|
||||
return NULL;
|
||||
@ -687,7 +687,7 @@ struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(const struct spdk_nvme_transpo
|
||||
|
||||
rc = nvme_pcie_ctrlr_allocate_bars(pctrlr);
|
||||
if (rc != 0) {
|
||||
spdk_free(pctrlr);
|
||||
spdk_dma_free(pctrlr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -698,7 +698,7 @@ struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(const struct spdk_nvme_transpo
|
||||
|
||||
if (nvme_ctrlr_get_cap(&pctrlr->ctrlr, &cap)) {
|
||||
SPDK_ERRLOG("get_cap() failed\n");
|
||||
spdk_free(pctrlr);
|
||||
spdk_dma_free(pctrlr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -786,7 +786,7 @@ nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
|
||||
spdk_pci_device_detach(devhandle);
|
||||
}
|
||||
|
||||
spdk_free(pctrlr);
|
||||
spdk_dma_free(pctrlr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -860,18 +860,18 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
|
||||
}
|
||||
}
|
||||
if (pqpair->sq_in_cmb == false) {
|
||||
pqpair->cmd = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
|
||||
0x1000,
|
||||
&pqpair->cmd_bus_addr);
|
||||
pqpair->cmd = spdk_dma_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
|
||||
0x1000,
|
||||
&pqpair->cmd_bus_addr);
|
||||
if (pqpair->cmd == NULL) {
|
||||
SPDK_ERRLOG("alloc qpair_cmd failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
pqpair->cpl = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cpl),
|
||||
0x1000,
|
||||
&pqpair->cpl_bus_addr);
|
||||
pqpair->cpl = spdk_dma_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cpl),
|
||||
0x1000,
|
||||
&pqpair->cpl_bus_addr);
|
||||
if (pqpair->cpl == NULL) {
|
||||
SPDK_ERRLOG("alloc qpair_cpl failed\n");
|
||||
return -ENOMEM;
|
||||
@ -887,7 +887,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
|
||||
* This ensures the PRP list embedded in the nvme_tracker object will not span a
|
||||
* 4KB boundary, while allowing access to trackers in tr[] via normal array indexing.
|
||||
*/
|
||||
pqpair->tr = spdk_zmalloc(num_trackers * sizeof(*tr), sizeof(*tr), &phys_addr);
|
||||
pqpair->tr = spdk_dma_zmalloc(num_trackers * sizeof(*tr), sizeof(*tr), &phys_addr);
|
||||
if (pqpair->tr == NULL) {
|
||||
SPDK_ERRLOG("nvme_tr failed\n");
|
||||
return -ENOMEM;
|
||||
@ -1171,16 +1171,16 @@ nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair)
|
||||
nvme_pcie_admin_qpair_destroy(qpair);
|
||||
}
|
||||
if (pqpair->cmd && !pqpair->sq_in_cmb) {
|
||||
spdk_free(pqpair->cmd);
|
||||
spdk_dma_free(pqpair->cmd);
|
||||
}
|
||||
if (pqpair->cpl) {
|
||||
spdk_free(pqpair->cpl);
|
||||
spdk_dma_free(pqpair->cpl);
|
||||
}
|
||||
if (pqpair->tr) {
|
||||
spdk_free(pqpair->tr);
|
||||
spdk_dma_free(pqpair->tr);
|
||||
}
|
||||
|
||||
spdk_free(pqpair);
|
||||
spdk_dma_free(pqpair);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1411,7 +1411,7 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
|
||||
|
||||
assert(ctrlr != NULL);
|
||||
|
||||
pqpair = spdk_zmalloc(sizeof(*pqpair), 64, NULL);
|
||||
pqpair = spdk_dma_zmalloc(sizeof(*pqpair), 64, NULL);
|
||||
if (pqpair == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -381,7 +381,7 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
|
||||
|
||||
req_size_padded = (sizeof(struct nvme_request) + 63) & ~(size_t)63;
|
||||
|
||||
qpair->req_buf = spdk_zmalloc(req_size_padded * num_requests, 64, NULL);
|
||||
qpair->req_buf = spdk_dma_zmalloc(req_size_padded * num_requests, 64, NULL);
|
||||
if (qpair->req_buf == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -574,7 +574,7 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
|
||||
|
||||
rctrlr = nvme_rdma_ctrlr(ctrlr);
|
||||
|
||||
nvmf_data = spdk_zmalloc(sizeof(*nvmf_data), 0, NULL);
|
||||
nvmf_data = spdk_dma_zmalloc(sizeof(*nvmf_data), 0, NULL);
|
||||
if (!nvmf_data) {
|
||||
SPDK_ERRLOG("nvmf_data allocation error\n");
|
||||
rc = -1;
|
||||
@ -623,7 +623,7 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
|
||||
rsp = (struct spdk_nvmf_fabric_connect_rsp *)&status.cpl;
|
||||
rctrlr->cntlid = rsp->status_code_specific.success.cntlid;
|
||||
ret:
|
||||
spdk_free(nvmf_data);
|
||||
spdk_dma_free(nvmf_data);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -250,9 +250,9 @@ spdk_nvmf_rdma_conn_destroy(struct spdk_nvmf_rdma_conn *rdma_conn)
|
||||
}
|
||||
|
||||
/* Free all memory */
|
||||
spdk_free(rdma_conn->cmds);
|
||||
spdk_free(rdma_conn->cpls);
|
||||
spdk_free(rdma_conn->bufs);
|
||||
spdk_dma_free(rdma_conn->cmds);
|
||||
spdk_dma_free(rdma_conn->cpls);
|
||||
spdk_dma_free(rdma_conn->bufs);
|
||||
free(rdma_conn->reqs);
|
||||
free(rdma_conn);
|
||||
}
|
||||
@ -318,12 +318,12 @@ spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, struct ibv_comp_channel *chann
|
||||
|
||||
rdma_conn->reqs = calloc(max_queue_depth, sizeof(*rdma_conn->reqs));
|
||||
rdma_conn->recvs = calloc(max_queue_depth, sizeof(*rdma_conn->recvs));
|
||||
rdma_conn->cmds = spdk_zmalloc(max_queue_depth * sizeof(*rdma_conn->cmds),
|
||||
0x1000, NULL);
|
||||
rdma_conn->cpls = spdk_zmalloc(max_queue_depth * sizeof(*rdma_conn->cpls),
|
||||
0x1000, NULL);
|
||||
rdma_conn->bufs = spdk_zmalloc(max_queue_depth * g_rdma.in_capsule_data_size,
|
||||
0x1000, NULL);
|
||||
rdma_conn->cmds = spdk_dma_zmalloc(max_queue_depth * sizeof(*rdma_conn->cmds),
|
||||
0x1000, NULL);
|
||||
rdma_conn->cpls = spdk_dma_zmalloc(max_queue_depth * sizeof(*rdma_conn->cpls),
|
||||
0x1000, NULL);
|
||||
rdma_conn->bufs = spdk_dma_zmalloc(max_queue_depth * g_rdma.in_capsule_data_size,
|
||||
0x1000, NULL);
|
||||
if (!rdma_conn->reqs || !rdma_conn->recvs || !rdma_conn->cmds ||
|
||||
!rdma_conn->cpls || !rdma_conn->bufs) {
|
||||
SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
|
||||
@ -1223,8 +1223,8 @@ spdk_nvmf_rdma_session_init(void)
|
||||
/* TODO: Make the number of elements in this pool configurable. For now, one full queue
|
||||
* worth seems reasonable.
|
||||
*/
|
||||
rdma_sess->buf = spdk_zmalloc(g_rdma.max_queue_depth * g_rdma.max_io_size,
|
||||
0x20000, NULL);
|
||||
rdma_sess->buf = spdk_dma_zmalloc(g_rdma.max_queue_depth * g_rdma.max_io_size,
|
||||
0x20000, NULL);
|
||||
if (!rdma_sess->buf) {
|
||||
SPDK_ERRLOG("Large buffer pool allocation failed (%d x %d)\n",
|
||||
g_rdma.max_queue_depth, g_rdma.max_io_size);
|
||||
@ -1253,7 +1253,7 @@ spdk_nvmf_rdma_session_fini(struct spdk_nvmf_session *session)
|
||||
}
|
||||
|
||||
ibv_dereg_mr(rdma_sess->buf_mr);
|
||||
spdk_free(rdma_sess->buf);
|
||||
spdk_dma_free(rdma_sess->buf);
|
||||
free(rdma_sess);
|
||||
}
|
||||
|
||||
@ -1282,7 +1282,7 @@ spdk_nvmf_rdma_session_add_conn(struct spdk_nvmf_session *session,
|
||||
if (!rdma_sess->buf_mr) {
|
||||
SPDK_ERRLOG("Large buffer pool registration failed (%d x %d)\n",
|
||||
g_rdma.max_queue_depth, g_rdma.max_io_size);
|
||||
spdk_free(rdma_sess->buf);
|
||||
spdk_dma_free(rdma_sess->buf);
|
||||
free(rdma_sess);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1510,7 +1510,7 @@ spdk_bdev_scsi_unmap(struct spdk_bdev *bdev,
|
||||
} else {
|
||||
desc = spdk_scsi_task_alloc_data(task, bdesc_data_len - 8);
|
||||
memcpy(desc, &data[8], bdesc_data_len - 8);
|
||||
spdk_free(data);
|
||||
spdk_dma_free(data);
|
||||
}
|
||||
|
||||
max_unmap_bdesc_count = spdk_bdev_get_max_unmap_descriptors(bdev);
|
||||
@ -1702,7 +1702,7 @@ spdk_bdev_scsi_process_primary(struct spdk_bdev *bdev,
|
||||
case SPDK_SPC_INQUIRY:
|
||||
alloc_len = from_be16(&cdb[3]);
|
||||
data_len = spdk_max(4096, alloc_len);
|
||||
data = spdk_zmalloc(data_len, 0, NULL);
|
||||
data = spdk_dma_zmalloc(data_len, 0, NULL);
|
||||
assert(data != NULL);
|
||||
rc = spdk_bdev_scsi_inquiry(bdev, task, cdb, data, data_len);
|
||||
data_len = spdk_min(rc, data_len);
|
||||
@ -1726,7 +1726,7 @@ spdk_bdev_scsi_process_primary(struct spdk_bdev *bdev,
|
||||
}
|
||||
|
||||
data_len = spdk_max(4096, alloc_len);
|
||||
data = spdk_zmalloc(data_len, 0, NULL);
|
||||
data = spdk_dma_zmalloc(data_len, 0, NULL);
|
||||
assert(data != NULL);
|
||||
rc = spdk_bdev_scsi_report_luns(task->lun, sel, data, data_len);
|
||||
data_len = rc;
|
||||
@ -1834,7 +1834,7 @@ spdk_bdev_scsi_process_primary(struct spdk_bdev *bdev,
|
||||
}
|
||||
|
||||
data_len = rc;
|
||||
data = spdk_zmalloc(data_len, 0, NULL);
|
||||
data = spdk_dma_zmalloc(data_len, 0, NULL);
|
||||
assert(data != NULL);
|
||||
|
||||
/* First call with no buffer to discover needed buffer size */
|
||||
@ -1876,7 +1876,7 @@ spdk_bdev_scsi_process_primary(struct spdk_bdev *bdev,
|
||||
spdk_scsi_task_build_sense_data(task, sk, asc, ascq);
|
||||
|
||||
data_len = task->sense_data_len;
|
||||
data = spdk_zmalloc(data_len, 0, NULL);
|
||||
data = spdk_dma_zmalloc(data_len, 0, NULL);
|
||||
assert(data != NULL);
|
||||
memcpy(data, task->sense_data, data_len);
|
||||
break;
|
||||
@ -1925,7 +1925,7 @@ spdk_bdev_scsi_process_primary(struct spdk_bdev *bdev,
|
||||
}
|
||||
|
||||
if (data)
|
||||
spdk_free(data);
|
||||
spdk_dma_free(data);
|
||||
|
||||
return SPDK_SCSI_TASK_COMPLETE;
|
||||
}
|
||||
|
@ -97,7 +97,7 @@ void
|
||||
spdk_scsi_task_free_data(struct spdk_scsi_task *task)
|
||||
{
|
||||
if (task->alloc_len != 0) {
|
||||
spdk_free(task->iov.iov_base);
|
||||
spdk_dma_free(task->iov.iov_base);
|
||||
task->alloc_len = 0;
|
||||
}
|
||||
|
||||
@ -110,7 +110,7 @@ spdk_scsi_task_alloc_data(struct spdk_scsi_task *task, uint32_t alloc_len)
|
||||
{
|
||||
assert(task->alloc_len == 0);
|
||||
|
||||
task->iov.iov_base = spdk_zmalloc(alloc_len, 0, NULL);
|
||||
task->iov.iov_base = spdk_dma_zmalloc(alloc_len, 0, NULL);
|
||||
task->iov.iov_len = alloc_len;
|
||||
task->alloc_len = alloc_len;
|
||||
|
||||
@ -177,7 +177,7 @@ spdk_scsi_task_gather_data(struct spdk_scsi_task *task, int *len)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
buf = spdk_malloc(buf_len, 0, NULL);
|
||||
buf = spdk_dma_malloc(buf_len, 0, NULL);
|
||||
if (buf == NULL) {
|
||||
*len = -1;
|
||||
return NULL;
|
||||
|
@ -73,7 +73,7 @@ spdk_bit_array_free(struct spdk_bit_array **bap)
|
||||
|
||||
ba = *bap;
|
||||
*bap = NULL;
|
||||
spdk_free(ba);
|
||||
spdk_dma_free(ba);
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
@ -109,7 +109,7 @@ spdk_bit_array_resize(struct spdk_bit_array **bap, uint32_t num_bits)
|
||||
*/
|
||||
new_size += SPDK_BIT_ARRAY_WORD_BYTES;
|
||||
|
||||
new_ba = (struct spdk_bit_array *)spdk_realloc(*bap, new_size, 64, NULL);
|
||||
new_ba = (struct spdk_bit_array *)spdk_dma_realloc(*bap, new_size, 64, NULL);
|
||||
if (!new_ba) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -747,7 +747,7 @@ spdk_vhost_scsi_dev_construct(const char *name, uint64_t cpumask)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
svdev = spdk_zmalloc(sizeof(*svdev), SPDK_CACHE_LINE_SIZE, NULL);
|
||||
svdev = spdk_dma_zmalloc(sizeof(*svdev), SPDK_CACHE_LINE_SIZE, NULL);
|
||||
if (svdev == NULL) {
|
||||
SPDK_ERRLOG("Couldn't allocate memory for vhost dev\n");
|
||||
return -ENOMEM;
|
||||
@ -761,7 +761,7 @@ spdk_vhost_scsi_dev_construct(const char *name, uint64_t cpumask)
|
||||
rc = spdk_vhost_dev_register(vdev, &spdk_vhost_scsi_device_backend);
|
||||
if (rc < 0) {
|
||||
free(vdev->name);
|
||||
spdk_free(svdev);
|
||||
spdk_dma_free(svdev);
|
||||
}
|
||||
|
||||
return rc;
|
||||
@ -793,7 +793,7 @@ spdk_vhost_scsi_dev_remove(struct spdk_vhost_scsi_dev *svdev)
|
||||
* it should be already *destructed* (spdk_vhost_dev_destruct)
|
||||
*/
|
||||
free(vdev->name);
|
||||
spdk_free(svdev);
|
||||
spdk_dma_free(svdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -165,7 +165,7 @@ static bool g_completion_success;
|
||||
static void
|
||||
initialize_buffer(char **buf, int pattern, int size)
|
||||
{
|
||||
*buf = spdk_zmalloc(size, 0x1000, NULL);
|
||||
*buf = spdk_dma_zmalloc(size, 0x1000, NULL);
|
||||
memset(*buf, pattern, size);
|
||||
}
|
||||
|
||||
@ -284,8 +284,8 @@ blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
|
||||
int rc;
|
||||
rc = memcmp(rx_buf, tx_buf, data_length);
|
||||
|
||||
spdk_free(rx_buf);
|
||||
spdk_free(tx_buf);
|
||||
spdk_dma_free(rx_buf);
|
||||
spdk_dma_free(tx_buf);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -287,7 +287,7 @@ task_ctor(struct rte_mempool *mp, void *arg, void *__task, unsigned id)
|
||||
{
|
||||
struct bdevperf_task *task = __task;
|
||||
|
||||
task->buf = spdk_zmalloc(g_io_size, g_min_alignment, NULL);
|
||||
task->buf = spdk_dma_zmalloc(g_io_size, g_min_alignment, NULL);
|
||||
}
|
||||
|
||||
static __thread unsigned int seed = 0;
|
||||
|
6
test/lib/env/vtophys/vtophys.c
vendored
6
test/lib/env/vtophys/vtophys.c
vendored
@ -83,18 +83,18 @@ vtophys_positive_test(void)
|
||||
int rc = 0;
|
||||
|
||||
for (i = 0; i < 31; i++) {
|
||||
p = spdk_zmalloc(size, 512, NULL);
|
||||
p = spdk_dma_zmalloc(size, 512, NULL);
|
||||
if (p == NULL)
|
||||
continue;
|
||||
|
||||
if (spdk_vtophys(p) == SPDK_VTOPHYS_ERROR) {
|
||||
rc = -1;
|
||||
printf("Err: VA=%p is not mapped to a huge_page,\n", p);
|
||||
spdk_free(p);
|
||||
spdk_dma_free(p);
|
||||
break;
|
||||
}
|
||||
|
||||
spdk_free(p);
|
||||
spdk_dma_free(p);
|
||||
size = size << 1;
|
||||
}
|
||||
|
||||
|
@ -38,12 +38,12 @@
|
||||
#include "ioat/ioat.c"
|
||||
|
||||
void *
|
||||
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
|
||||
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
|
||||
{
|
||||
return calloc(1, size);
|
||||
}
|
||||
|
||||
void spdk_free(void *buf)
|
||||
void spdk_dma_free(void *buf)
|
||||
{
|
||||
free(buf);
|
||||
}
|
||||
|
@ -155,7 +155,7 @@ cleanup(void)
|
||||
|
||||
foreach_dev(dev) {
|
||||
if (dev->health_page) {
|
||||
spdk_free(dev->health_page);
|
||||
spdk_dma_free(dev->health_page);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -270,7 +270,7 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
|
||||
|
||||
printf("Attached to %s\n", dev->name);
|
||||
|
||||
dev->health_page = spdk_zmalloc(sizeof(*dev->health_page), 4096, NULL);
|
||||
	dev->health_page = spdk_dma_zmalloc(sizeof(*dev->health_page), 4096, NULL);
	if (dev->health_page == NULL) {
		printf("Allocation error (health page)\n");
		failed = 1;

@ -174,7 +174,7 @@ static uint32_t dp_guard_check_extended_lba_test(struct spdk_nvme_ns *ns, struct

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL);
	req->contig = spdk_dma_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL);
	if (!req->contig)
		return 0;

@ -210,7 +210,7 @@ static uint32_t dp_with_pract_test(struct spdk_nvme_ns *ns, struct io_request *r

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	/* No additional metadata buffer provided */
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL);
	req->contig = spdk_dma_zmalloc(sector_size * req->lba_count, 0x1000, NULL);
	if (!req->contig)
		return 0;

@ -257,7 +257,7 @@ static uint32_t dp_without_pract_extended_lba_test(struct spdk_nvme_ns *ns, stru

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL);
	req->contig = spdk_dma_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL);
	if (!req->contig)
		return 0;

@ -291,7 +291,7 @@ static uint32_t dp_without_flags_extended_lba_test(struct spdk_nvme_ns *ns, stru

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL);
	req->contig = spdk_dma_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL);
	if (!req->contig)
		return 0;

@ -325,13 +325,13 @@ static uint32_t dp_without_pract_separate_meta_test(struct spdk_nvme_ns *ns, str

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL);
	req->contig = spdk_dma_zmalloc(sector_size * req->lba_count, 0x1000, NULL);
	if (!req->contig)
		return 0;

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL);
	req->metadata = spdk_dma_zmalloc(md_size * req->lba_count, 0x1000, NULL);
	if (!req->metadata) {
		spdk_free(req->contig);
		spdk_dma_free(req->contig);
		return 0;
	}

@ -368,13 +368,13 @@ static uint32_t dp_without_pract_separate_meta_apptag_test(struct spdk_nvme_ns *

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL);
	req->contig = spdk_dma_zmalloc(sector_size * req->lba_count, 0x1000, NULL);
	if (!req->contig)
		return 0;

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL);
	req->metadata = spdk_dma_zmalloc(md_size * req->lba_count, 0x1000, NULL);
	if (!req->metadata) {
		spdk_free(req->contig);
		spdk_dma_free(req->contig);
		return 0;
	}

@ -409,13 +409,13 @@ static uint32_t dp_without_flags_separate_meta_test(struct spdk_nvme_ns *ns, str

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL);
	req->contig = spdk_dma_zmalloc(sector_size * req->lba_count, 0x1000, NULL);
	if (!req->contig)
		return 0;

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL);
	req->metadata = spdk_dma_zmalloc(md_size * req->lba_count, 0x1000, NULL);
	if (!req->metadata) {
		spdk_free(req->contig);
		spdk_dma_free(req->contig);
		return 0;
	}

@ -437,12 +437,12 @@ free_req(struct io_request *req)
	}

	if (req->contig)
		spdk_free(req->contig);
		spdk_dma_free(req->contig);

	if (req->metadata)
		spdk_free(req->metadata);
		spdk_dma_free(req->metadata);

	spdk_free(req);
	spdk_dma_free(req);
}

static int
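For reference, a minimal sketch of the allocate/check/free pattern the data-protection tests above follow after the rename. The helper name and buffer sizes here are illustrative; only the spdk_dma_zmalloc()/spdk_dma_free() signatures come from this change.

```c
#include <stddef.h>
#include "spdk/env.h"

/* Illustrative only: allocate a data buffer plus a separate metadata buffer
 * with the renamed allocators, and release both with spdk_dma_free(). */
static int
alloc_dp_buffers(size_t data_len, size_t md_len, void **data, void **md)
{
	/* 4 KiB alignment, no physical address requested (NULL out-parameter). */
	*data = spdk_dma_zmalloc(data_len, 0x1000, NULL);
	if (*data == NULL) {
		return -1;
	}

	*md = spdk_dma_zmalloc(md_len, 0x1000, NULL);
	if (*md == NULL) {
		/* Unwind the partial allocation, as free_req() above does. */
		spdk_dma_free(*data);
		*data = NULL;
		return -1;
	}

	return 0;
}
```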
@ -499,7 +499,7 @@ write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, con
		return 0;
	}

	req = spdk_zmalloc(sizeof(*req), 0, NULL);
	req = spdk_dma_zmalloc(sizeof(*req), 0, NULL);
	if (!req) {
		fprintf(stderr, "Allocate request failed\n");
		return 0;

@ -574,15 +574,15 @@ int main(int argc, char **argv)
		return rc;
	}

	g_task = spdk_zmalloc(sizeof(struct perf_task), 0, NULL);
	g_task = spdk_dma_zmalloc(sizeof(struct perf_task), 0, NULL);
	if (g_task == NULL) {
		fprintf(stderr, "g_task alloc failed\n");
		exit(1);
	}

	g_task->buf = spdk_zmalloc(g_io_size_bytes, 0x1000, NULL);
	g_task->buf = spdk_dma_zmalloc(g_io_size_bytes, 0x1000, NULL);
	if (g_task->buf == NULL) {
		fprintf(stderr, "g_task->buf spdk_zmalloc failed\n");
		fprintf(stderr, "g_task->buf spdk_dma_zmalloc failed\n");
		exit(1);
	}

@ -156,9 +156,9 @@ static void task_ctor(struct rte_mempool *mp, void *arg, void *__task, unsigned
{
	struct reset_task *task = __task;

	task->buf = spdk_zmalloc(g_io_size_bytes, 0x200, NULL);
	task->buf = spdk_dma_zmalloc(g_io_size_bytes, 0x200, NULL);
	if (task->buf == NULL) {
		fprintf(stderr, "task->buf spdk_zmalloc failed\n");
		fprintf(stderr, "task->buf spdk_dma_zmalloc failed\n");
		exit(1);
	}
}

@ -130,7 +130,7 @@ static void build_io_request_0(struct io_request *req)
{
	req->nseg = 1;

	req->iovs[0].base = spdk_zmalloc(0x800, 4, NULL);
	req->iovs[0].base = spdk_dma_zmalloc(0x800, 4, NULL);
	req->iovs[0].len = 0x800;
}

@ -139,7 +139,7 @@ static void build_io_request_1(struct io_request *req)
	req->nseg = 1;

	/* 512B for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x200, 0x200, NULL);
	req->iovs[0].base = spdk_dma_zmalloc(0x200, 0x200, NULL);
	req->iovs[0].len = 0x200;
}

@ -148,7 +148,7 @@ static void build_io_request_2(struct io_request *req)
	req->nseg = 1;

	/* 256KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x40000, 0x1000, NULL);
	req->iovs[0].base = spdk_dma_zmalloc(0x40000, 0x1000, NULL);
	req->iovs[0].len = 0x40000;
}

@ -158,16 +158,16 @@ static void build_io_request_3(struct io_request *req)

	/* 2KB for 1st sge, make sure the iov address start at 0x800 boundary,
	 * and end with 0x1000 boundary */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[0].base = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[0].offset = 0x800;
	req->iovs[0].len = 0x800;

	/* 4KB for 2th sge */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[1].base = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[1].len = 0x1000;

	/* 12KB for 3th sge */
	req->iovs[2].base = spdk_zmalloc(0x3000, 0x1000, NULL);
	req->iovs[2].base = spdk_dma_zmalloc(0x3000, 0x1000, NULL);
	req->iovs[2].len = 0x3000;
}

@ -178,12 +178,12 @@ static void build_io_request_4(struct io_request *req)
	req->nseg = 32;

	/* 4KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[0].base = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[0].len = 0x1000;

	/* 8KB for the rest 31 sge */
	for (i = 1; i < req->nseg; i++) {
		req->iovs[i].base = spdk_zmalloc(0x2000, 0x1000, NULL);
		req->iovs[i].base = spdk_dma_zmalloc(0x2000, 0x1000, NULL);
		req->iovs[i].len = 0x2000;
	}
}

@ -193,7 +193,7 @@ static void build_io_request_5(struct io_request *req)
	req->nseg = 1;

	/* 8KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x2000, 0x1000, NULL);
	req->iovs[0].base = spdk_dma_zmalloc(0x2000, 0x1000, NULL);
	req->iovs[0].len = 0x2000;
}

@ -202,11 +202,11 @@ static void build_io_request_6(struct io_request *req)
	req->nseg = 2;

	/* 4KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[0].base = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[0].len = 0x1000;

	/* 4KB for 2st sge */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[1].base = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[1].len = 0x1000;
}

@ -220,7 +220,7 @@ static void build_io_request_7(struct io_request *req)
	 * Create a 64KB sge, but ensure it is *not* aligned on a 4KB
	 * boundary. This is valid for single element buffers with PRP.
	 */
	base = spdk_zmalloc(0x11000, 0x1000, NULL);
	base = spdk_dma_zmalloc(0x11000, 0x1000, NULL);
	req->misalign = 64;
	req->iovs[0].base = base + req->misalign;
	req->iovs[0].len = 0x10000;

@ -234,7 +234,7 @@ static void build_io_request_8(struct io_request *req)
	 * 1KB for 1st sge, make sure the iov address does not start and end
	 * at 0x1000 boundary
	 */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[0].base = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[0].offset = 0x400;
	req->iovs[0].len = 0x400;

@ -242,7 +242,7 @@ static void build_io_request_8(struct io_request *req)
	 * 1KB for 1st sge, make sure the iov address does not start and end
	 * at 0x1000 boundary
	 */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[1].base = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
	req->iovs[1].offset = 0x400;
	req->iovs[1].len = 0x400;
}

@ -262,7 +262,7 @@ static void build_io_request_9(struct io_request *req)
	assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));

	for (i = 0; i < req->nseg; i++) {
		iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL);
		iovs[i].base = spdk_dma_zmalloc(req_off[i] + req_len[i], 0x4000, NULL);
		iovs[i].offset = req_off[i];
		iovs[i].len = req_len[i];
	}

@ -282,7 +282,7 @@ static void build_io_request_10(struct io_request *req)
	assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));

	for (i = 0; i < req->nseg; i++) {
		iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL);
		iovs[i].base = spdk_dma_zmalloc(req_off[i] + req_len[i], 0x4000, NULL);
		iovs[i].offset = req_off[i];
		iovs[i].len = req_len[i];
	}

@ -300,10 +300,10 @@ free_req(struct io_request *req)
	}

	for (i = 0; i < req->nseg; i++) {
		spdk_free(req->iovs[i].base - req->misalign);
		spdk_dma_free(req->iovs[i].base - req->misalign);
	}

	spdk_free(req);
	spdk_dma_free(req);
}

static int
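One detail worth noting in the hunks above: build_io_request_7() deliberately offsets the returned pointer by `misalign` bytes, so free_req() must subtract that offset again before calling spdk_dma_free(). A minimal sketch of the same idea follows; the struct and sizes are illustrative, only the renamed allocator calls come from this change.

```c
#include <stddef.h>
#include <stdint.h>
#include "spdk/env.h"

/* Illustrative only: record an intentional misalignment so the pointer
 * handed to spdk_dma_free() is the one spdk_dma_zmalloc() returned. */
struct misaligned_buf {
	void	*iov_base;	/* pointer actually used for I/O */
	size_t	misalign;	/* bytes skipped at the start of the allocation */
};

static int
misaligned_buf_alloc(struct misaligned_buf *buf, size_t len, size_t misalign)
{
	/* Over-allocate so the usable region still covers len bytes. */
	uint8_t *base = spdk_dma_zmalloc(len + misalign, 0x1000, NULL);

	if (base == NULL) {
		return -1;
	}
	buf->misalign = misalign;
	buf->iov_base = base + misalign;
	return 0;
}

static void
misaligned_buf_free(struct misaligned_buf *buf)
{
	/* Undo the offset; freeing iov_base itself would hand the allocator
	 * a pointer it never returned. */
	spdk_dma_free((uint8_t *)buf->iov_base - buf->misalign);
}
```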
@ -334,7 +334,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const ch
		return 0;
	}

	req = spdk_zmalloc(sizeof(*req), 0, NULL);
	req = spdk_dma_zmalloc(sizeof(*req), 0, NULL);
	if (!req) {
		fprintf(stderr, "Allocate request failed\n");
		return 0;

@ -597,7 +597,7 @@ test_nvme_ns_cmd_dataset_management(void)
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT(g_request->cmd.cdw10 == 0);
	CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
	spdk_free(g_request->payload.u.contig);
	spdk_dma_free(g_request->payload.u.contig);
	nvme_free_request(g_request);

	/* TRIM 256 LBAs */

@ -609,7 +609,7 @@ test_nvme_ns_cmd_dataset_management(void)
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT(g_request->cmd.cdw10 == 255u);
	CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
	spdk_free(g_request->payload.u.contig);
	spdk_dma_free(g_request->payload.u.contig);
	nvme_free_request(g_request);

	rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,

@ -756,7 +756,7 @@ test_nvme_ns_cmd_reservation_register(void)

	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

	spdk_free(g_request->payload.u.contig);
	spdk_dma_free(g_request->payload.u.contig);
	nvme_free_request(g_request);
	free(payload);
	cleanup_after_test(&qpair);

@ -794,7 +794,7 @@ test_nvme_ns_cmd_reservation_release(void)

	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

	spdk_free(g_request->payload.u.contig);
	spdk_dma_free(g_request->payload.u.contig);
	nvme_free_request(g_request);
	free(payload);
	cleanup_after_test(&qpair);

@ -832,7 +832,7 @@ test_nvme_ns_cmd_reservation_acquire(void)

	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

	spdk_free(g_request->payload.u.contig);
	spdk_dma_free(g_request->payload.u.contig);
	nvme_free_request(g_request);
	free(payload);
	cleanup_after_test(&qpair);

@ -864,7 +864,7 @@ test_nvme_ns_cmd_reservation_report(void)

	CU_ASSERT(g_request->cmd.cdw10 == (size / 4));

	spdk_free(g_request->payload.u.contig);
	spdk_dma_free(g_request->payload.u.contig);
	nvme_free_request(g_request);
	free(payload);
	cleanup_after_test(&qpair);

@ -100,7 +100,7 @@ spdk_get_task(uint32_t *owner_task_ctr)
}

void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = malloc(size);
	if (phys_addr)

@ -109,7 +109,7 @@ spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
}

void *
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = calloc(size, 1);
	if (phys_addr)

@ -118,7 +118,7 @@ spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
}

void
spdk_free(void *buf)
spdk_dma_free(void *buf)
{
	free(buf);
}

@ -46,7 +46,7 @@ static uint64_t g_test_bdev_num_blocks;
static struct spdk_bdev_io g_test_bdev_io;

void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = malloc(size);
	if (phys_addr)

@ -56,7 +56,7 @@ spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
}

void *
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = calloc(size, 1);
	if (phys_addr)

@ -66,7 +66,7 @@ spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
}

void
spdk_free(void *buf)
spdk_dma_free(void *buf)
{
	free(buf);
}
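The two hunks above are per-test stubs: each unit test links its own libc-backed versions of the renamed functions instead of the real env implementation, so it can run without hugepages or a DPDK environment. A compact, self-contained sketch of that pattern is shown below; the surrounding test is hypothetical, and only the function names and signatures are the ones introduced by this rename.

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Test-only stubs: back the renamed DMA allocators with plain libc.
 * Alignment is ignored and the "physical" address is faked, matching
 * what the stubbed env implementations in this commit do. */
void *
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = malloc(size);

	if (phys_addr) {
		*phys_addr = (uint64_t)(uintptr_t)buf;	/* fake physical address */
	}
	return buf;
}

void *
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = spdk_dma_malloc(size, align, phys_addr);

	if (buf) {
		memset(buf, 0, size);
	}
	return buf;
}

void
spdk_dma_free(void *buf)
{
	free(buf);
}
```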
@ -36,7 +36,7 @@
#include "spdk/env.h"

void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = NULL;
	if (posix_memalign(&buf, align, size)) {

@ -49,9 +49,9 @@ spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
}

void *
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *buf = spdk_malloc(size, align, phys_addr);
	void *buf = spdk_dma_malloc(size, align, phys_addr);

	if (buf != NULL) {
		memset(buf, 0, size);

@ -60,24 +60,24 @@ spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
}

void *
spdk_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_malloc(size, align, phys_addr);
	return spdk_dma_malloc(size, align, phys_addr);
}

void *
spdk_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_zmalloc(size, align, phys_addr);
	return spdk_dma_zmalloc(size, align, phys_addr);
}

void *
spdk_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	return realloc(buf, size);
}

void spdk_free(void *buf)
void spdk_dma_free(void *buf)
{
	free(buf);
}
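Taken together, the hunks above cover the full renamed allocation surface: spdk_dma_malloc()/spdk_dma_zmalloc(), their *_socket variants, spdk_dma_realloc(), and spdk_dma_free(). A minimal caller-side sketch against those signatures follows; the buffer size, alignment, and NUMA socket are illustrative values, not taken from this change.

```c
#include <inttypes.h>
#include <stdio.h>
#include "spdk/env.h"

int
allocate_io_buffers(void)
{
	uint64_t phys_addr = 0;
	void *buf, *local;

	/* Formerly spdk_zmalloc(): zeroed, 4 KiB-aligned, physical address
	 * reported through the out-parameter. */
	buf = spdk_dma_zmalloc(4096, 0x1000, &phys_addr);
	if (buf == NULL) {
		return -1;
	}
	printf("virt=%p phys=0x%" PRIx64 "\n", buf, phys_addr);

	/* NUMA-aware variant (formerly spdk_malloc_socket()), pinned to
	 * socket 0; pass NULL when the physical address is not needed. */
	local = spdk_dma_malloc_socket(4096, 0x1000, NULL, 0);
	if (local != NULL) {
		spdk_dma_free(local);
	}

	spdk_dma_free(buf);	/* formerly spdk_free() */
	return 0;
}
```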
@ -38,13 +38,13 @@
#include "bit_array.c"

void *
spdk_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	return realloc(buf, size);
}

void
spdk_free(void *buf)
spdk_dma_free(void *buf)
{
	free(buf);
}
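This last stub only needs the realloc/free pair, presumably because the unit under test (bit_array) resizes its storage through spdk_dma_realloc(). A generic grow-in-place sketch against the renamed signature; the helper and its growth policy are hypothetical.

```c
#include <stddef.h>
#include "spdk/env.h"

/* Hypothetical helper: grow a buffer with the renamed spdk_dma_realloc(),
 * preserving existing contents (ordinary realloc semantics). */
static void *
grow_buffer(void *buf, size_t *capacity)
{
	size_t new_capacity = *capacity ? *capacity * 2 : 64;
	void *new_buf = spdk_dma_realloc(buf, new_capacity, 64, NULL);

	if (new_buf == NULL) {
		return NULL;	/* caller still owns buf and frees it with spdk_dma_free() */
	}
	*capacity = new_capacity;
	return new_buf;
}
```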