mempool: distinguish cache and pool debug counters
If the cache is enabled, objects are retrieved from / put into the per-lcore
cache first, and only then from / into the common pool. Currently the debug
stats count objects retrieved from and put to the cache and the common pool
together; it is better to distinguish them.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Signed-off-by: Dharmik Thakkar <dharmik.thakkar@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
parent 5648704065
commit cee151b41b
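Context note: the hunks below bump counters through the existing
__MEMPOOL_STAT_ADD helper, which is compiled out unless
RTE_LIBRTE_MEMPOOL_DEBUG is defined. A rough sketch of what such a per-lcore
stat macro looks like, shown here only for orientation (an approximation, not
part of this change; the authoritative definition lives in rte_mempool.h):

/* Approximate sketch: update the calling lcore's counter, or do nothing
 * when mempool debug statistics are disabled. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
		unsigned int __lcore_id = rte_lcore_id();       \
		if (__lcore_id < RTE_MAX_LCORE)                 \
			(mp)->stats[__lcore_id].name += (n);    \
	} while (0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif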
@@ -1244,6 +1244,10 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 		sum.put_bulk += mp->stats[lcore_id].put_bulk;
 		sum.put_objs += mp->stats[lcore_id].put_objs;
+		sum.put_common_pool_bulk += mp->stats[lcore_id].put_common_pool_bulk;
+		sum.put_common_pool_objs += mp->stats[lcore_id].put_common_pool_objs;
+		sum.get_common_pool_bulk += mp->stats[lcore_id].get_common_pool_bulk;
+		sum.get_common_pool_objs += mp->stats[lcore_id].get_common_pool_objs;
 		sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
 		sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
 		sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
@@ -1254,6 +1258,10 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	fprintf(f, "  stats:\n");
 	fprintf(f, "    put_bulk=%"PRIu64"\n", sum.put_bulk);
 	fprintf(f, "    put_objs=%"PRIu64"\n", sum.put_objs);
+	fprintf(f, "    put_common_pool_bulk=%"PRIu64"\n", sum.put_common_pool_bulk);
+	fprintf(f, "    put_common_pool_objs=%"PRIu64"\n", sum.put_common_pool_objs);
+	fprintf(f, "    get_common_pool_bulk=%"PRIu64"\n", sum.get_common_pool_bulk);
+	fprintf(f, "    get_common_pool_objs=%"PRIu64"\n", sum.get_common_pool_objs);
 	fprintf(f, "    get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
 	fprintf(f, "    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
 	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
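For illustration, a minimal usage sketch that would exercise the new dump
lines (assumes a build with RTE_LIBRTE_MEMPOOL_DEBUG enabled; the pool name
and the sizes are arbitrary):

#include <stdio.h>

#include <rte_eal.h>
#include <rte_memory.h>
#include <rte_mempool.h>

int main(int argc, char **argv)
{
	struct rte_mempool *mp;
	void *obj;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* 512 objects of 64 bytes with a 32-object per-lcore cache. */
	mp = rte_mempool_create("test_pool", 512, 64, 32, 0,
				NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return -1;

	/* One get/put pair: the get misses the empty cache and refills it
	 * from the common pool, so the common-pool counters move; the put
	 * lands in the cache only. */
	if (rte_mempool_get(mp, &obj) == 0)
		rte_mempool_put(mp, obj);

	/* The dump now reports put/get_common_pool_* separately from the
	 * put/get_* totals. */
	rte_mempool_dump(stdout, mp);
	return 0;
}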
@@ -64,18 +64,23 @@ extern "C" {
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 /**
  * A structure that stores the mempool statistics (per-lcore).
+ * Note: Cache stats (put_cache_bulk/objs, get_cache_bulk/objs) are not
+ * captured since they can be calculated from other stats.
+ * For example: put_cache_objs = put_objs - put_common_pool_objs.
  */
 struct rte_mempool_debug_stats {
-	uint64_t put_bulk;         /**< Number of puts. */
-	uint64_t put_objs;         /**< Number of objects successfully put. */
-	uint64_t get_success_bulk; /**< Successful allocation number. */
-	uint64_t get_success_objs; /**< Objects successfully allocated. */
-	uint64_t get_fail_bulk;    /**< Failed allocation number. */
-	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
-	/** Successful allocation number of contiguous blocks. */
-	uint64_t get_success_blks;
-	/** Failed allocation number of contiguous blocks. */
-	uint64_t get_fail_blks;
+	uint64_t put_bulk;             /**< Number of puts. */
+	uint64_t put_objs;             /**< Number of objects successfully put. */
+	uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */
+	uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */
+	uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */
+	uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */
+	uint64_t get_success_bulk;     /**< Successful allocation number. */
+	uint64_t get_success_objs;     /**< Objects successfully allocated. */
+	uint64_t get_fail_bulk;        /**< Failed allocation number. */
+	uint64_t get_fail_objs;        /**< Objects that failed to be allocated. */
+	uint64_t get_success_blks;     /**< Successful allocation number of contiguous blocks. */
+	uint64_t get_fail_blks;        /**< Failed allocation number of contiguous blocks. */
 } __rte_cache_aligned;
 #endif
 
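The new doc comment deliberately leaves the cache-side counters out of the
struct because they can be derived from the stored totals. A small sketch of
that derivation for one lcore, following the identity given in the comment
(print_lcore_cache_stats is a hypothetical helper, not part of the mempool
API):

#include <inttypes.h>
#include <stdio.h>

#include <rte_mempool.h>

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/* Hypothetical helper: derive the cache-only put counter for one lcore,
 * using put_cache_objs = put_objs - put_common_pool_objs as documented in
 * the struct comment above. The other cache counters follow the same idea. */
static void
print_lcore_cache_stats(const struct rte_mempool *mp, unsigned int lcore_id)
{
	const struct rte_mempool_debug_stats *s = &mp->stats[lcore_id];
	uint64_t put_cache_objs = s->put_objs - s->put_common_pool_objs;

	printf("lcore %u: put_cache_objs=%" PRIu64 "\n",
	       lcore_id, put_cache_objs);
}
#endif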
@@ -699,10 +704,16 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
 		void **obj_table, unsigned n)
 {
 	struct rte_mempool_ops *ops;
+	int ret;
 
 	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
-	return ops->dequeue(mp, obj_table, n);
+	ret = ops->dequeue(mp, obj_table, n);
+	if (ret == 0) {
+		__MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
+		__MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
+	}
+	return ret;
 }
 
 /**
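Note that get_common_pool_* is only bumped when the driver's dequeue reports
success. The guard relies on the ops dequeue convention of returning 0 when
all n objects were provided and a negative errno otherwise; a purely
illustrative callback sketch (my_stack_pop/my_stack_push and the backing
store behind mp->pool_data are hypothetical):

#include <errno.h>

#include <rte_mempool.h>

/* Hypothetical backing store, declared only so the sketch is complete. */
int my_stack_pop(void *stack, void **obj);
void my_stack_push(void *stack, void *obj);

/* Illustrative dequeue callback: return 0 only if all n objects were
 * dequeued, otherwise roll back and return -ENOBUFS. The success case is
 * what the "if (ret == 0)" statistics guard above keys on. */
static int
my_ops_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (my_stack_pop(mp->pool_data, &obj_table[i]) != 0) {
			while (i-- > 0)
				my_stack_push(mp->pool_data, obj_table[i]);
			return -ENOBUFS;
		}
	}
	return 0;
}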
@@ -749,6 +760,8 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
 {
 	struct rte_mempool_ops *ops;
 
+	__MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
+	__MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
 	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
 	ops = rte_mempool_get_ops(mp->ops_index);
 	return ops->enqueue(mp, obj_table, n);