/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/un.h>
#include <fcntl.h>
#include <unistd.h>
#include <dirent.h>
#include <errno.h>
#include <sys/sysinfo.h>
#include <sys/types.h>

#include <rte_log.h>
#include <rte_power.h>
#include <rte_spinlock.h>

#include "channel_manager.h"
#include "power_manager.h"
#include "oob_monitor.h"

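/*
 * Scale the frequency of a single core: bail out if the core index is out of
 * range or the core is not enabled, otherwise take the per-core spinlock and
 * call the matching rte_power_freq_<DIRECTION>() helper.
 */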
#define POWER_SCALE_CORE(DIRECTION, core_num, ret) do { \
	if (core_num >= ci.core_count) \
		return -1; \
	if (!(ci.cd[core_num].global_enabled_cpus)) \
		return -1; \
	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl); \
	ret = rte_power_freq_##DIRECTION(core_num); \
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl); \
} while (0)

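/*
 * Scale the frequency of every core set in core_mask, skipping cores that are
 * not enabled. Each core is scaled under its own spinlock; ret is set to -1
 * if any rte_power_freq_<DIRECTION>() call fails.
 */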
#define POWER_SCALE_MASK(DIRECTION, core_mask, ret) do { \
	int i; \
	for (i = 0; core_mask; core_mask &= ~(1 << i++)) { \
		if ((core_mask >> i) & 1) { \
			if (!(ci.cd[i].global_enabled_cpus)) \
				continue; \
			rte_spinlock_lock(&global_core_freq_info[i].power_sl); \
			if (rte_power_freq_##DIRECTION(i) != 1) \
				ret = -1; \
			rte_spinlock_unlock(&global_core_freq_info[i].power_sl); \
		} \
	} \
} while (0)

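/* Per-core frequency table and the spinlock that serialises access to it. */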
struct freq_info {
	rte_spinlock_t power_sl;
	uint32_t freqs[RTE_MAX_LCORE_FREQS];
	unsigned num_freqs;
} __rte_cache_aligned;

static struct freq_info global_core_freq_info[POWER_MGR_MAX_CPUS];

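/*
 * Global view of the host cores: how many there are, which ones power
 * management may touch, and which ones have out-of-band (branch-ratio based)
 * monitoring enabled.
 */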
struct core_info ci;

#define SYSFS_CPU_PATH "/sys/devices/system/cpu/cpu%u/topology/core_id"

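/* Accessor for the global core_info structure. */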
struct core_info *
get_core_info(void)
{
	return &ci;
}

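/*
 * Discover the number of configured cores, allocate one core_details entry
 * per core, and start with every core enabled for power management and
 * out-of-band monitoring switched off.
 */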
int
core_info_init(void)
{
	struct core_info *ci;
	int i;

	ci = get_core_info();

	ci->core_count = get_nprocs_conf();
	ci->branch_ratio_threshold = BRANCH_RATIO_THRESHOLD;
	ci->cd = malloc(ci->core_count * sizeof(struct core_details));
	if (!ci->cd) {
		RTE_LOG(ERR, POWER_MANAGER, "Failed to allocate memory for core info.");
		return -1;
	}
	for (i = 0; i < ci->core_count; i++) {
		ci->cd[i].global_enabled_cpus = 1;
		ci->cd[i].oob_enabled = 0;
		ci->cd[i].msr_fd = 0;
	}
	printf("%d cores in system\n", ci->core_count);
	return 0;
}

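/*
 * Initialise the power library (ACPI cpufreq backend) for every enabled core,
 * fetch each core's available frequency list, and register any out-of-band
 * monitored cores via add_core_to_monitor().
 */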
int
power_manager_init(void)
{
	unsigned int i, num_cpus = 0, num_freqs = 0;
	int ret = 0;
	struct core_info *ci;

	rte_power_set_env(PM_ENV_ACPI_CPUFREQ);

	ci = get_core_info();
	if (!ci) {
		RTE_LOG(ERR, POWER_MANAGER,
				"Failed to get core info!\n");
		return -1;
	}

	for (i = 0; i < ci->core_count; i++) {
		if (ci->cd[i].global_enabled_cpus) {
			if (rte_power_init(i) < 0)
				RTE_LOG(ERR, POWER_MANAGER,
						"Unable to initialize power manager "
						"for core %u\n", i);
			num_cpus++;
			num_freqs = rte_power_freqs(i,
					global_core_freq_info[i].freqs,
					RTE_MAX_LCORE_FREQS);
			if (num_freqs == 0) {
				RTE_LOG(ERR, POWER_MANAGER,
					"Unable to get frequency list for core %u\n",
					i);
				ci->cd[i].oob_enabled = 0;
				ret = -1;
			}
			global_core_freq_info[i].num_freqs = num_freqs;

			rte_spinlock_init(&global_core_freq_info[i].power_sl);
		}
		if (ci->cd[i].oob_enabled)
			add_core_to_monitor(i);
	}
	RTE_LOG(INFO, POWER_MANAGER, "Managing %u cores out of %u available host cores\n",
			num_cpus, ci->core_count);

	return ret;
}

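/*
 * Look up the current frequency of core_num via the power library. Returns 0
 * for a disabled core or an unresolvable frequency index.
 */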
uint32_t
power_manager_get_current_frequency(unsigned core_num)
{
	uint32_t freq, index;

	if (core_num >= POWER_MGR_MAX_CPUS) {
		RTE_LOG(ERR, POWER_MANAGER, "Core(%u) is out of range 0...%d\n",
				core_num, POWER_MGR_MAX_CPUS-1);
		return -1;
	}
	if (!(ci.cd[core_num].global_enabled_cpus))
		return 0;

	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
	index = rte_power_get_freq(core_num);
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
	if (index >= POWER_MGR_MAX_CPUS)
		freq = 0;
	else
		freq = global_core_freq_info[core_num].freqs[index];

	return freq;
}

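/*
 * Shut down the power library on every enabled core and remove all cores
 * from out-of-band monitoring.
 */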
int
power_manager_exit(void)
{
	unsigned int i;
	int ret = 0;
	struct core_info *ci;

	ci = get_core_info();
	if (!ci) {
		RTE_LOG(ERR, POWER_MANAGER,
				"Failed to get core info!\n");
		return -1;
	}

	for (i = 0; i < ci->core_count; i++) {
		if (ci->cd[i].global_enabled_cpus) {
			if (rte_power_exit(i) < 0) {
				RTE_LOG(ERR, POWER_MANAGER, "Unable to shutdown power manager "
						"for core %u\n", i);
				ret = -1;
			}
			ci->cd[i].global_enabled_cpus = 0;
		}
		remove_core_from_monitor(i);
	}
	return ret;
}

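/*
 * Mask-based scaling wrappers: apply the requested frequency change (up,
 * down, min, max, enable/disable turbo) to every core set in core_mask.
 */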
int
power_manager_scale_mask_up(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(up, core_mask, ret);
	return ret;
}

int
power_manager_scale_mask_down(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(down, core_mask, ret);
	return ret;
}

int
power_manager_scale_mask_min(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(min, core_mask, ret);
	return ret;
}

int
power_manager_scale_mask_max(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(max, core_mask, ret);
	return ret;
}

int
power_manager_enable_turbo_mask(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(enable_turbo, core_mask, ret);
	return ret;
}

int
power_manager_disable_turbo_mask(uint64_t core_mask)
{
	int ret = 0;

	POWER_SCALE_MASK(disable_turbo, core_mask, ret);
	return ret;
}

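/*
 * Per-core scaling wrappers: apply the requested frequency change (up, down,
 * min, max, enable/disable turbo) to a single core.
 */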
int
power_manager_scale_core_up(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(up, core_num, ret);
	return ret;
}

int
power_manager_scale_core_down(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(down, core_num, ret);
	return ret;
}

int
power_manager_scale_core_min(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(min, core_num, ret);
	return ret;
}

int
power_manager_scale_core_max(unsigned core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(max, core_num, ret);
	return ret;
}

int
power_manager_enable_turbo_core(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(enable_turbo, core_num, ret);
	return ret;
}

int
power_manager_disable_turbo_core(unsigned int core_num)
{
	int ret = 0;

	POWER_SCALE_CORE(disable_turbo, core_num, ret);
	return ret;
}

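/*
 * Set core_num to the middle of its available frequency range (index
 * num_freqs / 2 in the frequency table reported by the power library).
 */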
int
power_manager_scale_core_med(unsigned int core_num)
{
	int ret = 0;
	struct core_info *ci;

	ci = get_core_info();
	if (core_num >= POWER_MGR_MAX_CPUS)
		return -1;
	if (!(ci->cd[core_num].global_enabled_cpus))
		return -1;
	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
	ret = rte_power_set_freq(core_num,
			global_core_freq_info[core_num].num_freqs / 2);
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
	return ret;
}