examples/power: allow VM to use lcores over 63

Extending the functionality to allow VMs to power-manage cores beyond 63.

Signed-off-by: David Hunt <david.hunt@intel.com>
Reviewed-by: Anatoly Burakov <anatoly.burakov@intel.com>
This commit is contained in:
David Hunt 2018-12-14 13:31:36 +00:00 committed by Thomas Monjalon
parent b6b22982f5
commit 5776b7a371
4 changed files with 57 additions and 106 deletions

View File

@@ -49,7 +49,7 @@ static bool global_hypervisor_available;
*/
struct virtual_machine_info {
char name[CHANNEL_MGR_MAX_NAME_LEN];
rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
char channel_mask[POWER_MGR_MAX_CPUS];
uint8_t num_channels;
@@ -79,7 +79,6 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
virVcpuInfoPtr cpuinfo;
unsigned i, j;
int n_vcpus;
uint64_t mask;
memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
@@ -119,27 +118,24 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
n_vcpus);
vm_info->info.nrVirtCpu = n_vcpus;
}
rte_spinlock_lock(&(vm_info->config_spinlock));
for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
mask = 0;
for (j = 0; j < global_n_host_cpus; j++) {
if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
mask |= 1ULL << j;
}
if (VIR_CPU_USABLE(global_cpumaps,
global_maplen, i, j) <= 0)
continue;
vm_info->pcpu_map[i] = j;
}
rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
}
rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
}
int
set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
{
unsigned i = 0;
int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
struct virtual_machine_info *vm_info;
char mask[POWER_MGR_MAX_CPUS];
memcpy(mask, core_mask, POWER_MGR_MAX_CPUS);
if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
@@ -166,17 +162,16 @@ set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
return -1;
}
memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
if (mask[i] != 1)
continue;
VIR_USE_CPU(global_cpumaps, i);
if (i >= global_n_host_cpus) {
RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
"number of CPUs(%u)\n",
i, global_n_host_cpus);
return -1;
}
VIR_USE_CPU(global_cpumaps, pcpu);
if (pcpu >= global_n_host_cpus) {
RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
"number of CPUs(%u)\n",
pcpu, global_n_host_cpus);
return -1;
}
if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
global_maplen, flags) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
@@ -185,33 +180,24 @@ set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
return -1;
}
rte_spinlock_lock(&(vm_info->config_spinlock));
memcpy(&vm_info->pcpu_mask[vcpu], mask, POWER_MGR_MAX_CPUS);
vm_info->pcpu_map[vcpu] = pcpu;
rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
}
int
set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
{
char mask[POWER_MGR_MAX_CPUS];
memset(mask, 0, POWER_MGR_MAX_CPUS);
mask[core_num] = 1;
return set_pcpus_mask(vm_name, vcpu, mask);
}
uint64_t
get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
uint16_t
get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
{
struct virtual_machine_info *vm_info =
(struct virtual_machine_info *)chan_info->priv_info;
if (global_hypervisor_available && (vm_info != NULL))
return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
else
if (global_hypervisor_available && (vm_info != NULL)) {
uint16_t pcpu;
rte_spinlock_lock(&(vm_info->config_spinlock));
pcpu = vm_info->pcpu_map[vcpu];
rte_spinlock_unlock(&(vm_info->config_spinlock));
return pcpu;
} else
return 0;
}
@@ -771,9 +757,11 @@ get_info_vm(const char *vm_name, struct vm_info *info)
rte_spinlock_unlock(&(vm_info->config_spinlock));
memcpy(info->name, vm_info->name, sizeof(vm_info->name));
rte_spinlock_lock(&(vm_info->config_spinlock));
for (i = 0; i < info->num_vcpus; i++) {
info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
info->pcpu_map[i] = vm_info->pcpu_map[i];
}
rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
}
@@ -823,7 +811,7 @@ add_vm(const char *vm_name)
}
for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
rte_atomic64_init(&new_domain->pcpu_mask[i]);
new_domain->pcpu_map[i] = 0;
}
if (update_pcpus_mask(new_domain) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");

View File

@@ -82,7 +82,7 @@ struct channel_info {
struct vm_info {
char name[CHANNEL_MGR_MAX_NAME_LEN]; /**< VM name */
enum vm_status status; /**< libvirt status */
uint64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS]; /**< pCPU mask for each vCPU */
uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS]; /**< pCPU map to vCPU */
unsigned num_vcpus; /**< number of vCPUS */
struct channel_info channels[CHANNEL_MGR_MAX_CHANNELS]; /**< Array of channel_info */
unsigned num_channels; /**< Number of channels */
@@ -115,8 +115,7 @@ int channel_manager_init(const char *path);
void channel_manager_exit(void);
/**
* Get the Physical CPU mask for VM lcore channel(vcpu), result is assigned to
* core_mask.
* Get the Physical CPU for VM lcore channel(vcpu).
* It is not thread-safe.
*
* @param chan_info
@@ -130,26 +129,7 @@ void channel_manager_exit(void);
* - 0 on error.
* - >0 on success.
*/
uint64_t get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu);
/**
* Set the Physical CPU mask for the specified vCPU.
* It is not thread-safe.
*
* @param name
* Virtual Machine name to lookup
*
* @param vcpu
* The virtual CPU to set.
*
* @param core_mask
* The core mask of the physical CPU(s) to bind the vCPU
*
* @return
* - 0 on success.
* - Negative on error.
*/
int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);
uint16_t get_pcpu(struct channel_info *chan_info, unsigned int vcpu);
/**
* Set the Physical CPU for the specified vCPU.
@@ -168,7 +148,8 @@ int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);
* - 0 on success.
* - Negative on error.
*/
int set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num);
int set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu);
/**
* Add a VM as specified by name to the Channel Manager. The name must
* correspond to a valid libvirt domain name.

View File

@@ -356,7 +356,6 @@ get_pcpu_to_control(struct policy *pol)
/* Convert vcpu to pcpu. */
struct vm_info info;
int pcpu, count;
uint64_t mask_u64b;
struct core_info *ci;
ci = get_core_info();
@@ -377,13 +376,8 @@ get_pcpu_to_control(struct policy *pol)
*/
get_info_vm(pol->pkt.vm_name, &info);
for (count = 0; count < pol->pkt.num_vcpu; count++) {
mask_u64b =
info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
for (pcpu = 0; mask_u64b;
mask_u64b &= ~(1ULL << pcpu++)) {
if ((mask_u64b >> pcpu) & 1)
pcpu_monitor(pol, ci, pcpu, count);
}
pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
pcpu_monitor(pol, ci, pcpu, count);
}
} else {
/*
@@ -636,8 +630,6 @@ apply_policy(struct policy *pol)
static int
process_request(struct channel_packet *pkt, struct channel_info *chan_info)
{
uint64_t core_mask;
if (chan_info == NULL)
return -1;
@@ -646,41 +638,31 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
return -1;
if (pkt->command == CPU_POWER) {
core_mask = get_pcpus_mask(chan_info, pkt->resource_id);
if (core_mask == 0) {
/*
* Core mask will be 0 in the case where
* hypervisor is not available so we're working in
* the host, so use the core as the mask.
*/
core_mask = 1ULL << pkt->resource_id;
}
if (__builtin_popcountll(core_mask) == 1) {
unsigned int core_num;
unsigned core_num = __builtin_ffsll(core_mask) - 1;
core_num = get_pcpu(chan_info, pkt->resource_id);
switch (pkt->unit) {
case(CPU_POWER_SCALE_MIN):
power_manager_scale_core_min(core_num);
switch (pkt->unit) {
case(CPU_POWER_SCALE_MIN):
power_manager_scale_core_min(core_num);
break;
case(CPU_POWER_SCALE_MAX):
power_manager_scale_core_max(core_num);
case(CPU_POWER_SCALE_MAX):
power_manager_scale_core_max(core_num);
break;
case(CPU_POWER_SCALE_DOWN):
power_manager_scale_core_down(core_num);
case(CPU_POWER_SCALE_DOWN):
power_manager_scale_core_down(core_num);
break;
case(CPU_POWER_SCALE_UP):
power_manager_scale_core_up(core_num);
case(CPU_POWER_SCALE_UP):
power_manager_scale_core_up(core_num);
break;
case(CPU_POWER_ENABLE_TURBO):
power_manager_enable_turbo_core(core_num);
case(CPU_POWER_ENABLE_TURBO):
power_manager_enable_turbo_core(core_num);
break;
case(CPU_POWER_DISABLE_TURBO):
power_manager_disable_turbo_core(core_num);
case(CPU_POWER_DISABLE_TURBO):
power_manager_disable_turbo_core(core_num);
break;
default:
break;
default:
break;
}
}
}

View File

@@ -95,8 +95,8 @@ cmd_show_vm_parsed(void *parsed_result, struct cmdline *cl,
}
cmdline_printf(cl, "Virtual CPU(s): %u\n", info.num_vcpus);
for (i = 0; i < info.num_vcpus; i++) {
cmdline_printf(cl, " [%u]: Physical CPU Mask 0x%"PRIx64"\n", i,
info.pcpu_mask[i]);
cmdline_printf(cl, " [%u]: Physical CPU %d\n", i,
info.pcpu_map[i]);
}
}