eal: remove sync version of power monitor

Currently, the "sync" version of power monitor intrinsic is supposed to
be used for purposes of waking up a sleeping core. However, there are
better ways to achieve the same result, so remove the unneeded function.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Anatoly Burakov, 2021-01-14 14:46:06 +00:00 (committed by Thomas Monjalon)
parent 6a17919b0e
commit 1b36fc280a
5 changed files with 0 additions and 121 deletions
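
The "better ways" referred to above boil down to the fact that a core sleeping in rte_power_monitor() is woken by any write to the address it monitors, so a waker thread only needs to store to that location (later DPDK releases added a dedicated rte_power_monitor_wakeup() helper for this). A minimal sketch of the pattern, assuming the 21.02 rte_power_monitor_cond layout visible in the x86 diff below; wake_flag and both helpers are hypothetical:

#include <stdint.h>
#include <rte_power_intrinsics.h>

static volatile uint64_t wake_flag; /* hypothetical monitored word */

/* sleeping lcore: arm the monitor and doze until woken or timed out */
static void
sleep_until_woken(uint64_t tsc_deadline)
{
	struct rte_power_monitor_cond pmc = {
		.addr = &wake_flag,
		.val = 1,		/* abort the sleep if the flag is already set */
		.mask = UINT64_MAX,
		.data_sz = sizeof(uint64_t),
	};

	/* returns once the address is written or the deadline is reached */
	rte_power_monitor(&pmc, tsc_deadline);
}

/* waking lcore: a plain store to the monitored line ends the sleep */
static void
wake_sleeper(void)
{
	wake_flag = 1;
}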

lib/librte_eal/arm/rte_power_intrinsics.c

@@ -17,20 +17,6 @@ rte_power_monitor(const struct rte_power_monitor_cond *pmc,
 	return -ENOTSUP;
 }
 
-/**
- * This function is not supported on ARM.
- */
-int
-rte_power_monitor_sync(const struct rte_power_monitor_cond *pmc,
-		const uint64_t tsc_timestamp, rte_spinlock_t *lck)
-{
-	RTE_SET_USED(pmc);
-	RTE_SET_USED(tsc_timestamp);
-	RTE_SET_USED(lck);
-
-	return -ENOTSUP;
-}
-
 /**
  * This function is not supported on ARM.
  */
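
Since the ARM (and, further down, PPC64) implementations are stubs that only return -ENOTSUP, callers are expected to probe for support at runtime, as the doc comment removed from the generic header below points out. A short sketch of that probe, assuming the rte_cpu_get_intrinsics_support() API from rte_cpuflags.h:

#include <rte_cpuflags.h>

/* returns non-zero when rte_power_monitor() is usable on this CPU;
 * on stub implementations such as the ARM one above it reports 0 */
static int
power_monitor_supported(void)
{
	struct rte_cpu_intrinsics intr;

	rte_cpu_get_intrinsics_support(&intr);
	return intr.power_monitor;
}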

lib/librte_eal/include/generic/rte_power_intrinsics.h

@@ -61,44 +61,6 @@ struct rte_power_monitor_cond {
 __rte_experimental
 int rte_power_monitor(const struct rte_power_monitor_cond *pmc,
 		const uint64_t tsc_timestamp);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
- * Monitor specific address for changes. This will cause the CPU to enter an
- * architecture-defined optimized power state until either the specified
- * memory address is written to, a certain TSC timestamp is reached, or other
- * reasons cause the CPU to wake up.
- *
- * Additionally, an `expected` 64-bit value and 64-bit mask are provided. If
- * mask is non-zero, the current value pointed to by the `p` pointer will be
- * checked against the expected value, and if they match, the entering of
- * optimized power state may be aborted.
- *
- * This call will also lock a spinlock on entering sleep, and release it on
- * waking up the CPU.
- *
- * @warning It is responsibility of the user to check if this function is
- * supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
- *
- * @param pmc
- *   The monitoring condition structure.
- * @param tsc_timestamp
- *   Maximum TSC timestamp to wait for. Note that the wait behavior is
- *   architecture-dependent.
- * @param lck
- *   A spinlock that must be locked before entering the function, will be
- *   unlocked while the CPU is sleeping, and will be locked again once the CPU
- *   wakes up.
- *
- * @return
- *   0 on success
- *   -EINVAL on invalid parameters
- *   -ENOTSUP if unsupported
- */
-__rte_experimental
-int rte_power_monitor_sync(const struct rte_power_monitor_cond *pmc,
-		const uint64_t tsc_timestamp, rte_spinlock_t *lck);
-
 /**
  * @warning
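
The comment just removed spells out the lock handoff that made this the "sync" variant: the caller enters with lck held, the function releases it for the duration of the sleep, and re-acquires it before returning. A sketch of the calling pattern this implied; the workq structure, its work_pending flag, pmc, and deadline are hypothetical:

#include <rte_power_intrinsics.h>
#include <rte_spinlock.h>

/* hypothetical work queue guarded by a spinlock */
struct workq {
	rte_spinlock_t lock;
	volatile uint32_t work_pending;
};

static void
wait_for_work(struct workq *q, const struct rte_power_monitor_cond *pmc,
		uint64_t deadline)
{
	rte_spinlock_lock(&q->lock);	/* lck must be held on entry */
	while (!q->work_pending) {
		/* sleeps with q->lock released, returns with it re-taken */
		rte_power_monitor_sync(pmc, deadline, &q->lock);
	}
	/* ... consume the work while still holding the lock ... */
	rte_spinlock_unlock(&q->lock);
}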

lib/librte_eal/ppc/rte_power_intrinsics.c

@@ -17,20 +17,6 @@ rte_power_monitor(const struct rte_power_monitor_cond *pmc,
 	return -ENOTSUP;
 }
 
-/**
- * This function is not supported on PPC64.
- */
-int
-rte_power_monitor_sync(const struct rte_power_monitor_cond *pmc,
-		const uint64_t tsc_timestamp, rte_spinlock_t *lck)
-{
-	RTE_SET_USED(pmc);
-	RTE_SET_USED(tsc_timestamp);
-	RTE_SET_USED(lck);
-
-	return -ENOTSUP;
-}
-
 /**
  * This function is not supported on PPC64.
  */

lib/librte_eal/version.map

@@ -406,7 +406,6 @@ EXPERIMENTAL {
 
 	# added in 21.02
 	rte_power_monitor;
-	rte_power_monitor_sync;
 	rte_power_pause;
 	rte_thread_tls_key_create;
 	rte_thread_tls_key_delete;

lib/librte_eal/x86/rte_power_intrinsics.c

@@ -90,60 +90,6 @@ rte_power_monitor(const struct rte_power_monitor_cond *pmc,
 	return 0;
 }
 
-/**
- * This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state.
- * For more information about usage of these instructions, please refer to
- * Intel(R) 64 and IA-32 Architectures Software Developer's Manual.
- */
-int
-rte_power_monitor_sync(const struct rte_power_monitor_cond *pmc,
-		const uint64_t tsc_timestamp, rte_spinlock_t *lck)
-{
-	const uint32_t tsc_l = (uint32_t)tsc_timestamp;
-	const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);
-
-	/* prevent user from running this instruction if it's not supported */
-	if (!wait_supported)
-		return -ENOTSUP;
-
-	if (pmc == NULL || lck == NULL)
-		return -EINVAL;
-
-	if (__check_val_size(pmc->data_sz) < 0)
-		return -EINVAL;
-
-	/*
-	 * we're using raw byte codes for now as only the newest compiler
-	 * versions support this instruction natively.
-	 */
-
-	/* set address for UMONITOR */
-	asm volatile(".byte 0xf3, 0x0f, 0xae, 0xf7;"
-			:
-			: "D"(pmc->addr));
-
-	if (pmc->mask) {
-		const uint64_t cur_value = __get_umwait_val(
-				pmc->addr, pmc->data_sz);
-		const uint64_t masked = cur_value & pmc->mask;
-
-		/* if the masked value is already matching, abort */
-		if (masked == pmc->val)
-			return 0;
-	}
-
-	rte_spinlock_unlock(lck);
-
-	/* execute UMWAIT */
-	asm volatile(".byte 0xf2, 0x0f, 0xae, 0xf7;"
-			: /* ignore rflags */
-			: "D"(0), /* enter C0.2 */
-			  "a"(tsc_l), "d"(tsc_h));
-
-	rte_spinlock_lock(lck);
-
-	return 0;
-}
-
 /**
  * This function uses TPAUSE instruction and will enter C0.2 state. For more
  * information about usage of this instruction, please refer to Intel(R) 64 and
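
As the removed comment notes, the raw .byte sequences stand in for UMONITOR and UMWAIT because only the newest compilers emit these instructions natively. For reference, a sketch of the same pair written with the WAITPKG intrinsics from immintrin.h; this is not DPDK code and assumes a toolchain new enough to build with -mwaitpkg:

#include <stdint.h>
#include <immintrin.h>

/* rough intrinsic equivalent of the raw-byte asm above;
 * requires a WAITPKG-capable compiler and -mwaitpkg */
static inline void
monitor_and_wait(volatile void *addr, uint64_t tsc_deadline)
{
	_umonitor((void *)addr);	/* arm the monitor on this cacheline */
	_umwait(0, tsc_deadline);	/* control 0 requests C0.2, as above */
}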