event/dlb: remove driver
Remove the event/dlb driver from the DPDK code base and update the release notes' "Removed Items" section to reflect the change. Also update doc/guides/rel_notes/release_20_11.rst to fix the missing-link issue caused by the removal of doc/guides/eventdevs/dlb.rst.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
parent 41f27795bb
commit 698fa82941
MAINTAINERS

@@ -1211,11 +1211,6 @@ Cavium OCTEON TX timvf
 M: Pavan Nikhilesh <pbhagavatula@marvell.com>
 F: drivers/event/octeontx/timvf_*
 
-Intel DLB
-M: Timothy McDaniel <timothy.mcdaniel@intel.com>
-F: drivers/event/dlb/
-F: doc/guides/eventdevs/dlb.rst
-
 Intel DLB2
 M: Timothy McDaniel <timothy.mcdaniel@intel.com>
 F: drivers/event/dlb2/
app/test/test_eventdev.c

@@ -1030,12 +1030,6 @@ test_eventdev_selftest_dpaa2(void)
 	return test_eventdev_selftest_impl("event_dpaa2", "");
 }
 
-static int
-test_eventdev_selftest_dlb(void)
-{
-	return test_eventdev_selftest_impl("dlb_event", "");
-}
-
 static int
 test_eventdev_selftest_dlb2(void)
 {
@@ -1049,5 +1043,4 @@ REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
 		      test_eventdev_selftest_octeontx2);
 REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
-REGISTER_TEST_COMMAND(eventdev_selftest_dlb, test_eventdev_selftest_dlb);
 REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
config/rte_config.h

@@ -139,12 +139,6 @@
 /* QEDE PMD defines */
 #define RTE_LIBRTE_QEDE_FW ""
 
-/* DLB PMD defines */
-#define RTE_LIBRTE_PMD_DLB_POLL_INTERVAL 1000
-#define RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE 0
-#undef RTE_LIBRTE_PMD_DLB_QUELL_STATS
-#define RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA 32
-
 /* DLB2 defines */
 #define RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL 1000
 #define RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE 0
doc/api/doxy-api-index.md

@@ -54,7 +54,6 @@ The public API headers are grouped by topics:
   [dpaa2_cmdif]        (@ref rte_pmd_dpaa2_cmdif.h),
   [dpaa2_qdma]         (@ref rte_pmd_dpaa2_qdma.h),
   [crypto_scheduler]   (@ref rte_cryptodev_scheduler.h),
-  [dlb]                (@ref rte_pmd_dlb.h),
   [dlb2]               (@ref rte_pmd_dlb2.h),
   [ifpga]              (@ref rte_pmd_ifpga.h)
 
doc/api/doxy-api.conf.in

@@ -7,7 +7,6 @@ USE_MDFILE_AS_MAINPAGE = @TOPDIR@/doc/api/doxy-api-index.md
 INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/event/dlb \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/mempool/dpaa2 \
                           @TOPDIR@/drivers/net/ark \
doc/guides/eventdevs/dlb.rst

@@ -1,341 +0,0 @@
..  SPDX-License-Identifier: BSD-3-Clause
    Copyright(c) 2020 Intel Corporation.

Driver for the Intel® Dynamic Load Balancer (DLB)
=================================================

The DPDK dlb poll mode driver supports the Intel® Dynamic Load Balancer.

Prerequisites
-------------

Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to set up
the basic DPDK environment.

Configuration
-------------

The DLB PF PMD is a user-space PMD that uses VFIO to gain direct
device access. To use this operation mode, the PCIe PF device must be bound
to a DPDK-compatible VFIO driver, such as vfio-pci.

Eventdev API Notes
------------------

The DLB provides the functions of a DPDK event device; specifically, it
supports atomic, ordered, and parallel scheduling of events from queues to
ports. However, the DLB hardware is not a perfect match to the eventdev API.
Some DLB features are abstracted by the PMD (e.g. directed ports), some are
only accessible as vdev command-line parameters, and certain eventdev features
are not supported (e.g. the event flow ID is not maintained during scheduling).

In general the dlb PMD is designed for ease of use and does not require a
detailed understanding of the hardware, but these details are important when
writing high-performance code. This section describes the places where the
eventdev API and the DLB misalign.

Scheduling Domain Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

There are 32 scheduling domains in the DLB.
When one is configured, it allocates load-balanced and
directed queues, ports, credits, and other hardware resources. Some
resource allocations are user-controlled -- the number of queues, for example
-- and others, like credit pools (one directed and one load-balanced pool per
scheduling domain), are not.

The DLB is a closed system eventdev, and as such the ``nb_events_limit`` device
setup argument and the per-port ``new_event_threshold`` argument apply as
defined in the eventdev header file. The limit is applied to all enqueues,
regardless of whether it will consume a directed or load-balanced credit.

Reconfiguration
~~~~~~~~~~~~~~~

The eventdev API allows one to reconfigure a device, its ports, and its queues
by first stopping the device, calling the configuration function(s), then
restarting the device. The DLB does not support configuring an individual queue
or port without first reconfiguring the entire device, however, so there are
certain reconfiguration sequences that are valid in the eventdev API but not
supported by the PMD.

Specifically, the PMD supports the following configuration sequence:

1. Configure and start the device
2. Stop the device
3. (Optional) Reconfigure the device
4. (Optional) If step 3 is run:

   a. Setup queue(s). The reconfigured queue(s) lose their previous port links.
   b. The reconfigured port(s) lose their previous queue links.

5. (Optional, only if steps 4a and 4b are run) Link port(s) to queue(s)
6. Restart the device. If the device is reconfigured in step 3 but one or more
   of its ports or queues are not, the PMD will apply their previous
   configuration (including port->queue links) at this time.

The PMD does not support the following configuration sequence:

1. Configure and start the device
2. Stop the device
3. Setup queue or setup port
4. Start the device

This sequence is not supported because the event device must be reconfigured
before its ports or queues can be.
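For illustration, a minimal sketch of the supported sequence, assuming a
single-queue, single-port device and omitting error handling (the
configuration structures and IDs here are placeholders):

.. code-block:: c

    #include <rte_eventdev.h>

    static void
    reconfigure_device(uint8_t dev_id, struct rte_event_dev_config *dev_conf,
                       struct rte_event_queue_conf *q_conf,
                       struct rte_event_port_conf *p_conf)
    {
        uint8_t queue_id = 0, port_id = 0;

        /* Steps 1-2: the device was configured and started earlier; stop it. */
        rte_event_dev_stop(dev_id);

        /* Step 3: reconfigure the entire device first. */
        rte_event_dev_configure(dev_id, dev_conf);

        /* Step 4: re-set up queues and ports; previous links are lost. */
        rte_event_queue_setup(dev_id, queue_id, q_conf);
        rte_event_port_setup(dev_id, port_id, p_conf);

        /* Step 5: re-establish the port->queue links. */
        rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1);

        /* Step 6: restart the device. */
        rte_event_dev_start(dev_id);
    }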
Load-Balanced Queues
~~~~~~~~~~~~~~~~~~~~

A load-balanced queue can support atomic and ordered scheduling, or atomic and
unordered scheduling, but not atomic, unordered, and ordered scheduling at the
same time. A queue's scheduling types are controlled by the event queue
configuration.

If the user sets the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag, the
``nb_atomic_order_sequences`` field determines the supported scheduling types.
With non-zero ``nb_atomic_order_sequences``, the queue is configured for atomic
and ordered scheduling. In this case, ``RTE_SCHED_TYPE_PARALLEL`` scheduling is
supported by scheduling those events as ordered events. Note that when such an
event is dequeued, its sched_type will be ``RTE_SCHED_TYPE_ORDERED``. Else, if
``nb_atomic_order_sequences`` is zero, the queue is configured for atomic and
unordered scheduling. In this case, ``RTE_SCHED_TYPE_ORDERED`` is unsupported.

If the ``RTE_EVENT_QUEUE_CFG_ALL_TYPES`` flag is not set, the schedule_type
field dictates the queue's scheduling type.

The ``nb_atomic_order_sequences`` queue configuration field sets the ordered
queue's reorder buffer size. The DLB has 4 groups of ordered queues, where each
group is configured to contain either 1 queue with 1024 reorder entries, or 2
queues with 512 reorder entries each, and so on down to 32 queues with 32
entries each.

When a load-balanced queue is created, the PMD will configure a new sequence
number group on demand if num_sequence_numbers does not match a pre-existing
group with available reorder buffer entries. If all sequence number groups are
in use, no new group will be created and queue configuration will fail. (Note
that when the PMD is used with a virtual DLB device, it cannot change the
sequence number configuration.)

The queue's ``nb_atomic_flows`` parameter is ignored by the DLB PMD, because
the DLB does not limit the number of flows a queue can track. In the DLB, all
load-balanced queues can use the full 16-bit flow ID range.
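As a sketch, an "all types" queue supporting atomic and ordered (and hence
parallel-as-ordered) scheduling could be configured as follows; the depth and
priority values are arbitrary placeholders:

.. code-block:: c

    #include <rte_eventdev.h>

    static int
    setup_all_types_queue(uint8_t dev_id, uint8_t queue_id)
    {
        struct rte_event_queue_conf conf = {
            .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
            /* Non-zero: atomic + ordered scheduling supported. */
            .nb_atomic_order_sequences = 1024,
            /* Ignored by the DLB PMD; flows are not limited. */
            .nb_atomic_flows = 1024,
            .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
        };

        return rte_event_queue_setup(dev_id, queue_id, &conf);
    }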
Load-balanced and Directed Ports
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

DLB ports come in two flavors: load-balanced and directed. The eventdev API
does not have the same concept, but it has a similar one: ports and queues that
are singly-linked (i.e. linked to a single queue or port, respectively).

The ``rte_event_dev_info_get()`` function reports the number of available
event ports and queues (among other things). For the DLB PMD, max_event_ports
and max_event_queues report the number of available load-balanced ports and
queues, and max_single_link_event_port_queue_pairs reports the number of
available directed ports and queues.

When a scheduling domain is created in ``rte_event_dev_configure()``, the user
specifies ``nb_event_ports`` and ``nb_single_link_event_port_queues``, which
control the total number of ports (load-balanced and directed) and the number
of directed ports. Hence, the number of requested load-balanced ports is
``nb_event_ports - nb_single_link_event_port_queues``. The ``nb_event_queues``
field specifies the total number of queues (load-balanced and directed). The
number of directed queues comes from ``nb_single_link_event_port_queues``,
since directed ports and queues come in pairs.

When a port is set up, the ``RTE_EVENT_PORT_CFG_SINGLE_LINK`` flag determines
whether it should be configured as a directed (the flag is set) or a
load-balanced (the flag is unset) port. Similarly, the
``RTE_EVENT_QUEUE_CFG_SINGLE_LINK`` queue configuration flag controls
whether it is a directed or load-balanced queue.

Load-balanced ports can only be linked to load-balanced queues, and directed
ports can only be linked to directed queues. Furthermore, directed ports can
only be linked to a single directed queue (and vice versa), and that link
cannot change after the eventdev is started.

The eventdev API does not have a directed scheduling type. To support directed
traffic, the dlb PMD detects when an event is being sent to a directed queue
and overrides its scheduling type. Note that the originally selected scheduling
type (atomic, ordered, or parallel) is not preserved, and an event's sched_type
will be set to ``RTE_SCHED_TYPE_ATOMIC`` when it is dequeued from a directed
port.
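A brief sketch of a configuration mixing both flavors, assuming two
load-balanced port/queue pairs plus one directed pair (all other values are
placeholders):

.. code-block:: c

    #include <rte_eventdev.h>

    static int
    configure_mixed_ports(uint8_t dev_id)
    {
        struct rte_event_dev_config cfg = {
            .nb_event_queues = 3,                  /* 2 load-balanced + 1 directed */
            .nb_event_ports = 3,                   /* 2 load-balanced + 1 directed */
            .nb_single_link_event_port_queues = 1, /* directed port/queue pairs */
            .nb_events_limit = 4096,
            .nb_event_queue_flows = 1024,
            .nb_event_port_dequeue_depth = 32,
            .nb_event_port_enqueue_depth = 32,
        };
        struct rte_event_port_conf dir_port = {
            .dequeue_depth = 32,
            .enqueue_depth = 32,
            .new_event_threshold = 4096,
            .event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK,
        };

        if (rte_event_dev_configure(dev_id, &cfg) < 0)
            return -1;

        /* Port 2 becomes the directed port; ports 0-1 are load-balanced. */
        return rte_event_port_setup(dev_id, 2, &dir_port);
    }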
Flow ID
~~~~~~~

The flow ID field is not preserved in the event when it is scheduled in the
DLB, because the DLB hardware control word format does not have sufficient
space to preserve every event field. As a result, the flow ID specified with
the enqueued event will not be in the dequeued event. If this field is
required, the application should pass it through an out-of-band path (for
example in the mbuf's udata64 field, if the event points to an mbuf) or
reconstruct the flow ID after receiving the event.

Also, the DLB hardware control word supports a 16-bit flow ID. Since struct
rte_event's flow_id field is 20 bits, the DLB PMD drops the four most
significant bits of the event's flow ID.
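One hedged sketch of the out-of-band approach: carry the flow ID in the
application's own per-event object referenced by ``ev->event_ptr`` (the
``struct job`` type here is purely illustrative):

.. code-block:: c

    #include <rte_eventdev.h>

    struct job {
        uint32_t flow_id; /* preserved out of band across scheduling */
        /* ... application payload ... */
    };

    static inline void
    enqueue_job(uint8_t dev_id, uint8_t port, struct rte_event *ev,
                struct job *j)
    {
        j->flow_id = ev->flow_id; /* the DLB will not preserve ev->flow_id */
        ev->event_ptr = j;
        rte_event_enqueue_burst(dev_id, port, ev, 1);
    }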
Hardware Credits
~~~~~~~~~~~~~~~~

The DLB uses a hardware credit scheme to prevent software from overflowing
hardware event storage, with each unit of storage represented by a credit. A
port spends a credit to enqueue an event, and hardware refills the ports with
credits as the events are scheduled to ports. Refills come from credit pools,
and each port is a member of a load-balanced credit pool and a directed credit
pool. The load-balanced credits are used to enqueue to load-balanced queues,
and directed credits are used for directed queues.

A DLB eventdev contains one load-balanced and one directed credit pool. These
pools' sizes are controlled by the nb_events_limit field in struct
rte_event_dev_config. The load-balanced pool is sized to contain
nb_events_limit credits, and the directed pool is sized to contain
nb_events_limit/4 credits. The directed pool size can be overridden with the
num_dir_credits vdev argument, like so:

.. code-block:: console

    --vdev=dlb1_event,num_dir_credits=<value>

This can be used if the default allocation is too low or too high for the
specific application's needs. The PMD also supports a vdev argument that
limits the max_num_events reported by rte_event_dev_info_get():

.. code-block:: console

    --vdev=dlb1_event,max_num_events=<value>

By default, max_num_events is reported as the total available load-balanced
credits. If multiple DLB-based applications are being used, it may be
desirable to control how many load-balanced credits each application uses,
particularly when application(s) are written to configure nb_events_limit
equal to the reported max_num_events.

Each port is a member of both credit pools. A port's credit allocation is
defined by its low watermark, high watermark, and refill quanta. These three
parameters are calculated by the dlb PMD like so:

- The load-balanced high watermark is set to the port's enqueue_depth.
  The directed high watermark is set to the minimum of the enqueue_depth and
  the directed pool size divided by the total number of ports.
- The refill quanta is set to half the high watermark.
- The low watermark is set to the minimum of 16 and the refill quanta.

When the eventdev is started, each port is pre-allocated a high watermark's
worth of credits. For example, if an eventdev contains four ports with enqueue
depths of 32 and a load-balanced credit pool size of 4096, each port will
start with 32 load-balanced credits, and there will be 3968 credits available
to replenish the ports. Thus, a single port is not capable of enqueueing up to
the nb_events_limit (without any events being dequeued), since the other ports
are retaining their initial credit allocation; in short, all ports must
enqueue in order to reach the limit.

If a port attempts to enqueue and has no credits available, the enqueue
operation will fail and the application must retry the enqueue. Credits are
replenished asynchronously by the DLB hardware.
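To make the allocation rules concrete, a small sketch that mirrors the
calculation described above (pure arithmetic; the names are illustrative, not
PMD internals):

.. code-block:: c

    #include <stdint.h>

    struct credit_params {
        uint32_t high_wm;
        uint32_t refill_quanta;
        uint32_t low_wm;
    };

    static struct credit_params
    dir_credit_params(uint32_t enqueue_depth, uint32_t dir_pool_size,
                      uint32_t num_ports)
    {
        struct credit_params p;
        uint32_t share = dir_pool_size / num_ports;

        /* Directed high watermark: min(enqueue_depth, pool share). */
        p.high_wm = enqueue_depth < share ? enqueue_depth : share;
        /* Refill quanta: half the high watermark. */
        p.refill_quanta = p.high_wm / 2;
        /* Low watermark: min(16, refill quanta). */
        p.low_wm = p.refill_quanta < 16 ? p.refill_quanta : 16;
        return p;
    }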
Software Credits
~~~~~~~~~~~~~~~~

The DLB is a "closed system" event dev, and the DLB PMD layers a software
credit scheme on top of the hardware credit scheme in order to comply with
the per-port backpressure described in the eventdev API.

The DLB's hardware scheme is local to a queue/pipeline stage: a port spends a
credit when it enqueues to a queue, and credits are later replenished after
the events are dequeued and released.

In the software credit scheme, a credit is consumed when a new (.op =
RTE_EVENT_OP_NEW) event is injected into the system, and the credit is
replenished when the event is released from the system (either explicitly with
RTE_EVENT_OP_RELEASE or implicitly in dequeue_burst()).

In this model, an event is "in the system" from its first enqueue into the
eventdev until it is last dequeued. If the event goes through multiple event
queues, it is still considered "in the system" while a worker thread is
processing it.

A port will fail to enqueue if the number of events in the system exceeds its
``new_event_threshold`` (specified at port setup time). A port will also fail
to enqueue if it lacks enough hardware credits to enqueue; load-balanced
credits are used to enqueue to a load-balanced queue, and directed credits are
used to enqueue to a directed queue.

The out-of-credit situations are typically transient, and an eventdev
application using the DLB ought to retry its enqueues if they fail.
If an enqueue fails, the DLB PMD sets rte_errno as follows:

- -ENOSPC: Credit exhaustion (either hardware or software)
- -EINVAL: Invalid argument, such as port ID, queue ID, or sched_type.

Depending on the pipeline the application has constructed, it's possible to
enter a credit deadlock scenario wherein a worker thread lacks the credit
to enqueue an event, and it must dequeue an event before it can recover the
credit. If the worker thread retries its enqueue indefinitely, it will not
make forward progress. Such a deadlock is possible if the application has
event "loops", in which an event is dequeued from queue A and later enqueued
back to queue A.

Due to this, a worker should stop retrying after a time, release the events it
is attempting to enqueue, and dequeue more events. It is important that the
worker release the events and not simply set them aside to retry the enqueue
again later, because the port has a limited history list size (by default,
twice the port's dequeue_depth).
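A hedged sketch of that worker policy (retry a bounded number of times, then
release; the retry limit is arbitrary):

.. code-block:: c

    #include <rte_eventdev.h>

    #define ENQ_RETRIES 100

    static void
    enqueue_or_release(uint8_t dev_id, uint8_t port, struct rte_event *ev)
    {
        int i;

        for (i = 0; i < ENQ_RETRIES; i++)
            if (rte_event_enqueue_burst(dev_id, port, ev, 1) == 1)
                return;

        /* Out of credits for too long: release the event instead of
         * holding it, so the port's history list entry is freed.
         */
        ev->op = RTE_EVENT_OP_RELEASE;
        rte_event_enqueue_burst(dev_id, port, ev, 1);
    }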
Priority
~~~~~~~~

The DLB supports event priority and per-port queue service priority, as
described in the eventdev header file. The DLB does not support 'global' event
queue priority established at queue creation time.

The DLB supports 8 event and queue service priority levels. For both priority
types, the PMD uses the upper three bits of the priority field to determine
the DLB priority, discarding the 5 least significant bits. The 5 least
significant event priority bits are not preserved when an event is enqueued.
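This mapping matches the macros in the PMD's private header (see dlb_priv.h
below); for example, an eventdev priority of ``RTE_EVENT_DEV_PRIORITY_NORMAL``
(128) maps to DLB priority 128 >> 5 = 4:

.. code-block:: c

    /* From dlb_priv.h: use the upper 3 bits of the event priority. */
    #define EV_TO_DLB_PRIO(x) ((x) >> 5)
    #define DLB_TO_EV_PRIO(x) ((x) << 5)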
Atomic Inflights Allocation
~~~~~~~~~~~~~~~~~~~~~~~~~~~

In the last stage prior to scheduling an atomic event to a CQ, the DLB holds
the inflight event in a temporary buffer that is divided among load-balanced
queues. If a queue's atomic buffer storage fills up, this can result in
head-of-line blocking. For example:

- An LDB queue allocated N atomic buffer entries
- All N entries are filled with events from flow X, which is pinned to CQ 0.

Until CQ 0 releases 1+ events, no other atomic flows for that LDB queue can be
scheduled. The likelihood of this case depends on the eventdev configuration,
traffic behavior, event processing latency, potential for a worker to be
interrupted or otherwise delayed, etc.

By default, the PMD allocates 16 buffer entries for each load-balanced queue,
which provides an even division across all 128 queues but potentially wastes
buffer space (e.g. if not all queues are used, or aren't used for atomic
scheduling).

The PMD provides a dev arg to override the default per-queue allocation. To
increase a vdev's per-queue atomic-inflight allocation to (for example) 64:

.. code-block:: console

    --vdev=dlb1_event,atm_inflights=64
Deferred Scheduling
~~~~~~~~~~~~~~~~~~~

The DLB PMD's default behavior for managing a CQ is to "pop" the CQ once per
dequeued event before returning from rte_event_dequeue_burst(). This frees the
corresponding entries in the CQ, which enables the DLB to schedule more events
to it.

To support applications seeking finer-grained scheduling control -- for
example deferring scheduling to get the best possible priority scheduling and
load-balancing -- the PMD supports a deferred scheduling mode. In this mode,
the CQ entry is not popped until the *subsequent* rte_event_dequeue_burst()
call. This mode only applies to load-balanced event ports with a dequeue depth
of 1.

To enable deferred scheduling, use the defer_sched vdev argument like so:

.. code-block:: console

    --vdev=dlb1_event,defer_sched=on
doc/guides/eventdevs/index.rst

@@ -11,7 +11,6 @@ application through the eventdev API.
     :maxdepth: 2
     :numbered:
 
-    dlb
     dlb2
     dpaa
     dpaa2
doc/guides/rel_notes/release_20_11.rst

@@ -345,8 +345,7 @@ New Features
 
 * **Added a new driver for the Intel Dynamic Load Balancer v1.0 device.**
 
-  Added the new ``dlb`` eventdev driver for the Intel DLB V1.0 device. See the
-  :doc:`../eventdevs/dlb` eventdev guide for more details on this new driver.
+  Added the new ``dlb`` eventdev driver for the Intel DLB V1.0 device.
 
 * **Added a new driver for the Intel Dynamic Load Balancer v2.0 device.**
 
doc/guides/rel_notes/release_21_05.rst

@@ -169,6 +169,9 @@ Removed Items
    Also, make sure to start the actual text at the margin.
    =======================================================
 
+* Removed support for Intel DLB V1 hardware. This is not a broad market device,
+  and existing customers already obtain the source code directly from Intel.
+
 
 API Changes
 -----------
drivers/event/dlb/dlb.c (file diff suppressed because it is too large)
drivers/event/dlb/dlb_iface.c

@@ -1,79 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <stdint.h>

#include "dlb_priv.h"

/* DLB PMD Internal interface function pointers.
 * If VDEV (bifurcated PMD), these will resolve to functions that issue ioctls
 * serviced by DLB kernel module.
 * If PCI (PF PMD), these will be implemented locally in user mode.
 */

void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);

int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);

void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);

int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
				    uint8_t *revision);

int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
				   struct dlb_get_num_resources_args *rsrcs);

int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,
				     struct dlb_create_sched_domain_args *args);

int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
					struct dlb_create_ldb_pool_args *cfg);

int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
					struct dlb_create_dir_pool_args *cfg);

int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle,
				  struct dlb_create_dir_queue_args *cfg);

int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
				  struct dlb_create_ldb_queue_args *cfg);

int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle,
				 struct dlb_create_ldb_port_args *cfg,
				 enum dlb_cq_poll_modes poll_mode);

int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
				 struct dlb_create_dir_port_args *cfg,
				 enum dlb_cq_poll_modes poll_mode);

int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
			 struct dlb_map_qid_args *cfg);

int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
			   struct dlb_unmap_qid_args *cfg);

int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
				    struct dlb_start_domain_args *cfg);

int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
				     struct dlb_pending_port_unmaps_args *args);

int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
				  enum dlb_cq_poll_modes *mode);

int (*dlb_iface_get_sn_allocation)(struct dlb_hw_dev *handle,
				   struct dlb_get_sn_allocation_args *args);

int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,
				   struct dlb_set_sn_allocation_args *args);

int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,
				  struct dlb_get_sn_occupancy_args *args);

int (*dlb_iface_get_ldb_queue_depth)(struct dlb_hw_dev *handle,
				     struct dlb_get_ldb_queue_depth_args *args);

int (*dlb_iface_get_dir_queue_depth)(struct dlb_hw_dev *handle,
				     struct dlb_get_dir_queue_depth_args *args);
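These definitions form a dispatch table: exactly one backend assigns them at
probe time, per the comment above. A minimal hypothetical sketch of a backend
registering its implementations (the function names here are illustrative; the
real PF-mode implementations live in the driver's pf/ sub-directory):

.. code-block:: c

    #include "dlb_priv.h"
    #include "dlb_iface.h"

    /* Hypothetical PF-mode implementation of one interface function. */
    static int
    example_pf_open(struct dlb_hw_dev *handle, const char *name)
    {
        /* Locate the PF device, stash it in handle->pf_dev, etc. */
        (void)handle;
        (void)name;
        return 0;
    }

    static void
    example_iface_init(void)
    {
        dlb_iface_open = example_pf_open;
        /* ... assign the remaining dlb_iface_* pointers similarly ... */
    }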
drivers/event/dlb/dlb_iface.h

@@ -1,82 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB_IFACE_H
#define _DLB_IFACE_H

/* DLB PMD Internal interface function pointers.
 * If VDEV (bifurcated PMD), these will resolve to functions that issue ioctls
 * serviced by DLB kernel module.
 * If PCI (PF PMD), these will be implemented locally in user mode.
 */

extern void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);

extern int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);

extern void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);

extern int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
					   uint8_t *revision);

extern int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
					  struct dlb_get_num_resources_args *rsrcs);

extern int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,
					    struct dlb_create_sched_domain_args *args);

extern int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
					       struct dlb_create_ldb_pool_args *cfg);

extern int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
					       struct dlb_create_dir_pool_args *cfg);

extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
					 struct dlb_create_ldb_queue_args *cfg);

extern int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle,
					 struct dlb_create_dir_queue_args *cfg);

extern int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle,
					struct dlb_create_ldb_port_args *cfg,
					enum dlb_cq_poll_modes poll_mode);

extern int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
					struct dlb_create_dir_port_args *cfg,
					enum dlb_cq_poll_modes poll_mode);

extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
					 struct dlb_create_ldb_queue_args *cfg);

extern int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
				struct dlb_map_qid_args *cfg);

extern int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
				  struct dlb_unmap_qid_args *cfg);

extern int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
					   struct dlb_start_domain_args *cfg);

extern int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
					    struct dlb_pending_port_unmaps_args *args);

extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
					 enum dlb_cq_poll_modes *mode);

extern int (*dlb_iface_get_sn_allocation)(struct dlb_hw_dev *handle,
					  struct dlb_get_sn_allocation_args *args);

extern int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,
					  struct dlb_set_sn_allocation_args *args);

extern int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,
					 struct dlb_get_sn_occupancy_args *args);

extern int (*dlb_iface_get_ldb_queue_depth)(struct dlb_hw_dev *handle,
					    struct dlb_get_ldb_queue_depth_args *args);

extern int (*dlb_iface_get_dir_queue_depth)(struct dlb_hw_dev *handle,
					    struct dlb_get_dir_queue_depth_args *args);

#endif /* _DLB_IFACE_H */
drivers/event/dlb/dlb_inline_fns.h

@@ -1,36 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB_INLINE_FNS_H_
#define _DLB_INLINE_FNS_H_

#include "rte_memcpy.h"
#include "rte_io.h"

/* Inline functions required in more than one source file. */

static inline struct dlb_eventdev *
dlb_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}

/* 16-byte non-temporal store: write one QE without polluting the cache. */
static inline void
dlb_movntdq_single(void *dest, void *src)
{
	long long *_src = (long long *)src;
	__m128i src_data0 = (__m128i){_src[0], _src[1]};

	_mm_stream_si128(dest, src_data0);
}

/* 64-byte atomic direct store (the movdir64b instruction), emitted as raw
 * bytes so it assembles with toolchains that lack the mnemonic.
 */
static inline void
dlb_movdir64b(void *dest, void *src)
{
	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dest), "d" (src));
}

#endif /* _DLB_INLINE_FNS_H_ */
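These helpers exist to push a full cache line of four 16-byte QEs to a port's
producer-port MMIO window in one shot. A hedged sketch of how an enqueue path
might use them (the names follow struct dlb_port and struct
process_local_port_data in dlb_priv.h below; this is not the PMD's exact
code):

.. code-block:: c

    /* Write four QEs (one cache line) to the producer port window. */
    static inline void
    flush_qe4(struct dlb_port *qm_port, uint64_t *pp_addr)
    {
        dlb_movdir64b(pp_addr, qm_port->qe4);
    }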
drivers/event/dlb/dlb_log.h

@@ -1,25 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB_EVDEV_LOG_H_
#define _DLB_EVDEV_LOG_H_

extern int eventdev_dlb_log_level;

/* Dynamic logging */
#define DLB_LOG_IMPL(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eventdev_dlb_log_level, "%s" fmt "\n", \
		__func__, ##args)

#define DLB_LOG_INFO(fmt, args...) \
	DLB_LOG_IMPL(INFO, fmt, ## args)

#define DLB_LOG_ERR(fmt, args...) \
	DLB_LOG_IMPL(ERR, fmt, ## args)

/* remove debug logs at compile time unless actually debugging */
#define DLB_LOG_DBG(fmt, args...) \
	RTE_LOG_DP(DEBUG, PMD, fmt, ## args)

#endif /* _DLB_EVDEV_LOG_H_ */
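Usage is conventional for DPDK dynamic logging; DLB_LOG_IMPL appends the
newline itself, so call sites omit it (illustrative calls, not from the PMD):

.. code-block:: c

    DLB_LOG_ERR("dlb: failed to create queue %d", qid);
    DLB_LOG_DBG("dlb: cq depth = %d", depth);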
drivers/event/dlb/dlb_priv.h

@@ -1,511 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB_PRIV_H_
#define _DLB_PRIV_H_

#include <emmintrin.h>
#include <stdbool.h>

#include <rte_bus_pci.h>
#include <rte_eventdev.h>
#include <eventdev_pmd.h>
#include <eventdev_pmd_pci.h>
#include <rte_pci.h>

#include "dlb_user.h"
#include "dlb_log.h"
#include "rte_pmd_dlb.h"

#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
#else
#define DLB_INC_STAT(_stat, _incr_val)
#endif

#define EVDEV_DLB_NAME_PMD_STR "dlb_event"

/* command line arg strings */
#define NUMA_NODE_ARG "numa_node"
#define DLB_MAX_NUM_EVENTS "max_num_events"
#define DLB_NUM_DIR_CREDITS "num_dir_credits"
#define DEV_ID_ARG "dev_id"
#define DLB_DEFER_SCHED_ARG "defer_sched"
#define DLB_NUM_ATM_INFLIGHTS_ARG "atm_inflights"

/* Begin HW related defines and structs */

#define DLB_MAX_NUM_DOMAINS 32
#define DLB_MAX_NUM_VFS 16
#define DLB_MAX_NUM_LDB_QUEUES 128
#define DLB_MAX_NUM_LDB_PORTS 64
#define DLB_MAX_NUM_DIR_PORTS 128
#define DLB_MAX_NUM_DIR_QUEUES 128
#define DLB_MAX_NUM_FLOWS (64 * 1024)
#define DLB_MAX_NUM_LDB_CREDITS 16384
#define DLB_MAX_NUM_DIR_CREDITS 4096
#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
#define DLB_MAX_NUM_ATM_INFLIGHTS 2048
#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB_QID_PRIORITIES 8
#define DLB_MAX_DEVICE_PATH 32
#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1
#define DLB_NUM_SN_GROUPS 4
#define DLB_MAX_LDB_SN_ALLOC 1024
/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048

/* 5120 total hist list entries and 64 total ldb ports, which
 * makes for 5120/64 == 80 hist list entries per port. However, CQ
 * depth must be a power of 2 and must also be >= HIST LIST entries.
 * As a result we just limit the maximum dequeue depth to 64.
 */
#define DLB_MIN_LDB_CQ_DEPTH 1
#define DLB_MIN_DIR_CQ_DEPTH 8
#define DLB_MIN_HARDWARE_CQ_DEPTH 8
#define DLB_MAX_CQ_DEPTH 64
#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
	DLB_MAX_CQ_DEPTH

/* Static per queue/port provisioning values */
#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16

#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)

#define DLB_NUM_QES_PER_CACHE_LINE 4

#define DLB_MAX_ENQUEUE_DEPTH 64
#define DLB_MIN_ENQUEUE_DEPTH 4

#define DLB_NAME_SIZE 64

/* Use the upper 3 bits of the event priority to select the DLB priority */
#define EV_TO_DLB_PRIO(x) ((x) >> 5)
#define DLB_TO_EV_PRIO(x) ((x) << 5)

enum dlb_hw_port_type {
	DLB_LDB,
	DLB_DIR,

	/* NUM_DLB_PORT_TYPES must be last */
	NUM_DLB_PORT_TYPES
};

#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB)

/* Do not change - must match hardware! */
enum dlb_hw_sched_type {
	DLB_SCHED_ATOMIC = 0,
	DLB_SCHED_UNORDERED,
	DLB_SCHED_ORDERED,
	DLB_SCHED_DIRECTED,

	/* DLB_NUM_HW_SCHED_TYPES must be last */
	DLB_NUM_HW_SCHED_TYPES
};

struct dlb_devargs {
	int socket_id;
	int max_num_events;
	int num_dir_credits_override;
	int dev_id;
	int defer_sched;
	int num_atm_inflights;
};

struct dlb_hw_rsrcs {
	int32_t nb_events_limit;
	uint32_t num_queues;		/* Total queues (ldb + dir) */
	uint32_t num_ldb_queues;	/* Number of available ldb queues */
	uint32_t num_ldb_ports;		/* Number of load balanced ports */
	uint32_t num_dir_ports;		/* Number of directed ports */
	uint32_t num_ldb_credits;	/* Number of load balanced credits */
	uint32_t num_dir_credits;	/* Number of directed credits */
	uint32_t reorder_window_size;	/* Size of reorder window */
};

struct dlb_hw_resource_info {
	/**> Max resources that can be provided */
	struct dlb_hw_rsrcs hw_rsrc_max;
	int num_sched_domains;
	uint32_t socket_id;
	/**> EAL flags passed to this DLB instance, allowing the application to
	 * identify the pmd backend indicating hardware or software.
	 */
	const char *eal_flags;
};

/* hw-specific format - do not change */

struct dlb_event_type {
	uint8_t major:4;
	uint8_t unused:4;
	uint8_t sub;
};

union dlb_opaque_data {
	uint16_t opaque_data;
	struct dlb_event_type event_type;
};

struct dlb_msg_info {
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
};

#define DLB_NEW_CMD_BYTE 0x08
#define DLB_FWD_CMD_BYTE 0x0A
#define DLB_COMP_CMD_BYTE 0x02
#define DLB_NOOP_CMD_BYTE 0x00
#define DLB_POP_CMD_BYTE 0x01

/* hw-specific format - do not change */
struct dlb_enqueue_qe {
	uint64_t data;
	/* Word 3 */
	union dlb_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	/* Word 4 */
	uint16_t lock_id;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t int_arm:1;
			uint8_t error:1;
			uint8_t rsvd:2;
		};
	};
};

/* hw-specific format - do not change */
struct dlb_cq_pop_qe {
	uint64_t data;
	union dlb_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t tokens:10;
	uint16_t rsvd2:6;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t int_arm:1;
			uint8_t error:1;
			uint8_t rsvd:2;
		};
	};
};

/* hw-specific format - do not change */
struct dlb_dequeue_qe {
	uint64_t data;
	union dlb_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t pp_id:10;
	uint16_t rsvd0:6;
	uint8_t debug;
	uint8_t cq_gen:1;
	uint8_t qid_depth:1;
	uint8_t rsvd1:3;
	uint8_t error:1;
	uint8_t rsvd2:2;
};

enum dlb_port_state {
	PORT_CLOSED,
	PORT_STARTED,
	PORT_STOPPED
};

enum dlb_configuration_state {
	/* The resource has not been configured */
	DLB_NOT_CONFIGURED,
	/* The resource was configured, but the device was stopped */
	DLB_PREV_CONFIGURED,
	/* The resource is currently configured */
	DLB_CONFIGURED
};

struct dlb_port {
	uint32_t id;
	bool is_directed;
	bool gen_bit;
	uint16_t dir_credits;
	uint32_t dequeue_depth;
	enum dlb_token_pop_mode token_pop_mode;
	int pp_mmio_base;
	uint16_t cached_ldb_credits;
	uint16_t ldb_pushcount_at_credit_expiry;
	uint16_t ldb_credits;
	uint16_t cached_dir_credits;
	uint16_t dir_pushcount_at_credit_expiry;
	bool int_armed;
	bool use_rsvd_token_scheme;
	uint8_t cq_rsvd_token_deficit;
	uint16_t owed_tokens;
	int16_t issued_releases;
	int16_t token_pop_thresh;
	int cq_depth;
	uint16_t cq_idx;
	uint16_t cq_idx_unmasked;
	uint16_t cq_depth_mask;
	uint16_t gen_bit_shift;
	enum dlb_port_state state;
	enum dlb_configuration_state config_state;
	int num_mapped_qids;
	uint8_t *qid_mappings;
	struct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
	struct dlb_cq_pop_qe *consume_qe;
	struct dlb_eventdev *dlb; /* back ptr */
	struct dlb_eventdev_port *ev_port; /* back ptr */
};

/* Per-process per-port mmio and memory pointers */
struct process_local_port_data {
	uint64_t *pp_addr;
	uint16_t *ldb_popcount;
	uint16_t *dir_popcount;
	struct dlb_dequeue_qe *cq_base;
	const struct rte_memzone *mz;
	bool mmaped;
};

struct dlb_config {
	int configured;
	int reserved;
	uint32_t ldb_credit_pool_id;
	uint32_t dir_credit_pool_id;
	uint32_t num_ldb_credits;
	uint32_t num_dir_credits;
	struct dlb_create_sched_domain_args resources;
};

struct dlb_hw_dev {
	struct dlb_config cfg;
	struct dlb_hw_resource_info info;
	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */
	int device_id;
	uint32_t domain_id;
	int domain_id_valid;
	rte_spinlock_t resource_lock; /* for MP support */
} __rte_cache_aligned;

/* End HW related defines and structs */

/* Begin DLB PMD Eventdev related defines and structs */

#define DLB_MAX_NUM_QUEUES \
	(DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES)

#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS)
#define DLB_MAX_INPUT_QUEUE_DEPTH 256

/** Structure to hold the queue to port link establishment attributes */

struct dlb_event_queue_link {
	uint8_t queue_id;
	uint8_t priority;
	bool mapped;
	bool valid;
};

struct dlb_traffic_stats {
	uint64_t rx_ok;
	uint64_t rx_drop;
	uint64_t rx_interrupt_wait;
	uint64_t rx_umonitor_umwait;
	uint64_t tx_ok;
	uint64_t total_polls;
	uint64_t zero_polls;
	uint64_t tx_nospc_ldb_hw_credits;
	uint64_t tx_nospc_dir_hw_credits;
	uint64_t tx_nospc_inflight_max;
	uint64_t tx_nospc_new_event_limit;
	uint64_t tx_nospc_inflight_credits;
};

struct dlb_port_stats {
	struct dlb_traffic_stats traffic;
	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
	uint64_t tx_implicit_rel;
	uint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
	uint64_t tx_invalid;
	uint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
	uint64_t rx_sched_invalid;
	uint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */
};

struct dlb_eventdev_port {
	struct dlb_port qm_port; /* hw specific data structure */
	struct rte_event_port_conf conf; /* user-supplied configuration */
	uint16_t inflight_credits; /* num credits this port has right now */
	uint16_t credit_update_quanta;
	struct dlb_eventdev *dlb; /* backlink optimization */
	struct dlb_port_stats stats __rte_cache_aligned;
	struct dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
	int num_links;
	uint32_t id;
	/* num releases yet to be completed on this port.
	 * Only applies to load-balanced ports.
	 */
	uint16_t outstanding_releases;
	uint16_t inflight_max; /* app requested max inflights for this port */
	/* setup_done is set when the event port is setup */
	bool setup_done;
	/* enq_configured is set when the qm port is created */
	bool enq_configured;
	uint8_t implicit_release; /* release events before dequeueing */
} __rte_cache_aligned;

struct dlb_queue {
	uint32_t num_qid_inflights; /* User config */
	uint32_t num_atm_inflights; /* User config */
	enum dlb_configuration_state config_state;
	int sched_type; /* LB queue only */
	uint32_t id;
	bool is_directed;
};

struct dlb_eventdev_queue {
	struct dlb_queue qm_queue;
	struct rte_event_queue_conf conf; /* User config */
	uint64_t enq_ok;
	uint32_t id;
	bool setup_done;
	uint8_t num_links;
};

enum dlb_run_state {
	DLB_RUN_STATE_STOPPED = 0,
	DLB_RUN_STATE_STOPPING,
	DLB_RUN_STATE_STARTING,
	DLB_RUN_STATE_STARTED
};

struct dlb_eventdev {
	struct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS];
	struct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES];
	uint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
	uint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES];

	/* store num stats and offset of the stats for each queue */
	uint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES];
	uint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES];

	/* store num stats and offset of the stats for each port */
	uint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS];
	uint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS];
	struct dlb_get_num_resources_args hw_rsrc_query_results;
	uint32_t xstats_count_mode_queue;
	struct dlb_hw_dev qm_instance; /* strictly hw related */
	uint64_t global_dequeue_wait_ticks;
	struct dlb_xstats_entry *xstats;
	struct rte_eventdev *event_dev; /* backlink to dev */
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count;
	uint32_t inflights; /* use __atomic builtins to access */
	uint32_t new_event_limit;
	int max_num_events_override;
	int num_dir_credits_override;
	volatile enum dlb_run_state run_state;
	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
	uint16_t num_dir_credits;
	uint16_t num_ldb_credits;
	uint16_t num_queues; /* total queues */
	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
	uint16_t num_ports; /* total num of evdev ports requested */
	uint16_t num_ldb_ports; /* total num of ldb ports requested */
	uint16_t num_dir_ports; /* total num of dir ports requested */
	bool is_vdev;
	bool umwait_allowed;
	bool global_dequeue_wait; /* Not using per dequeue wait if true */
	bool defer_sched;
	unsigned int num_atm_inflights_per_queue;
	enum dlb_cq_poll_modes poll_mode;
	uint8_t revision;
	bool configured;
};

/* End Eventdev related defines and structs */

/* externs */

extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];

/* Forwards for non-inlined functions */

void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);

int dlb_xstats_init(struct dlb_eventdev *dlb);

void dlb_xstats_uninit(struct dlb_eventdev *dlb);

int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
			    enum rte_event_dev_xstats_mode mode,
			    uint8_t queue_port_id, const unsigned int ids[],
			    uint64_t values[], unsigned int n);

int dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
				  enum rte_event_dev_xstats_mode mode,
				  uint8_t queue_port_id,
				  struct rte_event_dev_xstats_name *xstat_names,
				  unsigned int *ids, unsigned int size);

uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
					 const char *name, unsigned int *id);

int dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
			      enum rte_event_dev_xstats_mode mode,
			      int16_t queue_port_id,
			      const uint32_t ids[],
			      uint32_t nb_ids);

int test_dlb_eventdev(void);

int dlb_primary_eventdev_probe(struct rte_eventdev *dev,
			       const char *name,
			       struct dlb_devargs *dlb_args);

int dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
				 const char *name);

uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb,
			     struct dlb_eventdev_queue *queue);

int dlb_parse_params(const char *params,
		     const char *name,
		     struct dlb_devargs *dlb_args);

void dlb_entry_points_init(struct rte_eventdev *dev);

#endif /* _DLB_PRIV_H_ */
drivers/event/dlb/dlb_selftest.c (file diff suppressed because it is too large)
drivers/event/dlb/dlb_user.h

@@ -1,814 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_USER_H
#define __DLB_USER_H

#include <linux/types.h>

#define DLB_MAX_NAME_LEN 64

enum dlb_error {
	DLB_ST_SUCCESS = 0,
	DLB_ST_NAME_EXISTS,
	DLB_ST_DOMAIN_UNAVAILABLE,
	DLB_ST_LDB_PORTS_UNAVAILABLE,
	DLB_ST_DIR_PORTS_UNAVAILABLE,
	DLB_ST_LDB_QUEUES_UNAVAILABLE,
	DLB_ST_LDB_CREDITS_UNAVAILABLE,
	DLB_ST_DIR_CREDITS_UNAVAILABLE,
	DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE,
	DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE,
	DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE,
	DLB_ST_INVALID_DOMAIN_ID,
	DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION,
	DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE,
	DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE,
	DLB_ST_INVALID_LDB_CREDIT_POOL_ID,
	DLB_ST_INVALID_DIR_CREDIT_POOL_ID,
	DLB_ST_INVALID_POP_COUNT_VIRT_ADDR,
	DLB_ST_INVALID_LDB_QUEUE_ID,
	DLB_ST_INVALID_CQ_DEPTH,
	DLB_ST_INVALID_CQ_VIRT_ADDR,
	DLB_ST_INVALID_PORT_ID,
	DLB_ST_INVALID_QID,
	DLB_ST_INVALID_PRIORITY,
	DLB_ST_NO_QID_SLOTS_AVAILABLE,
	DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE,
	DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE,
	DLB_ST_INVALID_DIR_QUEUE_ID,
	DLB_ST_DIR_QUEUES_UNAVAILABLE,
	DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK,
	DLB_ST_INVALID_LDB_CREDIT_QUANTUM,
	DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK,
	DLB_ST_INVALID_DIR_CREDIT_QUANTUM,
	DLB_ST_DOMAIN_NOT_CONFIGURED,
	DLB_ST_PID_ALREADY_ATTACHED,
	DLB_ST_PID_NOT_ATTACHED,
	DLB_ST_INTERNAL_ERROR,
	DLB_ST_DOMAIN_IN_USE,
	DLB_ST_IOMMU_MAPPING_ERROR,
	DLB_ST_FAIL_TO_PIN_MEMORY_PAGE,
	DLB_ST_UNABLE_TO_PIN_POPCOUNT_PAGES,
	DLB_ST_UNABLE_TO_PIN_CQ_PAGES,
	DLB_ST_DISCONTIGUOUS_CQ_MEMORY,
	DLB_ST_DISCONTIGUOUS_POP_COUNT_MEMORY,
	DLB_ST_DOMAIN_STARTED,
	DLB_ST_LARGE_POOL_NOT_SPECIFIED,
	DLB_ST_SMALL_POOL_NOT_SPECIFIED,
	DLB_ST_NEITHER_POOL_SPECIFIED,
	DLB_ST_DOMAIN_NOT_STARTED,
	DLB_ST_INVALID_MEASUREMENT_DURATION,
	DLB_ST_INVALID_PERF_METRIC_GROUP_ID,
	DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES,
	DLB_ST_DOMAIN_RESET_FAILED,
	DLB_ST_MBOX_ERROR,
	DLB_ST_INVALID_HIST_LIST_DEPTH,
	DLB_ST_NO_MEMORY,
};

static const char dlb_error_strings[][128] = {
	"DLB_ST_SUCCESS",
	"DLB_ST_NAME_EXISTS",
	"DLB_ST_DOMAIN_UNAVAILABLE",
	"DLB_ST_LDB_PORTS_UNAVAILABLE",
	"DLB_ST_DIR_PORTS_UNAVAILABLE",
	"DLB_ST_LDB_QUEUES_UNAVAILABLE",
	"DLB_ST_LDB_CREDITS_UNAVAILABLE",
	"DLB_ST_DIR_CREDITS_UNAVAILABLE",
	"DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE",
	"DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE",
	"DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE",
	"DLB_ST_INVALID_DOMAIN_ID",
	"DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION",
	"DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE",
	"DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE",
	"DLB_ST_INVALID_LDB_CREDIT_POOL_ID",
	"DLB_ST_INVALID_DIR_CREDIT_POOL_ID",
	"DLB_ST_INVALID_POP_COUNT_VIRT_ADDR",
	"DLB_ST_INVALID_LDB_QUEUE_ID",
	"DLB_ST_INVALID_CQ_DEPTH",
	"DLB_ST_INVALID_CQ_VIRT_ADDR",
	"DLB_ST_INVALID_PORT_ID",
	"DLB_ST_INVALID_QID",
	"DLB_ST_INVALID_PRIORITY",
	"DLB_ST_NO_QID_SLOTS_AVAILABLE",
	"DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE",
	"DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE",
	"DLB_ST_INVALID_DIR_QUEUE_ID",
	"DLB_ST_DIR_QUEUES_UNAVAILABLE",
	"DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK",
	"DLB_ST_INVALID_LDB_CREDIT_QUANTUM",
	"DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK",
	"DLB_ST_INVALID_DIR_CREDIT_QUANTUM",
	"DLB_ST_DOMAIN_NOT_CONFIGURED",
	"DLB_ST_PID_ALREADY_ATTACHED",
	"DLB_ST_PID_NOT_ATTACHED",
	"DLB_ST_INTERNAL_ERROR",
	"DLB_ST_DOMAIN_IN_USE",
	"DLB_ST_IOMMU_MAPPING_ERROR",
	"DLB_ST_FAIL_TO_PIN_MEMORY_PAGE",
	"DLB_ST_UNABLE_TO_PIN_POPCOUNT_PAGES",
	"DLB_ST_UNABLE_TO_PIN_CQ_PAGES",
	"DLB_ST_DISCONTIGUOUS_CQ_MEMORY",
	"DLB_ST_DISCONTIGUOUS_POP_COUNT_MEMORY",
	"DLB_ST_DOMAIN_STARTED",
	"DLB_ST_LARGE_POOL_NOT_SPECIFIED",
	"DLB_ST_SMALL_POOL_NOT_SPECIFIED",
	"DLB_ST_NEITHER_POOL_SPECIFIED",
	"DLB_ST_DOMAIN_NOT_STARTED",
	"DLB_ST_INVALID_MEASUREMENT_DURATION",
	"DLB_ST_INVALID_PERF_METRIC_GROUP_ID",
	"DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES",
	"DLB_ST_DOMAIN_RESET_FAILED",
	"DLB_ST_MBOX_ERROR",
	"DLB_ST_INVALID_HIST_LIST_DEPTH",
	"DLB_ST_NO_MEMORY",
};
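The string table is index-aligned with enum dlb_error, so a status code can be
printed directly; a hedged sketch (the bounds check and helper name are not
part of the header):

.. code-block:: c

    #include <rte_common.h> /* for RTE_DIM */

    static const char *
    dlb_error_str(unsigned int status)
    {
        if (status >= RTE_DIM(dlb_error_strings))
            return "unknown";
        return dlb_error_strings[status];
    }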
struct dlb_cmd_response {
	__u32 status; /* Interpret using enum dlb_error */
	__u32 id;
};

/******************************/
/* 'dlb' commands             */
/******************************/

#define DLB_DEVICE_VERSION(x) (((x) >> 8) & 0xFF)
#define DLB_DEVICE_REVISION(x) ((x) & 0xFF)

enum dlb_revisions {
	DLB_REV_A0 = 0,
	DLB_REV_A1 = 1,
	DLB_REV_A2 = 2,
	DLB_REV_A3 = 3,
	DLB_REV_B0 = 4,
};

/*
 * DLB_CMD_CREATE_SCHED_DOMAIN: Create a DLB scheduling domain and reserve the
 *	resources (queues, ports, etc.) that it contains.
 *
 * Input parameters:
 * - num_ldb_queues: Number of load-balanced queues.
 * - num_ldb_ports: Number of load-balanced ports.
 * - num_dir_ports: Number of directed ports. A directed port has one directed
 *	queue, so no num_dir_queues argument is necessary.
 * - num_atomic_inflights: This specifies the amount of temporary atomic QE
 *	storage for the domain. This storage is divided among the domain's
 *	load-balanced queues that are configured for atomic scheduling.
 * - num_hist_list_entries: Amount of history list storage. This is divided
 *	among the domain's CQs.
 * - num_ldb_credits: Amount of load-balanced QE storage (QED). QEs occupy this
 *	space until they are scheduled to a load-balanced CQ. One credit
 *	represents the storage for one QE.
 * - num_dir_credits: Amount of directed QE storage (DQED). QEs occupy this
 *	space until they are scheduled to a directed CQ. One credit represents
 *	the storage for one QE.
 * - num_ldb_credit_pools: Number of pools into which the load-balanced credits
 *	are placed.
 * - num_dir_credit_pools: Number of pools into which the directed credits are
 *	placed.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *		response pointer is invalid, the driver won't set status.
 *	response.id: domain ID.
 */
struct dlb_create_sched_domain_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 num_ldb_queues;
	__u32 num_ldb_ports;
	__u32 num_dir_ports;
	__u32 num_atomic_inflights;
	__u32 num_hist_list_entries;
	__u32 num_ldb_credits;
	__u32 num_dir_credits;
	__u32 num_ldb_credit_pools;
	__u32 num_dir_credit_pools;
};
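The response field carries a user-space pointer through a 64-bit integer so
the layout is identical for 32- and 64-bit callers. A hedged sketch of the
calling convention (the command-dispatch plumbing is omitted and the resource
counts are placeholders):

.. code-block:: c

    struct dlb_cmd_response resp = {0};
    struct dlb_create_sched_domain_args args = {
        .response = (__u64)(uintptr_t)&resp,
        .num_ldb_queues = 4,
        .num_ldb_ports = 4,
        /* ... remaining resource counts ... */
    };

    /* After the command is serviced, resp.status holds an enum dlb_error
     * value and resp.id holds the new domain ID.
     */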
/*
|
||||
* DLB_CMD_GET_NUM_RESOURCES: Return the number of available resources
|
||||
* (queues, ports, etc.) that this device owns.
|
||||
*
|
||||
* Output parameters:
|
||||
* - num_domains: Number of available scheduling domains.
|
||||
* - num_ldb_queues: Number of available load-balanced queues.
|
||||
* - num_ldb_ports: Number of available load-balanced ports.
|
||||
* - num_dir_ports: Number of available directed ports. There is one directed
|
||||
* queue for every directed port.
|
||||
* - num_atomic_inflights: Amount of available temporary atomic QE storage.
|
||||
* - max_contiguous_atomic_inflights: When a domain is created, the temporary
|
||||
* atomic QE storage is allocated in a contiguous chunk. This return value
|
||||
* is the longest available contiguous range of atomic QE storage.
|
||||
* - num_hist_list_entries: Amount of history list storage.
|
||||
* - max_contiguous_hist_list_entries: History list storage is allocated in
|
||||
* a contiguous chunk, and this return value is the longest available
|
||||
* contiguous range of history list entries.
|
||||
* - num_ldb_credits: Amount of available load-balanced QE storage.
|
||||
* - max_contiguous_ldb_credits: QED storage is allocated in a contiguous
|
||||
* chunk, and this return value is the longest available contiguous range
|
||||
* of load-balanced credit storage.
|
||||
* - num_dir_credits: Amount of available directed QE storage.
|
||||
* - max_contiguous_dir_credits: DQED storage is allocated in a contiguous
|
||||
* chunk, and this return value is the longest available contiguous range
|
||||
* of directed credit storage.
|
||||
* - num_ldb_credit_pools: Number of available load-balanced credit pools.
|
||||
* - num_dir_credit_pools: Number of available directed credit pools.
|
||||
* - padding0: Reserved for future use.
|
||||
*/
|
||||
struct dlb_get_num_resources_args {
|
||||
/* Output parameters */
|
||||
__u32 num_sched_domains;
|
||||
__u32 num_ldb_queues;
|
||||
__u32 num_ldb_ports;
|
||||
__u32 num_dir_ports;
|
||||
__u32 num_atomic_inflights;
|
||||
__u32 max_contiguous_atomic_inflights;
|
||||
__u32 num_hist_list_entries;
|
||||
__u32 max_contiguous_hist_list_entries;
|
||||
__u32 num_ldb_credits;
|
||||
__u32 max_contiguous_ldb_credits;
|
||||
__u32 num_dir_credits;
|
||||
__u32 max_contiguous_dir_credits;
|
||||
__u32 num_ldb_credit_pools;
|
||||
__u32 num_dir_credit_pools;
|
||||
__u32 padding0;
|
||||
};

/*
 * DLB_CMD_SET_SN_ALLOCATION: Configure a sequence number group
 *
 * Input parameters:
 * - group: Sequence number group ID.
 * - num: Number of sequence numbers per queue.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 */
struct dlb_set_sn_allocation_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 group;
	__u32 num;
};

/*
 * DLB_CMD_GET_SN_ALLOCATION: Get a sequence number group's configuration
 *
 * Input parameters:
 * - group: Sequence number group ID.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: Specified group's number of sequence numbers per queue.
 */
struct dlb_get_sn_allocation_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 group;
	__u32 padding0;
};

enum dlb_cq_poll_modes {
	DLB_CQ_POLL_MODE_STD,
	DLB_CQ_POLL_MODE_SPARSE,

	/* NUM_DLB_CQ_POLL_MODE must be last */
	NUM_DLB_CQ_POLL_MODE,
};

/*
 * DLB_CMD_QUERY_CQ_POLL_MODE: Query the CQ poll mode the kernel driver is
 *	using.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: CQ poll mode (see enum dlb_cq_poll_modes).
 */
struct dlb_query_cq_poll_mode_args {
	/* Output parameters */
	__u64 response;
};

/*
 * DLB_CMD_GET_SN_OCCUPANCY: Get a sequence number group's occupancy
 *
 * Each sequence number group has one or more slots, depending on its
 * configuration. I.e.:
 * - If configured for 1024 sequence numbers per queue, the group has 1 slot
 * - If configured for 512 sequence numbers per queue, the group has 2 slots
 *   ...
 * - If configured for 32 sequence numbers per queue, the group has 32 slots
 *
 * This ioctl returns the group's number of in-use slots. If its occupancy is
 * 0, the group's sequence number allocation can be reconfigured.
 *
 * Input parameters:
 * - group: Sequence number group ID.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: Specified group's number of used slots.
 */
struct dlb_get_sn_occupancy_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 group;
	__u32 padding0;
};
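
/*
 * For illustration only: reconfiguring a group's sequence number
 * allocation is safe only when its occupancy is zero, so a user-space
 * helper would pair the two commands above. The DLB_IOC_* request codes
 * are assumptions (they are defined elsewhere in the removed driver).
 */
static inline int dlb_example_set_sn_allocation(int fd, __u32 group, __u32 num)
{
	struct dlb_cmd_response resp = { 0 };
	struct dlb_get_sn_occupancy_args occ = { .group = group };
	struct dlb_set_sn_allocation_args set = { .group = group, .num = num };

	occ.response = (__u64)(uintptr_t)&resp;
	if (ioctl(fd, DLB_IOC_GET_SN_OCCUPANCY, &occ) != 0 || resp.id != 0)
		return -1;	/* group is in use; cannot reconfigure */

	set.response = (__u64)(uintptr_t)&resp;
	return ioctl(fd, DLB_IOC_SET_SN_ALLOCATION, &set);
}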

/*********************************/
/* 'scheduling domain' commands */
/*********************************/

/*
 * DLB_DOMAIN_CMD_CREATE_LDB_POOL: Configure a load-balanced credit pool.
 * Input parameters:
 * - num_ldb_credits: Number of load-balanced credits (QED space) for this
 *	pool.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: pool ID.
 */
struct dlb_create_ldb_pool_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 num_ldb_credits;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_CREATE_DIR_POOL: Configure a directed credit pool.
 * Input parameters:
 * - num_dir_credits: Number of directed credits (DQED space) for this pool.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: Pool ID.
 */
struct dlb_create_dir_pool_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 num_dir_credits;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_CREATE_LDB_QUEUE: Configure a load-balanced queue.
 * Input parameters:
 * - num_atomic_inflights: This specifies the amount of temporary atomic QE
 *	storage for this queue. If zero, the queue will not support atomic
 *	scheduling.
 * - num_sequence_numbers: This specifies the number of sequence numbers used
 *	by this queue. If zero, the queue will not support ordered scheduling.
 *	If non-zero, the queue will not support unordered scheduling.
 * - num_qid_inflights: The maximum number of QEs that can be inflight
 *	(scheduled to a CQ but not completed) at any time. If
 *	num_sequence_numbers is non-zero, num_qid_inflights must be set equal
 *	to num_sequence_numbers.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: Queue ID.
 */
struct dlb_create_ldb_queue_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 num_sequence_numbers;
	__u32 num_qid_inflights;
	__u32 num_atomic_inflights;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_CREATE_DIR_QUEUE: Configure a directed queue.
 * Input parameters:
 * - port_id: Port ID. If the corresponding directed port is already created,
 *	specify its ID here. Else this argument must be 0xFFFFFFFF to indicate
 *	that the queue is being created before the port.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: Queue ID.
 */
struct dlb_create_dir_queue_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__s32 port_id;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_CREATE_LDB_PORT: Configure a load-balanced port.
 * Input parameters:
 * - ldb_credit_pool_id: Load-balanced credit pool this port will belong to.
 * - dir_credit_pool_id: Directed credit pool this port will belong to.
 * - ldb_credit_high_watermark: Number of load-balanced credits from the pool
 *	that this port will own.
 *
 *	If this port's scheduling domain does not have any load-balanced
 *	queues, this argument is ignored and the port is given no
 *	load-balanced credits.
 * - dir_credit_high_watermark: Number of directed credits from the pool that
 *	this port will own.
 *
 *	If this port's scheduling domain does not have any directed queues,
 *	this argument is ignored and the port is given no directed credits.
 * - ldb_credit_low_watermark: Load-balanced credit low watermark. When the
 *	port's credits reach this watermark, they become eligible to be
 *	refilled by the DLB as credits until the high watermark
 *	(num_ldb_credits) is reached.
 *
 *	If this port's scheduling domain does not have any load-balanced
 *	queues, this argument is ignored and the port is given no
 *	load-balanced credits.
 * - dir_credit_low_watermark: Directed credit low watermark. When the port's
 *	credits reach this watermark, they become eligible to be refilled by
 *	the DLB as credits until the high watermark (num_dir_credits) is
 *	reached.
 *
 *	If this port's scheduling domain does not have any directed queues,
 *	this argument is ignored and the port is given no directed credits.
 * - ldb_credit_quantum: Number of load-balanced credits for the DLB to refill
 *	per refill operation.
 *
 *	If this port's scheduling domain does not have any load-balanced
 *	queues, this argument is ignored and the port is given no
 *	load-balanced credits.
 * - dir_credit_quantum: Number of directed credits for the DLB to refill per
 *	refill operation.
 *
 *	If this port's scheduling domain does not have any directed queues,
 *	this argument is ignored and the port is given no directed credits.
 * - padding0: Reserved for future use.
 * - cq_depth: Depth of the port's CQ. Must be a power-of-two between 8 and
 *	1024, inclusive.
 * - cq_depth_threshold: CQ depth interrupt threshold. A value of N means that
 *	the CQ interrupt won't fire until there are N or more outstanding CQ
 *	tokens.
 * - cq_history_list_size: Number of history list entries. This must be
 *	greater than or equal to cq_depth.
 * - padding1: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: port ID.
 */
struct dlb_create_ldb_port_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 ldb_credit_pool_id;
	__u32 dir_credit_pool_id;
	__u16 ldb_credit_high_watermark;
	__u16 ldb_credit_low_watermark;
	__u16 ldb_credit_quantum;
	__u16 dir_credit_high_watermark;
	__u16 dir_credit_low_watermark;
	__u16 dir_credit_quantum;
	__u16 padding0;
	__u16 cq_depth;
	__u16 cq_depth_threshold;
	__u16 cq_history_list_size;
	__u32 padding1;
};

/*
 * DLB_DOMAIN_CMD_CREATE_DIR_PORT: Configure a directed port.
 * Input parameters:
 * - ldb_credit_pool_id: Load-balanced credit pool this port will belong to.
 * - dir_credit_pool_id: Directed credit pool this port will belong to.
 * - ldb_credit_high_watermark: Number of load-balanced credits from the pool
 *	that this port will own.
 *
 *	If this port's scheduling domain does not have any load-balanced
 *	queues, this argument is ignored and the port is given no
 *	load-balanced credits.
 * - dir_credit_high_watermark: Number of directed credits from the pool that
 *	this port will own.
 * - ldb_credit_low_watermark: Load-balanced credit low watermark. When the
 *	port's credits reach this watermark, they become eligible to be
 *	refilled by the DLB as credits until the high watermark
 *	(num_ldb_credits) is reached.
 *
 *	If this port's scheduling domain does not have any load-balanced
 *	queues, this argument is ignored and the port is given no
 *	load-balanced credits.
 * - dir_credit_low_watermark: Directed credit low watermark. When the port's
 *	credits reach this watermark, they become eligible to be refilled by
 *	the DLB as credits until the high watermark (num_dir_credits) is
 *	reached.
 * - ldb_credit_quantum: Number of load-balanced credits for the DLB to refill
 *	per refill operation.
 *
 *	If this port's scheduling domain does not have any load-balanced
 *	queues, this argument is ignored and the port is given no
 *	load-balanced credits.
 * - dir_credit_quantum: Number of directed credits for the DLB to refill per
 *	refill operation.
 * - cq_depth: Depth of the port's CQ. Must be a power-of-two between 8 and
 *	1024, inclusive.
 * - cq_depth_threshold: CQ depth interrupt threshold. A value of N means that
 *	the CQ interrupt won't fire until there are N or more outstanding CQ
 *	tokens.
 * - queue_id: Queue ID. If the corresponding directed queue is already
 *	created, specify its ID here. Else this argument must be 0xFFFFFFFF to
 *	indicate that the port is being created before the queue.
 * - padding1: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: Port ID.
 */
struct dlb_create_dir_port_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 ldb_credit_pool_id;
	__u32 dir_credit_pool_id;
	__u16 ldb_credit_high_watermark;
	__u16 ldb_credit_low_watermark;
	__u16 ldb_credit_quantum;
	__u16 dir_credit_high_watermark;
	__u16 dir_credit_low_watermark;
	__u16 dir_credit_quantum;
	__u16 cq_depth;
	__u16 cq_depth_threshold;
	__s32 queue_id;
	__u32 padding1;
};

/*
 * DLB_DOMAIN_CMD_START_DOMAIN: Mark the end of the domain configuration. This
 *	must be called before passing QEs into the device, and no configuration
 *	ioctls can be issued once the domain has started. Sending QEs into the
 *	device before calling this ioctl will result in undefined behavior.
 * Input parameters:
 * - (None)
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 */
struct dlb_start_domain_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
};

/*
 * DLB_DOMAIN_CMD_MAP_QID: Map a load-balanced queue to a load-balanced port.
 * Input parameters:
 * - port_id: Load-balanced port ID.
 * - qid: Load-balanced queue ID.
 * - priority: Queue->port service priority.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 */
struct dlb_map_qid_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 port_id;
	__u32 qid;
	__u32 priority;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_UNMAP_QID: Unmap a load-balanced queue from a load-balanced
 *	port.
 * Input parameters:
 * - port_id: Load-balanced port ID.
 * - qid: Load-balanced queue ID.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 */
struct dlb_unmap_qid_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 port_id;
	__u32 qid;
};

/*
 * DLB_DOMAIN_CMD_ENABLE_LDB_PORT: Enable scheduling to a load-balanced port.
 * Input parameters:
 * - port_id: Load-balanced port ID.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 */
struct dlb_enable_ldb_port_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 port_id;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_ENABLE_DIR_PORT: Enable scheduling to a directed port.
 * Input parameters:
 * - port_id: Directed port ID.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 */
struct dlb_enable_dir_port_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 port_id;
};

/*
 * DLB_DOMAIN_CMD_DISABLE_LDB_PORT: Disable scheduling to a load-balanced port.
 * Input parameters:
 * - port_id: Load-balanced port ID.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 */
struct dlb_disable_ldb_port_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 port_id;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_DISABLE_DIR_PORT: Disable scheduling to a directed port.
 * Input parameters:
 * - port_id: Directed port ID.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 */
struct dlb_disable_dir_port_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 port_id;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_GET_LDB_QUEUE_DEPTH: Get a load-balanced queue's depth.
 * Input parameters:
 * - queue_id: The load-balanced queue ID.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: queue depth.
 */
struct dlb_get_ldb_queue_depth_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 queue_id;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_GET_DIR_QUEUE_DEPTH: Get a directed queue's depth.
 * Input parameters:
 * - queue_id: The directed queue ID.
 * - padding0: Reserved for future use.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: queue depth.
 */
struct dlb_get_dir_queue_depth_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 queue_id;
	__u32 padding0;
};

/*
 * DLB_DOMAIN_CMD_PENDING_PORT_UNMAPS: Get number of queue unmap operations in
 *	progress for a load-balanced port.
 *
 *	Note: This is a snapshot; the number of unmap operations in progress
 *	is subject to change at any time.
 *
 * Input parameters:
 * - port_id: Load-balanced port ID.
 *
 * Output parameters:
 * - response: pointer to a struct dlb_cmd_response.
 *	response.status: Detailed error code. In certain cases, such as if the
 *	response pointer is invalid, the driver won't set status.
 *	response.id: number of unmaps in progress.
 */
struct dlb_pending_port_unmaps_args {
	/* Output parameters */
	__u64 response;
	/* Input parameters */
	__u32 port_id;
	__u32 padding0;
};

/*
 * Base addresses for memory mapping the consumer queue (CQ) and popcount (PC)
 * memory space, and producer port (PP) MMIO space. The CQ, PC, and PP
 * addresses are per-port. Every address is page-separated (e.g. LDB PP 0 is at
 * 0x2100000 and LDB PP 1 is at 0x2101000).
 */
#define DLB_LDB_CQ_BASE 0x3000000
#define DLB_LDB_CQ_MAX_SIZE 65536
#define DLB_LDB_CQ_OFFS(id) (DLB_LDB_CQ_BASE + (id) * DLB_LDB_CQ_MAX_SIZE)

#define DLB_DIR_CQ_BASE 0x3800000
#define DLB_DIR_CQ_MAX_SIZE 65536
#define DLB_DIR_CQ_OFFS(id) (DLB_DIR_CQ_BASE + (id) * DLB_DIR_CQ_MAX_SIZE)

#define DLB_LDB_PC_BASE 0x2300000
#define DLB_LDB_PC_MAX_SIZE 4096
#define DLB_LDB_PC_OFFS(id) (DLB_LDB_PC_BASE + (id) * DLB_LDB_PC_MAX_SIZE)

#define DLB_DIR_PC_BASE 0x2200000
#define DLB_DIR_PC_MAX_SIZE 4096
#define DLB_DIR_PC_OFFS(id) (DLB_DIR_PC_BASE + (id) * DLB_DIR_PC_MAX_SIZE)

#define DLB_LDB_PP_BASE 0x2100000
#define DLB_LDB_PP_MAX_SIZE 4096
#define DLB_LDB_PP_OFFS(id) (DLB_LDB_PP_BASE + (id) * DLB_LDB_PP_MAX_SIZE)

#define DLB_DIR_PP_BASE 0x2000000
#define DLB_DIR_PP_MAX_SIZE 4096
#define DLB_DIR_PP_OFFS(id) (DLB_DIR_PP_BASE + (id) * DLB_DIR_PP_MAX_SIZE)

#endif /* __DLB_USER_H */
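
For illustration, a sketch of how a process that has opened the device could map a load-balanced port's producer-port window and consumer queue using the offsets above. The exact mmap contract (flags, which file descriptor to pass) belongs to the kernel driver and is assumed here.

#include <sys/mman.h>

static inline void *dlb_example_map_ldb_pp(int fd, int port)
{
	/* One page-separated PP window per port, per the comment above */
	return mmap(NULL, DLB_LDB_PP_MAX_SIZE, PROT_WRITE,
		    MAP_SHARED, fd, DLB_LDB_PP_OFFS(port));
}

static inline void *dlb_example_map_ldb_cq(int fd, int port)
{
	return mmap(NULL, DLB_LDB_CQ_MAX_SIZE, PROT_READ,
		    MAP_SHARED, fd, DLB_LDB_CQ_OFFS(port));
}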

File diff suppressed because it is too large
@@ -1,22 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2019-2020 Intel Corporation

if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
	build = false
	reason = 'only supported on x86_64 Linux'
	subdir_done()
endif

sources = files('dlb.c',
		'dlb_iface.c',
		'dlb_xstats.c',
		'pf/dlb_main.c',
		'pf/dlb_pf.c',
		'pf/base/dlb_resource.c',
		'rte_pmd_dlb.c',
		'dlb_selftest.c'
)

headers = files('rte_pmd_dlb.h')

deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']

@@ -1,334 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_HW_TYPES_H
#define __DLB_HW_TYPES_H

#include "../../dlb_user.h"
#include "dlb_osdep_types.h"
#include "dlb_osdep_list.h"

#define DLB_MAX_NUM_DOMAINS 32
#define DLB_MAX_NUM_LDB_QUEUES 128
#define DLB_MAX_NUM_LDB_PORTS 64
#define DLB_MAX_NUM_DIR_PORTS 128
#define DLB_MAX_NUM_LDB_CREDITS 16384
#define DLB_MAX_NUM_DIR_CREDITS 4096
#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
#define DLB_MAX_NUM_AQOS_ENTRIES 2048
#define DLB_MAX_NUM_TOTAL_OUTSTANDING_COMPLETIONS 4096
#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS 4
#define DLB_MAX_NUM_SEQUENCE_NUMBER_MODES 6
#define DLB_QID_PRIORITIES 8
#define DLB_NUM_ARB_WEIGHTS 8
#define DLB_MAX_WEIGHT 255
#define DLB_MAX_PORT_CREDIT_QUANTUM 1023
#define DLB_MAX_CQ_COMP_CHECK_LOOPS 409600
#define DLB_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
#define DLB_HZ 800000000

/* Used for DLB A-stepping workaround for hardware write buffer lock up issue */
#define DLB_A_STEP_MAX_PORTS 128

#define DLB_PF_DEV_ID 0x270B

/* Interrupt related macros */
#define DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS 8
#define DLB_PF_NUM_CQ_INTERRUPT_VECTORS 64
#define DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS \
	(DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + \
	 DLB_PF_NUM_CQ_INTERRUPT_VECTORS)
#define DLB_PF_NUM_COMPRESSED_MODE_VECTORS \
	(DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS + 1)
#define DLB_PF_NUM_PACKED_MODE_VECTORS DLB_PF_TOTAL_NUM_INTERRUPT_VECTORS
#define DLB_PF_COMPRESSED_MODE_CQ_VECTOR_ID DLB_PF_NUM_NON_CQ_INTERRUPT_VECTORS

#define DLB_PF_NUM_ALARM_INTERRUPT_VECTORS 4
#define DLB_INT_ALARM 0
#define DLB_INT_INGRESS_ERROR 3

#define DLB_ALARM_HW_SOURCE_SYS 0
#define DLB_ALARM_HW_SOURCE_DLB 1

#define DLB_ALARM_HW_UNIT_CHP 1
#define DLB_ALARM_HW_UNIT_LSP 3

#define DLB_ALARM_HW_CHP_AID_OUT_OF_CREDITS 6
#define DLB_ALARM_HW_CHP_AID_ILLEGAL_ENQ 7
#define DLB_ALARM_HW_LSP_AID_EXCESS_TOKEN_POPS 15
#define DLB_ALARM_SYS_AID_ILLEGAL_HCW 0
#define DLB_ALARM_SYS_AID_ILLEGAL_QID 3
#define DLB_ALARM_SYS_AID_DISABLED_QID 4
#define DLB_ALARM_SYS_AID_ILLEGAL_CQID 6

/* Hardware-defined base addresses */
#define DLB_LDB_PP_BASE 0x2100000
#define DLB_LDB_PP_STRIDE 0x1000
#define DLB_LDB_PP_BOUND \
	(DLB_LDB_PP_BASE + DLB_LDB_PP_STRIDE * DLB_MAX_NUM_LDB_PORTS)
#define DLB_DIR_PP_BASE 0x2000000
#define DLB_DIR_PP_STRIDE 0x1000
#define DLB_DIR_PP_BOUND \
	(DLB_DIR_PP_BASE + DLB_DIR_PP_STRIDE * DLB_MAX_NUM_DIR_PORTS)

struct dlb_freelist {
	u32 base;
	u32 bound;
	u32 offset;
};

static inline u32 dlb_freelist_count(struct dlb_freelist *list)
{
	return (list->bound - list->base) - list->offset;
}

struct dlb_hcw {
	u64 data;
	/* Word 3 */
	u16 opaque;
	u8 qid;
	u8 sched_type:2;
	u8 priority:3;
	u8 msg_type:3;
	/* Word 4 */
	u16 lock_id;
	u8 meas_lat:1;
	u8 rsvd1:2;
	u8 no_dec:1;
	u8 cmp_id:4;
	u8 cq_token:1;
	u8 qe_comp:1;
	u8 qe_frag:1;
	u8 qe_valid:1;
	u8 int_arm:1;
	u8 error:1;
	u8 rsvd:2;
};

struct dlb_ldb_queue {
	struct dlb_list_entry domain_list;
	struct dlb_list_entry func_list;
	u32 id;
	u32 domain_id;
	u32 num_qid_inflights;
	struct dlb_freelist aqed_freelist;
	u8 sn_cfg_valid;
	u32 sn_group;
	u32 sn_slot;
	u32 num_mappings;
	u8 num_pending_additions;
	u8 owned;
	u8 configured;
};

/* Directed ports and queues are paired by nature, so the driver tracks them
 * with a single data structure.
 */
struct dlb_dir_pq_pair {
	struct dlb_list_entry domain_list;
	struct dlb_list_entry func_list;
	u32 id;
	u32 domain_id;
	u8 ldb_pool_used;
	u8 dir_pool_used;
	u8 queue_configured;
	u8 port_configured;
	u8 owned;
	u8 enabled;
	u32 ref_cnt;
};

enum dlb_qid_map_state {
	/* The slot doesn't contain a valid queue mapping */
	DLB_QUEUE_UNMAPPED,
	/* The slot contains a valid queue mapping */
	DLB_QUEUE_MAPPED,
	/* The driver is mapping a queue into this slot */
	DLB_QUEUE_MAP_IN_PROGRESS,
	/* The driver is unmapping a queue from this slot */
	DLB_QUEUE_UNMAP_IN_PROGRESS,
	/* The driver is unmapping a queue from this slot, and once complete
	 * will replace it with another mapping.
	 */
	DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP,
};

struct dlb_ldb_port_qid_map {
	u16 qid;
	u8 priority;
	u16 pending_qid;
	u8 pending_priority;
	enum dlb_qid_map_state state;
};

struct dlb_ldb_port {
	struct dlb_list_entry domain_list;
	struct dlb_list_entry func_list;
	u32 id;
	u32 domain_id;
	u8 ldb_pool_used;
	u8 dir_pool_used;
	u8 init_tkn_cnt;
	u32 hist_list_entry_base;
	u32 hist_list_entry_limit;
	/* The qid_map represents the hardware QID mapping state. */
	struct dlb_ldb_port_qid_map qid_map[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
	u32 ref_cnt;
	u8 num_pending_removals;
	u8 num_mappings;
	u8 owned;
	u8 enabled;
	u8 configured;
};

struct dlb_credit_pool {
	struct dlb_list_entry domain_list;
	struct dlb_list_entry func_list;
	u32 id;
	u32 domain_id;
	u32 total_credits;
	u32 avail_credits;
	u8 owned;
	u8 configured;
};

struct dlb_sn_group {
	u32 mode;
	u32 sequence_numbers_per_queue;
	u32 slot_use_bitmap;
	u32 id;
};

static inline bool dlb_sn_group_full(struct dlb_sn_group *group)
{
	u32 mask[6] = {
		0xffffffff, /* 32 SNs per queue */
		0x0000ffff, /* 64 SNs per queue */
		0x000000ff, /* 128 SNs per queue */
		0x0000000f, /* 256 SNs per queue */
		0x00000003, /* 512 SNs per queue */
		0x00000001}; /* 1024 SNs per queue */

	return group->slot_use_bitmap == mask[group->mode];
}

static inline int dlb_sn_group_alloc_slot(struct dlb_sn_group *group)
{
	int bound[6] = {32, 16, 8, 4, 2, 1};
	int i;

	for (i = 0; i < bound[group->mode]; i++) {
		if (!(group->slot_use_bitmap & (1 << i))) {
			group->slot_use_bitmap |= 1 << i;
			return i;
		}
	}

	return -1;
}

static inline void dlb_sn_group_free_slot(struct dlb_sn_group *group, int slot)
{
	group->slot_use_bitmap &= ~(1 << slot);
}

static inline int dlb_sn_group_used_slots(struct dlb_sn_group *group)
{
	int i, cnt = 0;

	for (i = 0; i < 32; i++)
		cnt += !!(group->slot_use_bitmap & (1 << i));

	return cnt;
}
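
/*
 * For illustration only: how the resource layer might claim and release
 * an ordered queue's sequence-number slot. This uses only the
 * definitions directly above; mode 2 means 128 SNs per queue, so the
 * group exposes 8 slots.
 */
static inline int dlb_example_sn_slot(void)
{
	struct dlb_sn_group group = {
		.mode = 2,				/* 128 SNs per queue */
		.sequence_numbers_per_queue = 128,
		.slot_use_bitmap = 0,
		.id = 0,
	};
	int slot;

	slot = dlb_sn_group_alloc_slot(&group);		/* returns 0 */
	if (slot < 0)
		return -1;				/* group full */

	/* The queue now owns the slot; occupancy shows one slot in use */
	if (dlb_sn_group_used_slots(&group) != 1)
		return -1;

	dlb_sn_group_free_slot(&group, slot);		/* reconfigurable again */
	return 0;
}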

struct dlb_domain {
	struct dlb_function_resources *parent_func;
	struct dlb_list_entry func_list;
	struct dlb_list_head used_ldb_queues;
	struct dlb_list_head used_ldb_ports;
	struct dlb_list_head used_dir_pq_pairs;
	struct dlb_list_head used_ldb_credit_pools;
	struct dlb_list_head used_dir_credit_pools;
	struct dlb_list_head avail_ldb_queues;
	struct dlb_list_head avail_ldb_ports;
	struct dlb_list_head avail_dir_pq_pairs;
	struct dlb_list_head avail_ldb_credit_pools;
	struct dlb_list_head avail_dir_credit_pools;
	u32 total_hist_list_entries;
	u32 avail_hist_list_entries;
	u32 hist_list_entry_base;
	u32 hist_list_entry_offset;
	struct dlb_freelist qed_freelist;
	struct dlb_freelist dqed_freelist;
	struct dlb_freelist aqed_freelist;
	u32 id;
	int num_pending_removals;
	int num_pending_additions;
	u8 configured;
	u8 started;
};

struct dlb_bitmap;

struct dlb_function_resources {
	u32 num_avail_domains;
	struct dlb_list_head avail_domains;
	struct dlb_list_head used_domains;
	u32 num_avail_ldb_queues;
	struct dlb_list_head avail_ldb_queues;
	u32 num_avail_ldb_ports;
	struct dlb_list_head avail_ldb_ports;
	u32 num_avail_dir_pq_pairs;
	struct dlb_list_head avail_dir_pq_pairs;
	struct dlb_bitmap *avail_hist_list_entries;
	struct dlb_bitmap *avail_qed_freelist_entries;
	struct dlb_bitmap *avail_dqed_freelist_entries;
	struct dlb_bitmap *avail_aqed_freelist_entries;
	u32 num_avail_ldb_credit_pools;
	struct dlb_list_head avail_ldb_credit_pools;
	u32 num_avail_dir_credit_pools;
	struct dlb_list_head avail_dir_credit_pools;
	u32 num_enabled_ldb_ports;
};

/* After initialization, each resource in dlb_hw_resources is located in one of
 * the following lists:
 * -- The PF's available resources list. These are unconfigured resources owned
 *	by the PF and not allocated to a DLB scheduling domain.
 * -- A domain's available resources list. These are domain-owned unconfigured
 *	resources.
 * -- A domain's used resources list. These are domain-owned configured
 *	resources.
 *
 * A resource moves to a new list when a domain is created or destroyed, or
 * when the resource is configured.
 */
struct dlb_hw_resources {
	struct dlb_ldb_queue ldb_queues[DLB_MAX_NUM_LDB_QUEUES];
	struct dlb_ldb_port ldb_ports[DLB_MAX_NUM_LDB_PORTS];
	struct dlb_dir_pq_pair dir_pq_pairs[DLB_MAX_NUM_DIR_PORTS];
	struct dlb_credit_pool ldb_credit_pools[DLB_MAX_NUM_LDB_CREDIT_POOLS];
	struct dlb_credit_pool dir_credit_pools[DLB_MAX_NUM_DIR_CREDIT_POOLS];
	struct dlb_sn_group sn_groups[DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
};

struct dlb_hw {
	/* BAR 0 address */
	void *csr_kva;
	unsigned long csr_phys_addr;
	/* BAR 2 address */
	void *func_kva;
	unsigned long func_phys_addr;

	/* Resource tracking */
	struct dlb_hw_resources rsrcs;
	struct dlb_function_resources pf;
	struct dlb_domain domains[DLB_MAX_NUM_DOMAINS];
};

#endif /* __DLB_HW_TYPES_H */

@@ -1,310 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_OSDEP_H__
#define __DLB_OSDEP_H__

#include <string.h>
#include <time.h>
#include <unistd.h>
#include <cpuid.h>
#include <pthread.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include "../dlb_main.h"
#include "dlb_resource.h"
#include "../../dlb_log.h"
#include "../../dlb_user.h"

#define DLB_PCI_REG_READ(reg) rte_read32((void *)reg)
#define DLB_PCI_REG_WRITE(reg, val) rte_write32(val, (void *)reg)

#define DLB_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
#define DLB_CSR_RD(hw, reg) \
	DLB_PCI_REG_READ(DLB_CSR_REG_ADDR((hw), (reg)))
#define DLB_CSR_WR(hw, reg, val) \
	DLB_PCI_REG_WRITE(DLB_CSR_REG_ADDR((hw), (reg)), (val))

#define DLB_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
#define DLB_FUNC_RD(hw, reg) \
	DLB_PCI_REG_READ(DLB_FUNC_REG_ADDR((hw), (reg)))
#define DLB_FUNC_WR(hw, reg, val) \
	DLB_PCI_REG_WRITE(DLB_FUNC_REG_ADDR((hw), (reg)), (val))

extern unsigned int dlb_unregister_timeout_s;

/**
 * os_queue_unregister_timeout_s() - timeout (in seconds) to wait for queue
 * unregister acknowledgments.
 */
static inline unsigned int os_queue_unregister_timeout_s(void)
{
	return dlb_unregister_timeout_s;
}

static inline size_t os_strlcpy(char *dst, const char *src, size_t sz)
{
	return rte_strlcpy(dst, src, sz);
}

/**
 * os_udelay() - busy-wait for a number of microseconds
 * @usecs: delay duration.
 */
static inline void os_udelay(int usecs)
{
	rte_delay_us(usecs);
}

/**
 * os_msleep() - sleep for a number of milliseconds
 * @msecs: delay duration.
 */
static inline void os_msleep(int msecs)
{
	rte_delay_ms(msecs);
}

#define DLB_PP_BASE(__is_ldb) ((__is_ldb) ? DLB_LDB_PP_BASE : DLB_DIR_PP_BASE)

/**
 * os_map_producer_port() - map a producer port into the caller's address space
 * @hw: dlb_hw handle for a particular device.
 * @port_id: port ID
 * @is_ldb: true for a load-balanced port, false for a directed port
 *
 * This function maps the requested producer port memory into the caller's
 * address space.
 *
 * Return:
 * Returns the base address at which the PP memory was mapped, else NULL.
 */
static inline void *os_map_producer_port(struct dlb_hw *hw,
					 u8 port_id,
					 bool is_ldb)
{
	uint64_t addr;
	uint64_t pp_dma_base;

	pp_dma_base = (uintptr_t)hw->func_kva + DLB_PP_BASE(is_ldb);
	addr = (pp_dma_base + (rte_mem_page_size() * port_id));

	return (void *)(uintptr_t)addr;
}

/**
 * os_unmap_producer_port() - unmap a producer port
 * @hw: dlb_hw handle for a particular device.
 * @addr: mapped producer port address
 *
 * This function undoes os_map_producer_port() by unmapping the producer port
 * memory from the caller's address space.
 */
/* PF PMD - nothing to do here, since the memory was not actually mapped by us */
static inline void os_unmap_producer_port(struct dlb_hw *hw, void *addr)
{
	RTE_SET_USED(hw);
	RTE_SET_USED(addr);
}

/**
 * os_fence_hcw() - fence an HCW to ensure it arrives at the device
 * @hw: dlb_hw handle for a particular device.
 * @pp_addr: producer port address
 */
static inline void os_fence_hcw(struct dlb_hw *hw, u64 *pp_addr)
{
	RTE_SET_USED(hw);

	/* To ensure outstanding HCWs reach the device, read the PP address. IA
	 * memory ordering prevents reads from passing older writes, and the
	 * mfence also ensures this.
	 */
	rte_mb();

	*(volatile u64 *)pp_addr;
}

/* Map to the PMD's logging interface */
#define DLB_ERR(dev, fmt, args...) \
	DLB_LOG_ERR(fmt, ## args)

#define DLB_INFO(dev, fmt, args...) \
	DLB_LOG_INFO(fmt, ## args)

#define DLB_DEBUG(dev, fmt, args...) \
	DLB_LOG_DEBUG(fmt, ## args)

/**
 * DLB_HW_ERR() - log an error message
 * @dlb: dlb_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB_HW_ERR(dlb, ...) do {	\
	RTE_SET_USED(dlb);		\
	DLB_ERR(dlb, __VA_ARGS__);	\
} while (0)

/**
 * DLB_HW_INFO() - log an info message
 * @dlb: dlb_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB_HW_INFO(dlb, ...) do {	\
	RTE_SET_USED(dlb);		\
	DLB_INFO(dlb, __VA_ARGS__);	\
} while (0)

/*** scheduling functions ***/

/* The callback runs until it completes all outstanding QID->CQ
 * map and unmap requests. To prevent deadlock, this function gives other
 * threads a chance to grab the resource mutex and configure hardware.
 */
static void *dlb_complete_queue_map_unmap(void *__args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)__args;
	int ret;

	while (1) {
		rte_spinlock_lock(&dlb_dev->resource_mutex);

		ret = dlb_finish_unmap_qid_procedures(&dlb_dev->hw);
		ret += dlb_finish_map_qid_procedures(&dlb_dev->hw);

		if (ret != 0) {
			rte_spinlock_unlock(&dlb_dev->resource_mutex);
			/* Relinquish the CPU so the application can process
			 * its CQs, so this function does not deadlock.
			 */
			sched_yield();
		} else
			break;
	}

	dlb_dev->worker_launched = false;

	rte_spinlock_unlock(&dlb_dev->resource_mutex);

	return NULL;
}

/**
 * os_schedule_work() - launch a thread to process pending map and unmap work
 * @hw: dlb_hw handle for a particular device.
 *
 * This function launches a thread that will run until all pending
 * map and unmap procedures are complete.
 */
static inline void os_schedule_work(struct dlb_hw *hw)
{
	struct dlb_dev *dlb_dev;
	pthread_t complete_queue_map_unmap_thread;
	int ret;

	dlb_dev = container_of(hw, struct dlb_dev, hw);

	ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
				     "dlb_queue_unmap_waiter",
				     NULL,
				     dlb_complete_queue_map_unmap,
				     dlb_dev);
	if (ret)
		DLB_ERR(dlb_dev,
			"Could not create queue complete map/unmap thread, err=%d\n",
			ret);
	else
		dlb_dev->worker_launched = true;
}

/**
 * os_worker_active() - query whether the map/unmap worker thread is active
 * @hw: dlb_hw handle for a particular device.
 *
 * This function returns a boolean indicating whether a thread (launched by
 * os_schedule_work()) is active. This function is used to determine
 * whether or not to launch a worker thread.
 */
static inline bool os_worker_active(struct dlb_hw *hw)
{
	struct dlb_dev *dlb_dev;

	dlb_dev = container_of(hw, struct dlb_dev, hw);

	return dlb_dev->worker_launched;
}

/**
 * os_notify_user_space() - notify user space
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: ID of domain to notify.
 * @alert_id: alert ID.
 * @aux_alert_data: additional alert data.
 *
 * This function notifies user space of an alert (such as a remote queue
 * unregister or hardware alarm).
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 */
static inline int os_notify_user_space(struct dlb_hw *hw,
				       u32 domain_id,
				       u64 alert_id,
				       u64 aux_alert_data)
{
	RTE_SET_USED(hw);
	RTE_SET_USED(domain_id);
	RTE_SET_USED(alert_id);
	RTE_SET_USED(aux_alert_data);

	/* Not called for the PF PMD */
	return -1;
}

enum dlb_dev_revision {
	DLB_A0,
	DLB_A1,
	DLB_A2,
	DLB_A3,
	DLB_B0,
};

/**
 * os_get_dev_revision() - query the device revision
 * @hw: dlb_hw handle for a particular device.
 */
static inline enum dlb_dev_revision os_get_dev_revision(struct dlb_hw *hw)
{
	uint32_t a, b, c, d, stepping;

	RTE_SET_USED(hw);

	__cpuid(0x1, a, b, c, d);

	stepping = a & 0xf;

	switch (stepping) {
	case 0:
		return DLB_A0;
	case 1:
		return DLB_A1;
	case 2:
		return DLB_A2;
	case 3:
		return DLB_A3;
	default:
		/* Treat all revisions >= 4 as B0 */
		return DLB_B0;
	}
}

#endif /* __DLB_OSDEP_H__ */
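
For illustration, a sketch of how the enqueue path might pair a hardware control word (HCW) store with os_fence_hcw() so the command is visible to the device before the caller proceeds. The real PMD's 64-byte store sequence is simplified here to two 8-byte stores of the 16-byte struct dlb_hcw, and the hw handle and port ID are assumed to come from the PF PMD's device setup.

static inline void dlb_example_enqueue_fenced(struct dlb_hw *hw,
					      struct dlb_hcw *hcw)
{
	u64 *pp_addr = os_map_producer_port(hw, 0, true); /* LDB port 0 */

	/* Store the HCW to the producer-port window (simplified) */
	((volatile u64 *)pp_addr)[0] = ((u64 *)hcw)[0];
	((volatile u64 *)pp_addr)[1] = ((u64 *)hcw)[1];

	os_fence_hcw(hw, pp_addr);	/* read back to push the writes out */
}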

@@ -1,441 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_OSDEP_BITMAP_H__
#define __DLB_OSDEP_BITMAP_H__

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <rte_bitmap.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include "../dlb_main.h"

/*************************/
/*** Bitmap operations ***/
/*************************/
struct dlb_bitmap {
	struct rte_bitmap *map;
	unsigned int len;
	struct dlb_hw *hw;
};

/**
 * dlb_bitmap_alloc() - alloc a bitmap data structure
 * @hw: dlb_hw handle for a particular device.
 * @bitmap: pointer to dlb_bitmap structure pointer.
 * @len: number of entries in the bitmap.
 *
 * This function allocates a bitmap and initializes it with length @len. All
 * entries are initially zero.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or len is 0.
 * ENOMEM - could not allocate memory for the bitmap data structure.
 */
static inline int dlb_bitmap_alloc(struct dlb_hw *hw,
				   struct dlb_bitmap **bitmap,
				   unsigned int len)
{
	struct dlb_bitmap *bm;
	void *mem;
	uint32_t alloc_size;
	uint32_t nbits = (uint32_t)len;

	RTE_SET_USED(hw);

	if (bitmap == NULL || nbits == 0)
		return -EINVAL;

	/* Allocate the DLB bitmap control struct */
	bm = rte_malloc("DLB_PF",
			sizeof(struct dlb_bitmap),
			RTE_CACHE_LINE_SIZE);

	if (bm == NULL)
		return -ENOMEM;

	/* Allocate the bitmap memory */
	alloc_size = rte_bitmap_get_memory_footprint(nbits);
	mem = rte_malloc("DLB_PF_BITMAP", alloc_size, RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		rte_free(bm);
		return -ENOMEM;
	}

	bm->map = rte_bitmap_init(len, mem, alloc_size);
	if (bm->map == NULL) {
		rte_free(mem);
		rte_free(bm);
		return -ENOMEM;
	}

	bm->len = len;

	*bitmap = bm;

	return 0;
}

/**
 * dlb_bitmap_free() - free a previously allocated bitmap data structure
 * @bitmap: pointer to dlb_bitmap structure.
 *
 * This function frees a bitmap that was allocated with dlb_bitmap_alloc().
 */
static inline void dlb_bitmap_free(struct dlb_bitmap *bitmap)
{
	if (bitmap == NULL)
		return;

	rte_free(bitmap->map);
	rte_free(bitmap);
}

/**
 * dlb_bitmap_fill() - fill a bitmap with all 1s
 * @bitmap: pointer to dlb_bitmap structure.
 *
 * This function sets all bitmap values to 1.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or is uninitialized.
 */
static inline int dlb_bitmap_fill(struct dlb_bitmap *bitmap)
{
	unsigned int i;

	if (bitmap == NULL || bitmap->map == NULL)
		return -EINVAL;

	for (i = 0; i != bitmap->len; i++)
		rte_bitmap_set(bitmap->map, i);

	return 0;
}

/**
 * dlb_bitmap_zero() - fill a bitmap with all 0s
 * @bitmap: pointer to dlb_bitmap structure.
 *
 * This function sets all bitmap values to 0.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or is uninitialized.
 */
static inline int dlb_bitmap_zero(struct dlb_bitmap *bitmap)
{
	if (bitmap == NULL || bitmap->map == NULL)
		return -EINVAL;

	rte_bitmap_reset(bitmap->map);

	return 0;
}

/**
 * dlb_bitmap_set() - set a bitmap entry
 * @bitmap: pointer to dlb_bitmap structure.
 * @bit: bit index.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
 *	    bitmap length.
 */
static inline int dlb_bitmap_set(struct dlb_bitmap *bitmap,
				 unsigned int bit)
{
	if (bitmap == NULL || bitmap->map == NULL)
		return -EINVAL;

	if (bitmap->len <= bit)
		return -EINVAL;

	rte_bitmap_set(bitmap->map, bit);

	return 0;
}

/**
 * dlb_bitmap_set_range() - set a range of bitmap entries
 * @bitmap: pointer to dlb_bitmap structure.
 * @bit: starting bit index.
 * @len: length of the range.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
 *	    length.
 */
static inline int dlb_bitmap_set_range(struct dlb_bitmap *bitmap,
				       unsigned int bit,
				       unsigned int len)
{
	unsigned int i;

	if (bitmap == NULL || bitmap->map == NULL)
		return -EINVAL;

	if (bitmap->len <= bit)
		return -EINVAL;

	for (i = 0; i != len; i++)
		rte_bitmap_set(bitmap->map, bit + i);

	return 0;
}

/**
 * dlb_bitmap_clear() - clear a bitmap entry
 * @bitmap: pointer to dlb_bitmap structure.
 * @bit: bit index.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
 *	    bitmap length.
 */
static inline int dlb_bitmap_clear(struct dlb_bitmap *bitmap,
				   unsigned int bit)
{
	if (bitmap == NULL || bitmap->map == NULL)
		return -EINVAL;

	if (bitmap->len <= bit)
		return -EINVAL;

	rte_bitmap_clear(bitmap->map, bit);

	return 0;
}

/**
 * dlb_bitmap_clear_range() - clear a range of bitmap entries
 * @bitmap: pointer to dlb_bitmap structure.
 * @bit: starting bit index.
 * @len: length of the range.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
 *	    length.
 */
static inline int dlb_bitmap_clear_range(struct dlb_bitmap *bitmap,
					 unsigned int bit,
					 unsigned int len)
{
	unsigned int i;

	if (bitmap == NULL || bitmap->map == NULL)
		return -EINVAL;

	if (bitmap->len <= bit)
		return -EINVAL;

	for (i = 0; i != len; i++)
		rte_bitmap_clear(bitmap->map, bit + i);

	return 0;
}

/**
 * dlb_bitmap_find_set_bit_range() - find a range of set bits
 * @bitmap: pointer to dlb_bitmap structure.
 * @len: length of the range.
 *
 * This function looks for a range of set bits of length @len.
 *
 * Return:
 * Returns the base bit index upon success, < 0 otherwise.
 *
 * Errors:
 * ENOENT - unable to find a length *len* range of set bits.
 * EINVAL - bitmap is NULL or is uninitialized, or len is invalid.
 */
static inline int dlb_bitmap_find_set_bit_range(struct dlb_bitmap *bitmap,
						unsigned int len)
{
	unsigned int i, j = 0;

	if (bitmap == NULL || bitmap->map == NULL || len == 0)
		return -EINVAL;

	if (bitmap->len < len)
		return -ENOENT;

	for (i = 0; i != bitmap->len; i++) {
		if (rte_bitmap_get(bitmap->map, i)) {
			if (++j == len)
				return i - j + 1;
		} else
			j = 0;
	}

	/* No set bit range of length len? */
	return -ENOENT;
}
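
/*
 * For illustration only: the PF resource code can use these helpers as a
 * simple range allocator. Set bits mean "free"; claiming a contiguous
 * run of n entries finds a set-bit range and then clears it. This uses
 * only the helpers above; the bitmap itself would come from
 * dlb_bitmap_alloc() followed by dlb_bitmap_fill().
 */
static inline int dlb_example_claim_range(struct dlb_bitmap *avail,
					  unsigned int n)
{
	int base = dlb_bitmap_find_set_bit_range(avail, n);

	if (base < 0)
		return base;	/* -EINVAL or -ENOENT */

	/* Mark the range allocated by clearing its "free" bits */
	dlb_bitmap_clear_range(avail, base, n);

	return base;		/* first entry of the claimed range */
}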

/**
 * dlb_bitmap_find_set_bit() - find the first set bit
 * @bitmap: pointer to dlb_bitmap structure.
 *
 * This function looks for a single set bit.
 *
 * Return:
 * Returns the base bit index upon success, < 0 otherwise.
 *
 * Errors:
 * ENOENT - the bitmap contains no set bits.
 * EINVAL - bitmap is NULL or is uninitialized.
 */
static inline int dlb_bitmap_find_set_bit(struct dlb_bitmap *bitmap)
{
	unsigned int i;

	if (bitmap == NULL)
		return -EINVAL;

	if (bitmap->map == NULL)
		return -EINVAL;

	for (i = 0; i != bitmap->len; i++) {
		if (rte_bitmap_get(bitmap->map, i))
			return i;
	}

	return -ENOENT;
}

/**
 * dlb_bitmap_count() - returns the number of set bits
 * @bitmap: pointer to dlb_bitmap structure.
 *
 * This function counts the number of set bits in the bitmap.
 *
 * Return:
 * Returns the number of set bits upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or is uninitialized.
 */
static inline int dlb_bitmap_count(struct dlb_bitmap *bitmap)
{
	int weight = 0;
	unsigned int i;

	if (bitmap == NULL)
		return -EINVAL;

	if (bitmap->map == NULL)
		return -EINVAL;

	for (i = 0; i != bitmap->len; i++) {
		if (rte_bitmap_get(bitmap->map, i))
			weight++;
	}
	return weight;
}

/**
 * dlb_bitmap_longest_set_range() - returns longest contiguous range of set bits
 * @bitmap: pointer to dlb_bitmap structure.
 *
 * Return:
 * Returns the bitmap's longest contiguous range of set bits upon success,
 * <0 otherwise.
 *
 * Errors:
 * EINVAL - bitmap is NULL or is uninitialized.
 */
static inline int dlb_bitmap_longest_set_range(struct dlb_bitmap *bitmap)
{
	int max_len = 0, len = 0;
	unsigned int i;

	if (bitmap == NULL)
		return -EINVAL;

	if (bitmap->map == NULL)
		return -EINVAL;

	for (i = 0; i != bitmap->len; i++) {
		if (rte_bitmap_get(bitmap->map, i)) {
			len++;
		} else {
			if (len > max_len)
				max_len = len;
			len = 0;
		}
	}

	if (len > max_len)
		max_len = len;

	return max_len;
}

/**
 * dlb_bitmap_or() - store the logical 'or' of two bitmaps into a third
 * @dest: pointer to dlb_bitmap structure, which will contain the results of
 *	  the 'or' of src1 and src2.
 * @src1: pointer to dlb_bitmap structure, will be 'or'ed with src2.
 * @src2: pointer to dlb_bitmap structure, will be 'or'ed with src1.
 *
 * This function 'or's two bitmaps together and stores the result in a third
 * bitmap. The source and destination bitmaps can be the same.
 *
 * Return:
 * Returns the number of set bits upon success, <0 otherwise.
 *
 * Errors:
 * EINVAL - One of the bitmaps is NULL or is uninitialized.
 */
static inline int dlb_bitmap_or(struct dlb_bitmap *dest,
				struct dlb_bitmap *src1,
				struct dlb_bitmap *src2)
{
	unsigned int i, min;
	int numset = 0;

	if (dest == NULL || dest->map == NULL ||
	    src1 == NULL || src1->map == NULL ||
	    src2 == NULL || src2->map == NULL)
		return -EINVAL;

	min = dest->len;
	min = (min > src1->len) ? src1->len : min;
	min = (min > src2->len) ? src2->len : min;

	for (i = 0; i != min; i++) {
		if (rte_bitmap_get(src1->map, i) ||
		    rte_bitmap_get(src2->map, i)) {
			rte_bitmap_set(dest->map, i);
			numset++;
		} else
			rte_bitmap_clear(dest->map, i);
	}

	return numset;
}

#endif /* __DLB_OSDEP_BITMAP_H__ */

@@ -1,131 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_OSDEP_LIST_H__
#define __DLB_OSDEP_LIST_H__

#include <rte_tailq.h>

struct dlb_list_entry {
	TAILQ_ENTRY(dlb_list_entry) node;
};

/* Dummy - just a struct definition */
TAILQ_HEAD(dlb_list_head, dlb_list_entry);

/* =================
 * TAILQ Supplements
 * =================
 */

#ifndef TAILQ_FOREACH_ENTRY
#define TAILQ_FOREACH_ENTRY(ptr, head, name, iter) \
	for ((iter) = TAILQ_FIRST(&head); \
	     (iter) \
		&& (ptr = container_of(iter, typeof(*(ptr)), name)); \
	     (iter) = TAILQ_NEXT((iter), node))
#endif

#ifndef TAILQ_FOREACH_ENTRY_SAFE
#define TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, iter, tvar) \
	for ((iter) = TAILQ_FIRST(&head); \
	     (iter) && \
		(ptr = container_of(iter, typeof(*(ptr)), name)) && \
		((tvar) = TAILQ_NEXT((iter), node), 1); \
	     (iter) = (tvar))
#endif

/* =========
 * DLB Lists
 * =========
 */

/**
 * dlb_list_init_head() - initialize the head of a list
 * @head: list head
 */
static inline void dlb_list_init_head(struct dlb_list_head *head)
{
	TAILQ_INIT(head);
}

/**
 * dlb_list_add() - add an entry to a list
 * @head: new entry will be added after this list header
 * @entry: new list entry to be added
 */
static inline void dlb_list_add(struct dlb_list_head *head,
				struct dlb_list_entry *entry)
{
	TAILQ_INSERT_TAIL(head, entry, node);
}

/**
 * dlb_list_del() - delete an entry from a list
 * @head: list head
 * @entry: list entry to be deleted
 */
static inline void dlb_list_del(struct dlb_list_head *head,
				struct dlb_list_entry *entry)
{
	TAILQ_REMOVE(head, entry, node);
}
|
||||
|
||||
/**
|
||||
* dlb_list_empty() - check if a list is empty
|
||||
* @head: list head
|
||||
*
|
||||
* Return:
|
||||
* Returns 1 if empty, 0 if not.
|
||||
*/
|
||||
static inline bool dlb_list_empty(struct dlb_list_head *head)
|
||||
{
|
||||
return TAILQ_EMPTY(head);
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb_list_empty() - check if a list is empty
|
||||
* @src_head: list to be added
|
||||
* @ head: where src_head will be inserted
|
||||
*/
|
||||
static inline void dlb_list_splice(struct dlb_list_head *src_head,
|
||||
struct dlb_list_head *head)
|
||||
{
|
||||
TAILQ_CONCAT(head, src_head, node);
|
||||
}
|
||||
|
||||
/**
|
||||
* DLB_LIST_HEAD() - retrieve the head of the list
|
||||
* @head: list head
|
||||
* @type: type of the list variable
|
||||
* @name: name of the dlb_list within the struct
|
||||
*/
|
||||
#define DLB_LIST_HEAD(head, type, name) \
|
||||
(TAILQ_FIRST(&head) ? \
|
||||
container_of(TAILQ_FIRST(&head), type, name) : \
|
||||
NULL)
|
||||
|
||||
/**
|
||||
* DLB_LIST_FOR_EACH() - iterate over a list
|
||||
* @head: list head
|
||||
* @ptr: pointer to struct containing a struct dlb_list_entry
|
||||
* @name: name of the dlb_list_entry field within the containing struct
|
||||
* @iter: iterator variable
|
||||
*/
|
||||
#define DLB_LIST_FOR_EACH(head, ptr, name, tmp_iter) \
|
||||
TAILQ_FOREACH_ENTRY(ptr, head, name, tmp_iter)
|
||||
|
||||
/**
|
||||
* DLB_LIST_FOR_EACH_SAFE() - iterate over a list. This loop works even if
|
||||
* an element is removed from the list while processing it.
|
||||
* @ptr: pointer to struct containing a struct dlb_list_entry
|
||||
* @ptr_tmp: pointer to struct containing a struct dlb_list_entry (temporary)
|
||||
* @head: list head
|
||||
* @name: name of the dlb_list_entry field within the containing struct
|
||||
* @iter: iterator variable
|
||||
* @iter_tmp: iterator variable (temporary)
|
||||
*/
|
||||
#define DLB_LIST_FOR_EACH_SAFE(head, ptr, ptr_tmp, name, tmp_iter, saf_iter) \
|
||||
TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, tmp_iter, saf_iter)
|
||||
|
||||
#endif /* __DLB_OSDEP_LIST_H__ */
|
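As an aside, a minimal sketch of the safe-iteration pattern these macros enable. The dlb_domain_entry container and drain_domains() helper are hypothetical; note that this expansion of DLB_LIST_FOR_EACH_SAFE ignores its ptr_tmp argument, so the temporary pointer is only accepted for API parity:

/* Hypothetical container struct, for illustration only */
struct dlb_domain_entry {
	int id;
	struct dlb_list_entry node;
};

static void drain_domains(struct dlb_list_head *head)
{
	struct dlb_domain_entry *dom, *next;
	struct dlb_list_entry *it, *it_tmp;

	(void)next; /* ptr_tmp is unused by this macro's expansion */

	/* The _SAFE variant caches the successor iterator before the loop
	 * body runs, so dlb_list_del() on the current entry is legal here.
	 */
	DLB_LIST_FOR_EACH_SAFE(*head, dom, next, node, it, it_tmp)
		dlb_list_del(head, &dom->node);
}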
@ -1,31 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_OSDEP_TYPES_H
#define __DLB_OSDEP_TYPES_H

#include <linux/types.h>

#include <inttypes.h>
#include <ctype.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

/* Types for user mode PF PMD */
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;

#define __iomem

/* END types for user mode PF PMD */

#endif /* __DLB_OSDEP_TYPES_H */
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,876 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_RESOURCE_H
#define __DLB_RESOURCE_H

#include "dlb_hw_types.h"
#include "dlb_osdep_types.h"

/**
 * dlb_resource_init() - initialize the device
 * @hw: pointer to struct dlb_hw.
 *
 * This function initializes the device's software state (pointed to by the hw
 * argument) and programs global scheduling QoS registers. This function should
 * be called during driver initialization.
 *
 * The dlb_hw struct must be unique per DLB device and persist until the device
 * is reset.
 *
 * Return:
 * Returns 0 upon success, -1 otherwise.
 */
int dlb_resource_init(struct dlb_hw *hw);

/**
 * dlb_resource_free() - free device state memory
 * @hw: dlb_hw handle for a particular device.
 *
 * This function frees software state pointed to by dlb_hw. This function
 * should be called when resetting the device or unloading the driver.
 */
void dlb_resource_free(struct dlb_hw *hw);

/**
 * dlb_resource_reset() - reset in-use resources to their initial state
 * @hw: dlb_hw handle for a particular device.
 *
 * This function resets in-use resources, and makes them available for use.
 */
void dlb_resource_reset(struct dlb_hw *hw);

/**
 * dlb_hw_create_sched_domain() - create a scheduling domain
 * @hw: dlb_hw handle for a particular device.
 * @args: scheduling domain creation arguments.
 * @resp: response structure.
 *
 * This function creates a scheduling domain containing the resources specified
 * in args. The individual resources (queues, ports, credit pools) can be
 * configured after creating a scheduling domain.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the domain ID.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, or the requested domain name
 *	    is already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_create_sched_domain(struct dlb_hw *hw,
			       struct dlb_create_sched_domain_args *args,
			       struct dlb_cmd_response *resp);
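The resp convention above recurs throughout this header. As a hedged sketch of the resulting call pattern (create_domain() is hypothetical; only resp->status and resp->id are documented here):

/* Hedged sketch of the resp convention: on failure the detailed reason
 * lands in resp.status (enum dlb_error), except for -EFAULT where it is
 * not set; on success the created object's ID lands in resp.id.
 */
static int create_domain(struct dlb_hw *hw,
			 struct dlb_create_sched_domain_args *args)
{
	struct dlb_cmd_response resp = {0};
	int ret;

	ret = dlb_hw_create_sched_domain(hw, args, &resp);
	if (ret < 0)
		return ret; /* inspect resp.status unless ret == -EFAULT */

	return (int)resp.id; /* domain ID for later per-domain calls */
}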

/**
 * dlb_hw_create_ldb_pool() - create a load-balanced credit pool
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: credit pool creation arguments.
 * @resp: response structure.
 *
 * This function creates a load-balanced credit pool containing the number of
 * requested credits.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the pool ID.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *	    or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
			   u32 domain_id,
			   struct dlb_create_ldb_pool_args *args,
			   struct dlb_cmd_response *resp);

/**
 * dlb_hw_create_dir_pool() - create a directed credit pool
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: credit pool creation arguments.
 * @resp: response structure.
 *
 * This function creates a directed credit pool containing the number of
 * requested credits.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the pool ID.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *	    or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_create_dir_pool(struct dlb_hw *hw,
			   u32 domain_id,
			   struct dlb_create_dir_pool_args *args,
			   struct dlb_cmd_response *resp);

/**
 * dlb_hw_create_ldb_queue() - create a load-balanced queue
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue creation arguments.
 * @resp: response structure.
 *
 * This function creates a load-balanced queue.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the queue ID.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *	    the domain has already been started, or the requested queue name is
 *	    already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
			    u32 domain_id,
			    struct dlb_create_ldb_queue_args *args,
			    struct dlb_cmd_response *resp);

/**
 * dlb_hw_create_dir_queue() - create a directed queue
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue creation arguments.
 * @resp: response structure.
 *
 * This function creates a directed queue.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the queue ID.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *	    or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_create_dir_queue(struct dlb_hw *hw,
			    u32 domain_id,
			    struct dlb_create_dir_queue_args *args,
			    struct dlb_cmd_response *resp);

/**
 * dlb_hw_create_dir_port() - create a directed port
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port creation arguments.
 * @pop_count_dma_base: base address of the pop count memory. This can be
 *			a PA or an IOVA.
 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
 * @resp: response structure.
 *
 * This function creates a directed port.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the port ID.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
 *	    pool ID is invalid, a pointer address is not properly aligned, the
 *	    domain is not configured, or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_create_dir_port(struct dlb_hw *hw,
			   u32 domain_id,
			   struct dlb_create_dir_port_args *args,
			   u64 pop_count_dma_base,
			   u64 cq_dma_base,
			   struct dlb_cmd_response *resp);

/**
 * dlb_hw_create_ldb_port() - create a load-balanced port
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port creation arguments.
 * @pop_count_dma_base: base address of the pop count memory. This can be
 *			a PA or an IOVA.
 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
 * @resp: response structure.
 *
 * This function creates a load-balanced port.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the port ID.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
 *	    pool ID is invalid, a pointer address is not properly aligned, the
 *	    domain is not configured, or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_create_ldb_port(struct dlb_hw *hw,
			   u32 domain_id,
			   struct dlb_create_ldb_port_args *args,
			   u64 pop_count_dma_base,
			   u64 cq_dma_base,
			   struct dlb_cmd_response *resp);

/**
 * dlb_hw_start_domain() - start a scheduling domain
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: start domain arguments.
 * @resp: response structure.
 *
 * This function starts a scheduling domain, which allows applications to send
 * traffic through it. Once a domain is started, its resources can no longer be
 * configured (besides QID remapping and port enable/disable).
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error.
 *
 * Errors:
 * EINVAL - the domain is not configured, or the domain is already started.
 */
int dlb_hw_start_domain(struct dlb_hw *hw,
			u32 domain_id,
			struct dlb_start_domain_args *args,
			struct dlb_cmd_response *resp);

/**
 * dlb_hw_map_qid() - map a load-balanced queue to a load-balanced port
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: map QID arguments.
 * @resp: response structure.
 *
 * This function configures the DLB to schedule QEs from the specified queue to
 * the specified port. Each load-balanced port can be mapped to up to 8 queues;
 * each load-balanced queue can potentially map to all the load-balanced ports.
 *
 * A successful return does not necessarily mean the mapping was configured. If
 * this function is unable to immediately map the queue to the port, it will
 * add the requested operation to a per-port list of pending map/unmap
 * operations, and (if it's not already running) launch a kernel thread that
 * periodically attempts to process all pending operations. In a sense, this is
 * an asynchronous function.
 *
 * This asynchronicity creates two views of the state of hardware: the actual
 * hardware state and the requested state (as if every request completed
 * immediately). If there are any pending map/unmap operations, the requested
 * state will differ from the actual state. All validation is performed with
 * respect to the pending state; for instance, if there are 8 pending map
 * operations for port X, a request for a 9th will fail because a load-balanced
 * port can only map up to 8 queues.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
 *	    the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_map_qid(struct dlb_hw *hw,
		   u32 domain_id,
		   struct dlb_map_qid_args *args,
		   struct dlb_cmd_response *resp);
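Because map/unmap requests may only be queued rather than applied immediately, callers have to poll for completion. Below is a hedged sketch of that pattern using dlb_hw_unmap_qid() and dlb_hw_pending_port_unmaps() (both declared in this header); unmap_and_wait() and its retry budget are hypothetical:

/* Hedged sketch: request an unmap, then poll until the hardware view
 * catches up with the requested view. Per the docs, resp->id from
 * dlb_hw_pending_port_unmaps() carries the in-progress unmap count.
 */
static int unmap_and_wait(struct dlb_hw *hw, u32 domain_id,
			  struct dlb_unmap_qid_args *unmap_args,
			  struct dlb_pending_port_unmaps_args *poll_args)
{
	struct dlb_cmd_response resp = {0};
	int i, ret;

	ret = dlb_hw_unmap_qid(hw, domain_id, unmap_args, &resp);
	if (ret < 0)
		return ret;

	/* A 0 return may only mean "queued": poll the pending count */
	for (i = 0; i < 1000; i++) { /* illustrative polling budget */
		ret = dlb_hw_pending_port_unmaps(hw, domain_id, poll_args,
						 &resp);
		if (ret < 0)
			return ret;
		if (resp.id == 0)
			return 0; /* actual state matches requested state */
	}

	return -ETIMEDOUT;
}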

/**
 * dlb_hw_unmap_qid() - unmap a load-balanced queue from a load-balanced port
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: unmap QID arguments.
 * @resp: response structure.
 *
 * This function configures the DLB to stop scheduling QEs from the specified
 * queue to the specified port.
 *
 * A successful return does not necessarily mean the mapping was removed. If
 * this function is unable to immediately unmap the queue from the port, it
 * will add the requested operation to a per-port list of pending map/unmap
 * operations, and (if it's not already running) launch a kernel thread that
 * periodically attempts to process all pending operations. See
 * dlb_hw_map_qid() for more details.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
 *	    the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_unmap_qid(struct dlb_hw *hw,
		     u32 domain_id,
		     struct dlb_unmap_qid_args *args,
		     struct dlb_cmd_response *resp);

/**
 * dlb_finish_unmap_qid_procedures() - finish any pending unmap procedures
 * @hw: dlb_hw handle for a particular device.
 *
 * This function attempts to finish any outstanding unmap procedures.
 * This function should be called by the kernel thread responsible for
 * finishing map/unmap procedures.
 *
 * Return:
 * Returns the number of procedures that weren't completed.
 */
unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw);

/**
 * dlb_finish_map_qid_procedures() - finish any pending map procedures
 * @hw: dlb_hw handle for a particular device.
 *
 * This function attempts to finish any outstanding map procedures.
 * This function should be called by the kernel thread responsible for
 * finishing map/unmap procedures.
 *
 * Return:
 * Returns the number of procedures that weren't completed.
 */
unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw);

/**
 * dlb_hw_enable_ldb_port() - enable a load-balanced port for scheduling
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port enable arguments.
 * @resp: response structure.
 *
 * This function configures the DLB to schedule QEs to a load-balanced port.
 * Ports are enabled by default.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error.
 *
 * Errors:
 * EINVAL - The port ID is invalid or the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_enable_ldb_port(struct dlb_hw *hw,
			   u32 domain_id,
			   struct dlb_enable_ldb_port_args *args,
			   struct dlb_cmd_response *resp);

/**
 * dlb_hw_disable_ldb_port() - disable a load-balanced port for scheduling
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port disable arguments.
 * @resp: response structure.
 *
 * This function configures the DLB to stop scheduling QEs to a load-balanced
 * port. Ports are enabled by default.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error.
 *
 * Errors:
 * EINVAL - The port ID is invalid or the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_disable_ldb_port(struct dlb_hw *hw,
			    u32 domain_id,
			    struct dlb_disable_ldb_port_args *args,
			    struct dlb_cmd_response *resp);

/**
 * dlb_hw_enable_dir_port() - enable a directed port for scheduling
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port enable arguments.
 * @resp: response structure.
 *
 * This function configures the DLB to schedule QEs to a directed port.
 * Ports are enabled by default.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error.
 *
 * Errors:
 * EINVAL - The port ID is invalid or the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_enable_dir_port(struct dlb_hw *hw,
			   u32 domain_id,
			   struct dlb_enable_dir_port_args *args,
			   struct dlb_cmd_response *resp);

/**
 * dlb_hw_disable_dir_port() - disable a directed port for scheduling
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: port disable arguments.
 * @resp: response structure.
 *
 * This function configures the DLB to stop scheduling QEs to a directed port.
 * Ports are enabled by default.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error.
 *
 * Errors:
 * EINVAL - The port ID is invalid or the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb_hw_disable_dir_port(struct dlb_hw *hw,
			    u32 domain_id,
			    struct dlb_disable_dir_port_args *args,
			    struct dlb_cmd_response *resp);

/**
 * dlb_configure_ldb_cq_interrupt() - configure load-balanced CQ for interrupts
 * @hw: dlb_hw handle for a particular device.
 * @port_id: load-balanced port ID.
 * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
 *	    else a value up to 64.
 * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
 * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
 *	       greater than 0.
 *
 * This function configures the DLB registers for a load-balanced CQ's
 * interrupts. This doesn't enable the CQ's interrupt; that can be done with
 * dlb_arm_cq_interrupt() or through an interrupt arm QE.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - The port ID is invalid.
 */
int dlb_configure_ldb_cq_interrupt(struct dlb_hw *hw,
				   int port_id,
				   int vector,
				   int mode,
				   u16 threshold);

/**
 * dlb_configure_dir_cq_interrupt() - configure directed CQ for interrupts
 * @hw: dlb_hw handle for a particular device.
 * @port_id: directed port ID.
 * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
 *	    else a value up to 64.
 * @mode: interrupt type (DLB_CQ_ISR_MODE_MSI or DLB_CQ_ISR_MODE_MSIX)
 * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
 *	       greater than 0.
 *
 * This function configures the DLB registers for a directed CQ's interrupts.
 * This doesn't enable the CQ's interrupt; that can be done with
 * dlb_arm_cq_interrupt() or through an interrupt arm QE.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - The port ID is invalid.
 */
int dlb_configure_dir_cq_interrupt(struct dlb_hw *hw,
				   int port_id,
				   int vector,
				   int mode,
				   u16 threshold);

/**
 * dlb_enable_alarm_interrupts() - enable certain hardware alarm interrupts
 * @hw: dlb_hw handle for a particular device.
 *
 * This function configures the ingress error alarm. (Other alarms are enabled
 * by default.)
 */
void dlb_enable_alarm_interrupts(struct dlb_hw *hw);

/**
 * dlb_disable_alarm_interrupts() - disable certain hardware alarm interrupts
 * @hw: dlb_hw handle for a particular device.
 *
 * This function configures the ingress error alarm. (Other alarms are disabled
 * by default.)
 */
void dlb_disable_alarm_interrupts(struct dlb_hw *hw);

/**
 * dlb_set_msix_mode() - set the device's MSI-X mode
 * @hw: dlb_hw handle for a particular device.
 * @mode: MSI-X mode (DLB_MSIX_MODE_PACKED or DLB_MSIX_MODE_COMPRESSED)
 *
 * This function configures the hardware to use either packed or compressed
 * mode. This function should not be called if using MSI interrupts.
 */
void dlb_set_msix_mode(struct dlb_hw *hw, int mode);

/**
 * dlb_arm_cq_interrupt() - arm a CQ's interrupt
 * @hw: dlb_hw handle for a particular device.
 * @port_id: port ID
 * @is_ldb: true for a load-balanced port, false for a directed port
 *
 * This function arms the CQ's interrupt. The CQ must be configured prior to
 * calling this function.
 *
 * The function does no parameter validation; that is the caller's
 * responsibility.
 *
 * Return: returns 0 upon success, <0 otherwise.
 *
 * EINVAL - Invalid port ID.
 */
int dlb_arm_cq_interrupt(struct dlb_hw *hw, int port_id, bool is_ldb);

/**
 * dlb_read_compressed_cq_intr_status() - read compressed CQ interrupt status
 * @hw: dlb_hw handle for a particular device.
 * @ldb_interrupts: 2-entry array of u32 bitmaps
 * @dir_interrupts: 4-entry array of u32 bitmaps
 *
 * This function can be called from a compressed CQ interrupt handler to
 * determine which CQ interrupts have fired. The caller should take appropriate
 * action (such as waking threads blocked on a CQ's interrupt), then ack the
 * interrupts with dlb_ack_compressed_cq_intr().
 */
void dlb_read_compressed_cq_intr_status(struct dlb_hw *hw,
					u32 *ldb_interrupts,
					u32 *dir_interrupts);

/**
 * dlb_ack_compressed_cq_intr() - ack compressed CQ interrupts
 * @hw: dlb_hw handle for a particular device.
 * @ldb_interrupts: 2-entry array of u32 bitmaps
 * @dir_interrupts: 4-entry array of u32 bitmaps
 *
 * This function ACKs compressed CQ interrupts. Its arguments should be the
 * same ones passed to dlb_read_compressed_cq_intr_status().
 */
void dlb_ack_compressed_cq_intr(struct dlb_hw *hw,
				u32 *ldb_interrupts,
				u32 *dir_interrupts);

/**
 * dlb_process_alarm_interrupt() - process an alarm interrupt
 * @hw: dlb_hw handle for a particular device.
 *
 * This function reads the alarm syndrome, logs it, and acks the interrupt.
 * This function should be called from the alarm interrupt handler when
 * interrupt vector DLB_INT_ALARM fires.
 */
void dlb_process_alarm_interrupt(struct dlb_hw *hw);

/**
 * dlb_process_ingress_error_interrupt() - process ingress error interrupts
 * @hw: dlb_hw handle for a particular device.
 *
 * This function reads the alarm syndrome, logs it, notifies user-space, and
 * acks the interrupt. This function should be called from the alarm interrupt
 * handler when interrupt vector DLB_INT_INGRESS_ERROR fires.
 */
void dlb_process_ingress_error_interrupt(struct dlb_hw *hw);

/**
 * dlb_get_group_sequence_numbers() - return a group's number of SNs per queue
 * @hw: dlb_hw handle for a particular device.
 * @group_id: sequence number group ID.
 *
 * This function returns the configured number of sequence numbers per queue
 * for the specified group.
 *
 * Return:
 * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
 */
int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id);

/**
 * dlb_get_group_sequence_number_occupancy() - return a group's in-use slots
 * @hw: dlb_hw handle for a particular device.
 * @group_id: sequence number group ID.
 *
 * This function returns the group's number of in-use slots (i.e. load-balanced
 * queues using the specified group).
 *
 * Return:
 * Returns -EINVAL if group_id is invalid, else the group's occupancy.
 */
int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
					    unsigned int group_id);

/**
 * dlb_set_group_sequence_numbers() - assign a group's number of SNs per queue
 * @hw: dlb_hw handle for a particular device.
 * @group_id: sequence number group ID.
 * @val: requested amount of sequence numbers per queue.
 *
 * This function configures the group's number of sequence numbers per queue.
 * val can be a power-of-two between 32 and 1024, inclusive. This setting can
 * be configured until the first ordered load-balanced queue is configured, at
 * which point the configuration is locked.
 *
 * Return:
 * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
 * ordered queue is configured.
 */
int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
				   unsigned int group_id,
				   unsigned long val);
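A hedged sketch of the sequence-number constraint described above; size_sn_group() and its fallback policy are hypothetical:

/* Hedged sketch: val must be a power of two in [32, 1024] and must be
 * programmed before the first ordered load-balanced queue is configured.
 */
static int size_sn_group(struct dlb_hw *hw, unsigned int group_id,
			 unsigned long sns_per_queue)
{
	int ret;

	if (sns_per_queue < 32 || sns_per_queue > 1024 ||
	    (sns_per_queue & (sns_per_queue - 1)) != 0)
		return -EINVAL; /* not a power of two in range */

	ret = dlb_set_group_sequence_numbers(hw, group_id, sns_per_queue);
	if (ret == -EPERM) {
		/* Too late: an ordered queue already locked the setting.
		 * Report what is actually in effect instead.
		 */
		return dlb_get_group_sequence_numbers(hw, group_id);
	}

	return ret;
}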

/**
 * dlb_reset_domain() - reset a scheduling domain
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 *
 * This function resets and frees a DLB scheduling domain and its associated
 * resources.
 *
 * Pre-condition: the driver must ensure software has stopped sending QEs
 * through this domain's producer ports before invoking this function, or
 * undefined behavior will result.
 *
 * Return:
 * Returns 0 upon success, -1 otherwise.
 *
 * EINVAL - Invalid domain ID, or the domain is not configured.
 * EFAULT - Internal error. (Possibly caused if the pre-condition is not met.)
 * ETIMEDOUT - Hardware component didn't reset in the expected time.
 */
int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id);

/**
 * dlb_ldb_port_owned_by_domain() - query whether a port is owned by a domain
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @port_id: port ID.
 *
 * This function returns whether a load-balanced port is owned by a specified
 * domain.
 *
 * Return:
 * Returns 0 if false, 1 if true, <0 otherwise.
 *
 * EINVAL - Invalid domain or port ID, or the domain is not configured.
 */
int dlb_ldb_port_owned_by_domain(struct dlb_hw *hw,
				 u32 domain_id,
				 u32 port_id);

/**
 * dlb_dir_port_owned_by_domain() - query whether a port is owned by a domain
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @port_id: port ID.
 *
 * This function returns whether a directed port is owned by a specified
 * domain.
 *
 * Return:
 * Returns 0 if false, 1 if true, <0 otherwise.
 *
 * EINVAL - Invalid domain or port ID, or the domain is not configured.
 */
int dlb_dir_port_owned_by_domain(struct dlb_hw *hw,
				 u32 domain_id,
				 u32 port_id);

/**
 * dlb_hw_get_num_resources() - query the PCI function's available resources
 * @hw: dlb_hw handle for a particular device.
 * @arg: pointer to resource counts.
 *
 * This function returns the number of available resources for the PF.
 */
void dlb_hw_get_num_resources(struct dlb_hw *hw,
			      struct dlb_get_num_resources_args *arg);

/**
 * dlb_hw_get_num_used_resources() - query the PCI function's used resources
 * @hw: dlb_hw handle for a particular device.
 * @arg: pointer to resource counts.
 *
 * This function returns the number of resources in use by the PF. It fills in
 * the fields that arg points to, except the following:
 * - max_contiguous_atomic_inflights
 * - max_contiguous_hist_list_entries
 * - max_contiguous_ldb_credits
 * - max_contiguous_dir_credits
 */
void dlb_hw_get_num_used_resources(struct dlb_hw *hw,
				   struct dlb_get_num_resources_args *arg);

/**
 * dlb_disable_dp_vasr_feature() - disable directed pipe VAS reset hardware
 * @hw: dlb_hw handle for a particular device.
 *
 * This function disables certain hardware in the directed pipe,
 * necessary to work around a DLB VAS reset issue.
 */
void dlb_disable_dp_vasr_feature(struct dlb_hw *hw);

/**
 * dlb_enable_excess_tokens_alarm() - enable interrupts for the excess token
 *				      pop alarm
 * @hw: dlb_hw handle for a particular device.
 *
 * This function enables the PF ingress error alarm interrupt to fire when an
 * excess token pop occurs.
 */
void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw);

/**
 * dlb_disable_excess_tokens_alarm() - disable interrupts for the excess token
 *				       pop alarm
 * @hw: dlb_hw handle for a particular device.
 *
 * This function disables the PF ingress error alarm interrupt from firing when
 * an excess token pop occurs.
 */
void dlb_disable_excess_tokens_alarm(struct dlb_hw *hw);

/**
 * dlb_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue depth args
 * @resp: response structure.
 *
 * This function returns the depth of a load-balanced queue.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the depth.
 *
 * Errors:
 * EINVAL - Invalid domain ID or queue ID.
 */
int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
			       u32 domain_id,
			       struct dlb_get_ldb_queue_depth_args *args,
			       struct dlb_cmd_response *resp);

/**
 * dlb_hw_get_dir_queue_depth() - returns the depth of a directed queue
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue depth args
 * @resp: response structure.
 *
 * This function returns the depth of a directed queue.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the depth.
 *
 * Errors:
 * EINVAL - Invalid domain ID or queue ID.
 */
int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
			       u32 domain_id,
			       struct dlb_get_dir_queue_depth_args *args,
			       struct dlb_cmd_response *resp);

/**
 * dlb_hw_pending_port_unmaps() - returns the number of unmap operations in
 *	progress for a load-balanced port.
 * @hw: dlb_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: number of unmaps in progress args
 * @resp: response structure.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb_error. If successful, resp->id
 * contains the number of unmaps in progress.
 *
 * Errors:
 * EINVAL - Invalid port ID.
 */
int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
			       u32 domain_id,
			       struct dlb_pending_port_unmaps_args *args,
			       struct dlb_cmd_response *resp);

/**
 * dlb_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
 *	ports.
 * @hw: dlb_hw handle for a particular device.
 *
 * This function must be called prior to configuring scheduling domains.
 */
void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw);

/**
 * dlb_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports
 * @hw: dlb_hw handle for a particular device.
 *
 * This function must be called prior to configuring scheduling domains.
 */
void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw);

/**
 * dlb_hw_set_qe_arbiter_weights() - program QE arbiter weights
 * @hw: dlb_hw handle for a particular device.
 * @weight: 8-entry array of arbiter weights.
 *
 * weight[N] programs priority N's weight. In cases where the 8 priorities are
 * reduced to 4 bins, the mapping is:
 * - weight[1] programs bin 0
 * - weight[3] programs bin 1
 * - weight[5] programs bin 2
 * - weight[7] programs bin 3
 */
void dlb_hw_set_qe_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);

/**
 * dlb_hw_set_qid_arbiter_weights() - program QID arbiter weights
 * @hw: dlb_hw handle for a particular device.
 * @weight: 8-entry array of arbiter weights.
 *
 * weight[N] programs priority N's weight. In cases where the 8 priorities are
 * reduced to 4 bins, the mapping is:
 * - weight[1] programs bin 0
 * - weight[3] programs bin 1
 * - weight[5] programs bin 2
 * - weight[7] programs bin 3
 */
void dlb_hw_set_qid_arbiter_weights(struct dlb_hw *hw, u8 weight[8]);
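A hedged sketch of the odd-index bin mapping described above; program_flat_arbiter_weights() and the choice of equal weights are hypothetical:

/* Hedged sketch: when the 8 priorities collapse to 4 bins, only the odd
 * indices of the weight array take effect, per the mapping documented
 * above. Equal weights give a flat (unbiased) arbitration policy.
 */
static void program_flat_arbiter_weights(struct dlb_hw *hw)
{
	u8 weight[8] = {0};

	weight[1] = 1;	/* bin 0 */
	weight[3] = 1;	/* bin 1 */
	weight[5] = 1;	/* bin 2 */
	weight[7] = 1;	/* bin 3 */

	dlb_hw_set_qe_arbiter_weights(hw, weight);
	dlb_hw_set_qid_arbiter_weights(hw, weight);
}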

/**
 * dlb_hw_enable_pp_sw_alarms() - enable out-of-credit alarm for all producer
 *				  ports
 * @hw: dlb_hw handle for a particular device.
 */
void dlb_hw_enable_pp_sw_alarms(struct dlb_hw *hw);

/**
 * dlb_hw_disable_pp_sw_alarms() - disable out-of-credit alarm for all producer
 *				   ports
 * @hw: dlb_hw handle for a particular device.
 */
void dlb_hw_disable_pp_sw_alarms(struct dlb_hw *hw);

/**
 * dlb_hw_disable_pf_to_vf_isr_pend_err() - disable alarm triggered by PF
 *					    access to VF's ISR pending register
 * @hw: dlb_hw handle for a particular device.
 */
void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw);

/**
 * dlb_hw_disable_vf_to_pf_isr_pend_err() - disable alarm triggered by VF
 *					    access to PF's ISR pending register
 * @hw: dlb_hw handle for a particular device.
 */
void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw);

#endif /* __DLB_RESOURCE_H */
@ -1,552 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_errno.h>

#include "base/dlb_resource.h"
#include "base/dlb_osdep.h"
#include "base/dlb_regs.h"
#include "../dlb_priv.h"
#include "../dlb_inline_fns.h"
#include "../dlb_user.h"
#include "dlb_main.h"

unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;

#define DLB_PCI_CAP_POINTER 0x34
#define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
#define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
#define DLB_PCI_ERR_UNCOR_MASK 8
#define DLB_PCI_ERR_UNC_UNSUP 0x00100000

#define DLB_PCI_LNKCTL 16
#define DLB_PCI_SLTCTL 24
#define DLB_PCI_RTCTL 28
#define DLB_PCI_EXP_DEVCTL2 40
#define DLB_PCI_LNKCTL2 48
#define DLB_PCI_SLTCTL2 56
#define DLB_PCI_CMD 4
#define DLB_PCI_EXP_DEVSTA 10
#define DLB_PCI_EXP_DEVSTA_TRPND 0x20
#define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000

#define DLB_PCI_CAP_ID_EXP 0x10
#define DLB_PCI_CAP_ID_MSIX 0x11
#define DLB_PCI_EXT_CAP_ID_PRI 0x13
#define DLB_PCI_EXT_CAP_ID_ACS 0xD

#define DLB_PCI_PRI_CTRL_ENABLE 0x1
#define DLB_PCI_PRI_ALLOC_REQ 0xC
#define DLB_PCI_PRI_CTRL 0x4
#define DLB_PCI_MSIX_FLAGS 0x2
#define DLB_PCI_MSIX_FLAGS_ENABLE 0x8000
#define DLB_PCI_MSIX_FLAGS_MASKALL 0x4000
#define DLB_PCI_ERR_ROOT_STATUS 0x30
#define DLB_PCI_ERR_COR_STATUS 0x10
#define DLB_PCI_ERR_UNCOR_STATUS 0x4
#define DLB_PCI_COMMAND_INTX_DISABLE 0x400
#define DLB_PCI_ACS_CAP 0x4
#define DLB_PCI_ACS_CTRL 0x6
#define DLB_PCI_ACS_SV 0x1
#define DLB_PCI_ACS_RR 0x4
#define DLB_PCI_ACS_CR 0x8
#define DLB_PCI_ACS_UF 0x10
#define DLB_PCI_ACS_EC 0x20

static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
{
	uint8_t pos;
	int ret;
	uint16_t hdr;

	ret = rte_pci_read_config(pdev, &pos, 1, DLB_PCI_CAP_POINTER);
	pos &= 0xFC;

	if (ret != 1)
		return -1;

	while (pos > 0x3F) {
		ret = rte_pci_read_config(pdev, &hdr, 2, pos);
		if (ret != 2)
			return -1;

		if (DLB_PCI_CAP_ID(hdr) == id)
			return pos;

		if (DLB_PCI_CAP_ID(hdr) == 0xFF)
			return -1;

		pos = DLB_PCI_CAP_NEXT(hdr);
	}

	return -1;
}

static int dlb_mask_ur_err(struct rte_pci_device *pdev)
{
	uint32_t mask;
	size_t sz = sizeof(mask);
	int pos = rte_pci_find_ext_capability(pdev, RTE_PCI_EXT_CAP_ID_ERR);

	if (pos < 0) {
		DLB_LOG_ERR("[%s()] failed to find the aer capability\n",
			    __func__);
		return pos;
	}

	pos += DLB_PCI_ERR_UNCOR_MASK;

	if (rte_pci_read_config(pdev, &mask, sz, pos) != (int)sz) {
		DLB_LOG_ERR("[%s()] Failed to read uncorrectable error mask reg\n",
			    __func__);
		return -1;
	}

	/* Mask Unsupported Request errors */
	mask |= DLB_PCI_ERR_UNC_UNSUP;

	if (rte_pci_write_config(pdev, &mask, sz, pos) != (int)sz) {
		DLB_LOG_ERR("[%s()] Failed to write uncorrectable error mask reg at offset %d\n",
			    __func__, pos);
		return -1;
	}

	return 0;
}

struct dlb_dev *
dlb_probe(struct rte_pci_device *pdev)
{
	struct dlb_dev *dlb_dev;
	int ret = 0;

	DLB_INFO(dlb_dev, "probe\n");

	dlb_dev = rte_malloc("DLB_PF", sizeof(struct dlb_dev),
			     RTE_CACHE_LINE_SIZE);

	if (dlb_dev == NULL) {
		ret = -ENOMEM;
		goto dlb_dev_malloc_fail;
	}

	/* PCI Bus driver has already mapped bar space into process.
	 * Save off our IO register and FUNC addresses.
	 */

	/* BAR 0 */
	if (pdev->mem_resource[0].addr == NULL) {
		DLB_ERR(dlb_dev, "probe: BAR 0 addr (func_kva) is NULL\n");
		ret = -EINVAL;
		goto pci_mmap_bad_addr;
	}
	dlb_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
	dlb_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;

	DLB_INFO(dlb_dev, "DLB FUNC VA=%p, PA=%p, len=%"PRIu64"\n",
		 (void *)dlb_dev->hw.func_kva,
		 (void *)dlb_dev->hw.func_phys_addr,
		 pdev->mem_resource[0].len);

	/* BAR 2 */
	if (pdev->mem_resource[2].addr == NULL) {
		DLB_ERR(dlb_dev, "probe: BAR 2 addr (csr_kva) is NULL\n");
		ret = -EINVAL;
		goto pci_mmap_bad_addr;
	}
	dlb_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
	dlb_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;

	DLB_INFO(dlb_dev, "DLB CSR VA=%p, PA=%p, len=%"PRIu64"\n",
		 (void *)dlb_dev->hw.csr_kva,
		 (void *)dlb_dev->hw.csr_phys_addr,
		 pdev->mem_resource[2].len);

	dlb_dev->pdev = pdev;

	ret = dlb_pf_reset(dlb_dev);
	if (ret)
		goto dlb_reset_fail;

	/* DLB incorrectly sends URs in response to certain messages. Mask UR
	 * errors to prevent these from being propagated to the MCA.
	 */
	ret = dlb_mask_ur_err(pdev);
	if (ret)
		goto mask_ur_err_fail;

	ret = dlb_pf_init_driver_state(dlb_dev);
	if (ret)
		goto init_driver_state_fail;

	ret = dlb_resource_init(&dlb_dev->hw);
	if (ret)
		goto resource_init_fail;

	dlb_dev->revision = os_get_dev_revision(&dlb_dev->hw);

	dlb_pf_init_hardware(dlb_dev);

	return dlb_dev;

resource_init_fail:
	dlb_resource_free(&dlb_dev->hw);
init_driver_state_fail:
mask_ur_err_fail:
dlb_reset_fail:
pci_mmap_bad_addr:
	rte_free(dlb_dev);
dlb_dev_malloc_fail:
	rte_errno = ret;
	return NULL;
}
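A hedged sketch of how a PCI probe callback might consume dlb_probe()'s NULL-plus-rte_errno contract, following the unwind labels above; pf_probe_example() is hypothetical:

/* Hypothetical PMD probe callback: dlb_probe() stores the negative errno
 * in rte_errno on every failure path before returning NULL.
 */
static int pf_probe_example(struct rte_pci_device *pdev)
{
	struct dlb_dev *dev = dlb_probe(pdev);

	if (dev == NULL)
		return rte_errno; /* negative errno set by dlb_probe() */

	return 0;
}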

int
dlb_pf_reset(struct dlb_dev *dlb_dev)
{
	int msix_cap_offset, err_cap_offset, acs_cap_offset, wait_count;
	uint16_t dev_ctl_word, dev_ctl2_word, lnk_word, lnk_word2;
	uint16_t rt_ctl_word, pri_ctrl_word;
	struct rte_pci_device *pdev = dlb_dev->pdev;
	uint16_t devsta_busy_word, devctl_word;
	int pcie_cap_offset, pri_cap_offset;
	uint16_t slt_word, slt_word2, cmd;
	int ret = 0, i = 0;
	uint32_t dword[16], pri_reqs_dword;
	off_t off;

	/* Save PCI config state */

	for (i = 0; i < 16; i++) {
		if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
			return -1; /* a short read must not be reported as success */
	}

	pcie_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_EXP);

	if (pcie_cap_offset < 0) {
		DLB_LOG_ERR("[%s()] failed to find the pcie capability\n",
			    __func__);
		return pcie_cap_offset;
	}

	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
	if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
		dev_ctl_word = 0;

	off = pcie_cap_offset + DLB_PCI_LNKCTL;
	if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
		lnk_word = 0;

	off = pcie_cap_offset + DLB_PCI_SLTCTL;
	if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
		slt_word = 0;

	off = pcie_cap_offset + DLB_PCI_RTCTL;
	if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
		rt_ctl_word = 0;

	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
	if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
		dev_ctl2_word = 0;

	off = pcie_cap_offset + DLB_PCI_LNKCTL2;
	if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
		lnk_word2 = 0;

	off = pcie_cap_offset + DLB_PCI_SLTCTL2;
	if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
		slt_word2 = 0;

	pri_cap_offset = rte_pci_find_ext_capability(pdev,
						     DLB_PCI_EXT_CAP_ID_PRI);
	if (pri_cap_offset >= 0) {
		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
		if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
			pri_reqs_dword = 0;
	}

	/* clear the PCI command register before issuing the FLR */

	off = DLB_PCI_CMD;
	cmd = 0;
	if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
		DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
			    __func__, (int)off);
		return -1;
	}

	/* issue the FLR */
	for (wait_count = 0; wait_count < 4; wait_count++) {
		int sleep_time;

		off = pcie_cap_offset + DLB_PCI_EXP_DEVSTA;
		ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
		if (ret != 2) {
			DLB_LOG_ERR("[%s()] failed to read the pci device status\n",
				    __func__);
			return ret;
		}

		if (!(devsta_busy_word & DLB_PCI_EXP_DEVSTA_TRPND))
			break;

		sleep_time = (1 << (wait_count)) * 100;
		rte_delay_ms(sleep_time);
	}

	if (wait_count == 4) {
		DLB_LOG_ERR("[%s()] wait for pci pending transactions timed out\n",
			    __func__);
		return -1;
	}

	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
	ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
	if (ret != 2) {
		DLB_LOG_ERR("[%s()] failed to read the pcie device control\n",
			    __func__);
		return ret;
	}

	devctl_word |= DLB_PCI_EXP_DEVCTL_BCR_FLR;

	if (rte_pci_write_config(pdev, &devctl_word, 2, off) != 2) {
		DLB_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
			    __func__, (int)off);
		return -1;
	}

	rte_delay_ms(100);

	/* Restore PCI config state */

	if (pcie_cap_offset >= 0) {
		off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
		if (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = pcie_cap_offset + DLB_PCI_LNKCTL;
		if (rte_pci_write_config(pdev, &lnk_word, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = pcie_cap_offset + DLB_PCI_SLTCTL;
		if (rte_pci_write_config(pdev, &slt_word, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = pcie_cap_offset + DLB_PCI_RTCTL;
		if (rte_pci_write_config(pdev, &rt_ctl_word, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
		if (rte_pci_write_config(pdev, &dev_ctl2_word, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = pcie_cap_offset + DLB_PCI_LNKCTL2;
		if (rte_pci_write_config(pdev, &lnk_word2, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = pcie_cap_offset + DLB_PCI_SLTCTL2;
		if (rte_pci_write_config(pdev, &slt_word2, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}
	}

	if (pri_cap_offset >= 0) {
		pri_ctrl_word = DLB_PCI_PRI_CTRL_ENABLE;

		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
		if (rte_pci_write_config(pdev, &pri_reqs_dword, 4, off) != 4) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = pri_cap_offset + DLB_PCI_PRI_CTRL;
		if (rte_pci_write_config(pdev, &pri_ctrl_word, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}
	}

	err_cap_offset = rte_pci_find_ext_capability(pdev,
						     RTE_PCI_EXT_CAP_ID_ERR);
	if (err_cap_offset >= 0) {
		uint32_t tmp;

		off = err_cap_offset + DLB_PCI_ERR_ROOT_STATUS;
		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
			tmp = 0;

		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = err_cap_offset + DLB_PCI_ERR_COR_STATUS;
		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
			tmp = 0;

		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = err_cap_offset + DLB_PCI_ERR_UNCOR_STATUS;
		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
			tmp = 0;

		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}
	}

	for (i = 16; i > 0; i--) {
		off = (i - 1) * 4;
		if (rte_pci_write_config(pdev, &dword[i - 1], 4, off) != 4) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}
	}

	off = DLB_PCI_CMD;
	if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
		cmd &= ~DLB_PCI_COMMAND_INTX_DISABLE;
		if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space\n",
				    __func__);
			return -1;
		}
	}

	msix_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);
	if (msix_cap_offset >= 0) {
		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
			cmd |= DLB_PCI_MSIX_FLAGS_ENABLE;
			cmd |= DLB_PCI_MSIX_FLAGS_MASKALL;
			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
				DLB_LOG_ERR("[%s()] failed to write msix flags\n",
					    __func__);
				return -1;
			}
		}

		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
			cmd &= ~DLB_PCI_MSIX_FLAGS_MASKALL;
			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
				DLB_LOG_ERR("[%s()] failed to write msix flags\n",
					    __func__);
				return -1;
			}
		}
	}

	acs_cap_offset = rte_pci_find_ext_capability(pdev,
						     DLB_PCI_EXT_CAP_ID_ACS);
	if (acs_cap_offset >= 0) {
		uint16_t acs_cap, acs_ctrl, acs_mask;

		off = acs_cap_offset + DLB_PCI_ACS_CAP;
		if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
			acs_cap = 0;

		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
			acs_ctrl = 0;

		acs_mask = DLB_PCI_ACS_SV | DLB_PCI_ACS_RR;
		acs_mask |= (DLB_PCI_ACS_CR | DLB_PCI_ACS_UF);
		acs_ctrl |= (acs_cap & acs_mask);

		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}

		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
			acs_ctrl = 0;

		acs_mask = DLB_PCI_ACS_RR | DLB_PCI_ACS_CR | DLB_PCI_ACS_EC;
		acs_ctrl &= ~acs_mask;

		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
			DLB_LOG_ERR("[%s()] failed to write pci config space at offset %d\n",
				    __func__, (int)off);
			return -1;
		}
	}

	return 0;
}

/*******************************/
/****** Driver management ******/
/*******************************/

int
dlb_pf_init_driver_state(struct dlb_dev *dlb_dev)
{
	/* Initialize software state */
	rte_spinlock_init(&dlb_dev->resource_mutex);
	rte_spinlock_init(&dlb_dev->measurement_lock);

	return 0;
}

void
dlb_pf_init_hardware(struct dlb_dev *dlb_dev)
{
	dlb_disable_dp_vasr_feature(&dlb_dev->hw);

	dlb_enable_excess_tokens_alarm(&dlb_dev->hw);

	if (dlb_dev->revision >= DLB_REV_B0) {
		dlb_hw_enable_sparse_ldb_cq_mode(&dlb_dev->hw);
		dlb_hw_enable_sparse_dir_cq_mode(&dlb_dev->hw);
	}

	if (dlb_dev->revision >= DLB_REV_B0) {
		dlb_hw_disable_pf_to_vf_isr_pend_err(&dlb_dev->hw);
		dlb_hw_disable_vf_to_pf_isr_pend_err(&dlb_dev->hw);
	}
}
@ -1,44 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef __DLB_MAIN_H
#define __DLB_MAIN_H

#include <rte_debug.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eal_paging.h>

#include "base/dlb_hw_types.h"
#include "../dlb_user.h"

#define DLB_DEFAULT_UNREGISTER_TIMEOUT_S 5

struct dlb_dev {
	struct rte_pci_device *pdev;
	struct dlb_hw hw;
	/* struct list_head list; */
	struct device *dlb_device;
	bool domain_reset_failed;
	/* The resource mutex serializes access to driver data structures and
	 * hardware registers.
	 */
	rte_spinlock_t resource_mutex;
	rte_spinlock_t measurement_lock;
	bool worker_launched;
	u8 revision;
};

struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);
void dlb_reset_done(struct dlb_dev *dlb_dev);

/* pf_ops */
int dlb_pf_init_driver_state(struct dlb_dev *dev);
void dlb_pf_free_driver_state(struct dlb_dev *dev);
void dlb_pf_init_hardware(struct dlb_dev *dev);
int dlb_pf_reset(struct dlb_dev *dlb_dev);

#endif /* __DLB_MAIN_H */
@ -1,755 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <sys/time.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>

#include <rte_debug.h>
#include <rte_log.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_memory.h>
#include <rte_string_fns.h>

#include "../dlb_priv.h"
#include "../dlb_iface.h"
#include "../dlb_inline_fns.h"
#include "dlb_main.h"
#include "base/dlb_hw_types.h"
#include "base/dlb_osdep.h"
#include "base/dlb_resource.h"

static void
dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
{
	int i;

	/* Addresses will be initialized at port create */
	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
		/* First directed ports */

		/* producer port */
		dlb_port[i][DLB_DIR].pp_addr = NULL;

		/* popcount */
		dlb_port[i][DLB_DIR].ldb_popcount = NULL;
		dlb_port[i][DLB_DIR].dir_popcount = NULL;

		/* consumer queue */
		dlb_port[i][DLB_DIR].cq_base = NULL;
		dlb_port[i][DLB_DIR].mmaped = true;

		/* Now load balanced ports */

		/* producer port */
		dlb_port[i][DLB_LDB].pp_addr = NULL;

		/* popcount */
		dlb_port[i][DLB_LDB].ldb_popcount = NULL;
		dlb_port[i][DLB_LDB].dir_popcount = NULL;

		/* consumer queue */
		dlb_port[i][DLB_LDB].cq_base = NULL;
		dlb_port[i][DLB_LDB].mmaped = true;
	}
}

static int
dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
{
	RTE_SET_USED(handle);
	RTE_SET_USED(name);

	return 0;
}

static void
dlb_pf_domain_close(struct dlb_eventdev *dlb)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
	int ret;

	ret = dlb_reset_domain(&dlb_dev->hw, dlb->qm_instance.domain_id);
	if (ret)
		DLB_LOG_ERR("dlb_pf_reset_domain err %d", ret);
}

static int
dlb_pf_get_device_version(struct dlb_hw_dev *handle, uint8_t *revision)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

	*revision = dlb_dev->revision;

	return 0;
}

static int
dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
			 struct dlb_get_num_resources_args *rsrcs)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

	dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs);

	return 0;
}

static int
dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
			   struct dlb_create_sched_domain_args *arg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	if (dlb_dev->domain_reset_failed) {
		response.status = DLB_ST_DOMAIN_RESET_FAILED;
		ret = -EINVAL;
		goto done;
	}

	ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response);

done:
	*(struct dlb_cmd_response *)arg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
			      struct dlb_create_ldb_pool_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
				     handle->domain_id,
				     cfg,
				     &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
			      struct dlb_create_dir_pool_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
				     handle->domain_id,
				     cfg,
				     &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
			enum dlb_cq_poll_modes *mode)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

	if (dlb_dev->revision >= DLB_REV_B0)
		*mode = DLB_CQ_POLL_MODE_SPARSE;
	else
		*mode = DLB_CQ_POLL_MODE_STD;

	return 0;
}

static int
dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
			struct dlb_create_ldb_queue_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
				      handle->domain_id,
				      cfg,
				      &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
			struct dlb_create_dir_queue_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
				      handle->domain_id,
				      cfg,
				      &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static void *
dlb_alloc_coherent_aligned(const struct rte_memzone **mz, rte_iova_t *phys,
			   size_t size, int align)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb_port_mem_%lx",
		 (unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					  RTE_MEMZONE_IOVA_CONTIG, align);
	if (*mz == NULL) {
		DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes\n",
			    size);
		*phys = 0;
		return NULL;
	}
	*phys = (*mz)->iova;
	return (*mz)->addr;
}
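rte_memzone_reserve_aligned() with RTE_MEMZONE_IOVA_CONTIG, as used above, is the standard DPDK way to obtain DMA-able, IOVA-contiguous memory from hugepages. A minimal usage sketch; the zone name, 4 KiB size, and alignment are illustrative values, not taken from this driver:

#include <rte_memzone.h>
#include <rte_lcore.h>

/* Sketch: reserve one IOVA-contiguous, page-aligned buffer for device DMA. */
static const struct rte_memzone *
reserve_dma_buf(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned("example_dma_buf", 4096,
					 rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, 4096);
	if (mz != NULL) {
		/* mz->addr is the CPU virtual address; mz->iova is the
		 * bus address to program into the device.
		 */
	}
	return mz;
}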
static int
dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
		       struct dlb_create_ldb_port_args *cfg,
		       enum dlb_cq_poll_modes poll_mode)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz, cq_alloc_depth;
	rte_iova_t pp_dma_base;
	rte_iova_t pc_dma_base;
	rte_iova_t cq_dma_base;
	int is_dir = false;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* The hardware always uses a CQ depth of at least
	 * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
	 * perspective we support a depth as low as 1 for LDB ports.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);

	/* Calculate the port memory required, including two cache lines for
	 * credit pop counts. Round up to the nearest cache line.
	 */
	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
					       alloc_sz, rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);
	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));

	ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
				     handle->domain_id,
				     cfg,
				     pc_dma_base,
				     cq_dma_base,
				     &response);
	if (ret)
		goto create_port_err;

	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
	dlb_port[response.id][DLB_LDB].pp_addr =
		(void *)(uintptr_t)(pp_dma_base +
				    (rte_mem_page_size() * response.id));

	dlb_port[response.id][DLB_LDB].cq_base =
		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));

	dlb_port[response.id][DLB_LDB].ldb_popcount =
		(void *)(uintptr_t)port_base;
	dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
		(port_base + RTE_CACHE_LINE_SIZE);
	dlb_port[response.id][DLB_LDB].mz = mz;

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
	return 0;

create_port_err:

	rte_memzone_free(mz);

	return ret;
}
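The sizing and layout arithmetic above is easier to check with concrete numbers. A worked example, assuming a 64-byte cache line, 16-byte QEs in standard poll mode, and a requested CQ depth of 8 that DLB_MIN_HARDWARE_CQ_DEPTH does not raise (all illustrative assumptions, not values taken from this diff):

/* Worked example of the port memory sizing above:
 *   qe_sz    = 16
 *   alloc_sz = 2 * 64 + 8 * 16 = 256   (already a cache-line multiple)
 *
 * Layout of the zeroed allocation starting at port_base / pc_dma_base:
 *   [  0,  64)  ldb_popcount  - first cache line
 *   [ 64, 128)  dir_popcount  - second cache line
 *   [128, 256)  CQ entries    - cq_dma_base = pc_dma_base + 128
 */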
static int
dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
		       struct dlb_create_dir_port_args *cfg,
		       enum dlb_cq_poll_modes poll_mode)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	rte_iova_t pp_dma_base;
	rte_iova_t pc_dma_base;
	rte_iova_t cq_dma_base;
	int is_dir = true;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, including two cache lines for
	 * credit pop counts. Round up to the nearest cache line.
	 */
	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
					       alloc_sz, rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);
	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));

	ret = dlb_hw_create_dir_port(&dlb_dev->hw,
				     handle->domain_id,
				     cfg,
				     pc_dma_base,
				     cq_dma_base,
				     &response);
	if (ret)
		goto create_port_err;

	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
	dlb_port[response.id][DLB_DIR].pp_addr =
		(void *)(uintptr_t)(pp_dma_base +
				    (rte_mem_page_size() * response.id));

	dlb_port[response.id][DLB_DIR].cq_base =
		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));

	dlb_port[response.id][DLB_DIR].ldb_popcount =
		(void *)(uintptr_t)port_base;
	dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
		(port_base + RTE_CACHE_LINE_SIZE);
	dlb_port[response.id][DLB_DIR].mz = mz;

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
	return 0;

create_port_err:

	rte_memzone_free(mz);

	return ret;
}

static int
dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
			 struct dlb_get_sn_allocation_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);

	response.id = ret;
	response.status = 0;

	*(struct dlb_cmd_response *)args->response = response;

	return ret;
}

static int
dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
			 struct dlb_set_sn_allocation_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
					     args->num);

	response.status = 0;

	*(struct dlb_cmd_response *)args->response = response;

	return ret;
}

static int
dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
			struct dlb_get_sn_occupancy_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
						      args->group);

	response.id = ret;
	response.status = 0;

	*(struct dlb_cmd_response *)args->response = response;

	return ret;
}

static int
dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,
			  struct dlb_start_domain_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_start_domain(&dlb_dev->hw,
				  handle->domain_id,
				  cfg,
				  &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
			   struct dlb_pending_port_unmaps_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_pending_port_unmaps(&dlb_dev->hw,
					 handle->domain_id,
					 args,
					 &response);

	*(struct dlb_cmd_response *)args->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_map_qid(struct dlb_hw_dev *handle,
	       struct dlb_map_qid_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_map_qid(&dlb_dev->hw,
			     handle->domain_id,
			     cfg,
			     &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_unmap_qid(struct dlb_hw_dev *handle,
		 struct dlb_unmap_qid_args *cfg)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_unmap_qid(&dlb_dev->hw,
			       handle->domain_id,
			       cfg,
			       &response);

	*(struct dlb_cmd_response *)cfg->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_get_ldb_queue_depth(struct dlb_hw_dev *handle,
			   struct dlb_get_ldb_queue_depth_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_get_ldb_queue_depth(&dlb_dev->hw,
					 handle->domain_id,
					 args,
					 &response);

	*(struct dlb_cmd_response *)args->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static int
dlb_pf_get_dir_queue_depth(struct dlb_hw_dev *handle,
			   struct dlb_get_dir_queue_depth_args *args)
{
	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
	struct dlb_cmd_response response = {0};
	int ret = 0;

	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);

	ret = dlb_hw_get_dir_queue_depth(&dlb_dev->hw,
					 handle->domain_id,
					 args,
					 &response);

	*(struct dlb_cmd_response *)args->response = response;

	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

	return ret;
}

static void
dlb_pf_iface_fn_ptrs_init(void)
{
	dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
	dlb_iface_open = dlb_pf_open;
	dlb_iface_domain_close = dlb_pf_domain_close;
	dlb_iface_get_device_version = dlb_pf_get_device_version;
	dlb_iface_get_num_resources = dlb_pf_get_num_resources;
	dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
	dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
	dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
	dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
	dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
	dlb_iface_dir_port_create = dlb_pf_dir_port_create;
	dlb_iface_map_qid = dlb_pf_map_qid;
	dlb_iface_unmap_qid = dlb_pf_unmap_qid;
	dlb_iface_sched_domain_start = dlb_pf_sched_domain_start;
	dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
	dlb_iface_get_ldb_queue_depth = dlb_pf_get_ldb_queue_depth;
	dlb_iface_get_dir_queue_depth = dlb_pf_get_dir_queue_depth;
	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
	dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
}
/* PCI DEV HOOKS */
static int
dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct dlb_devargs dlb_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.defer_sched = 0,
		.num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
	};
	struct dlb_eventdev *dlb;

	DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		    eventdev->data->dev_id, eventdev->data->socket_id);

	dlb_entry_points_init(eventdev);

	dlb_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */

		/* Probe the DLB PF layer */
		dlb->qm_instance.pf_dev = dlb_probe(pci_dev);

		if (dlb->qm_instance.pf_dev == NULL) {
			DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
				    rte_errno);
			ret = -rte_errno;
			goto dlb_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb_parse_params(pci_dev->device.devargs->args,
					       pci_dev->device.devargs->name,
					       &dlb_args);
			if (ret) {
				DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					    ret, rte_errno);
				goto dlb_probe_failed;
			}
		}

		ret = dlb_primary_eventdev_probe(eventdev,
						 EVDEV_DLB_NAME_PMD_STR,
						 &dlb_args);
	} else {
		ret = dlb_secondary_eventdev_probe(eventdev,
						   EVDEV_DLB_NAME_PMD_STR);
	}
	if (ret)
		goto dlb_probe_failed;

	DLB_LOG_INFO("DLB PF Probe success\n");

	return 0;

dlb_probe_failed:

	DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);

	return ret;
}

#define EVENTDEV_INTEL_VENDOR_ID 0x8086

static const struct rte_pci_id pci_id_dlb_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       DLB_PF_DEV_ID)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
		    struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
					     sizeof(struct dlb_eventdev),
					     dlb_eventdev_pci_init,
					     EVDEV_DLB_NAME_PMD_STR);
}

static int
event_dlb_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_dlb_pmd = {
	.id_table = pci_id_dlb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb_pci_probe,
	.remove = event_dlb_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);
@ -1,38 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "rte_pmd_dlb.h"
#include "dlb_priv.h"
#include "dlb_inline_fns.h"

int
rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
			       uint8_t port_id,
			       enum dlb_token_pop_mode mode)
{
	struct dlb_eventdev *dlb;
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dlb = dlb_pmd_priv(dev);

	if (mode >= NUM_TOKEN_POP_MODES)
		return -EINVAL;

	/* The event device must be configured, but not yet started */
	if (!dlb->configured || dlb->run_state != DLB_RUN_STATE_STOPPED)
		return -EINVAL;

	/* The token pop mode must be set before configuring the port */
	if (port_id >= dlb->num_ports || dlb->ev_ports[port_id].setup_done)
		return -EINVAL;

	dlb->ev_ports[port_id].qm_port.token_pop_mode = mode;

	return 0;
}
@ -1,77 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

/*!
 * @file rte_pmd_dlb.h
 *
 * @brief DLB PMD-specific functions
 */

#ifndef _RTE_PMD_DLB_H_
#define _RTE_PMD_DLB_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

/**
 * @warning
 * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
 *
 * Selects the token pop mode for a DLB port.
 */
enum dlb_token_pop_mode {
	/* Pop the CQ tokens immediately after dequeuing. */
	AUTO_POP,
	/* Pop CQ tokens after (dequeue_depth - 1) events are released.
	 * Supported on load-balanced ports only.
	 */
	DELAYED_POP,
	/* Pop the CQ tokens during the next dequeue operation. */
	DEFERRED_POP,

	/* NUM_TOKEN_POP_MODES must be last */
	NUM_TOKEN_POP_MODES
};

/*!
 * @warning
 * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
 *
 * Configure the token pop mode for a DLB port. By default, all ports use
 * AUTO_POP. This function must be called before calling rte_event_port_setup()
 * for the port, but after calling rte_event_dev_configure().
 *
 * @note
 * The defer_sched vdev arg, which configures all load-balanced ports with
 * dequeue_depth == 1 for DEFERRED_POP mode, takes precedence over this
 * function.
 *
 * @param dev_id
 *    The identifier of the event device.
 * @param port_id
 *    The identifier of the event port.
 * @param mode
 *    The token pop mode.
 *
 * @return
 * - 0: Success
 * - EINVAL: Invalid dev_id, port_id, or mode
 * - EINVAL: The DLB is not configured, is already running, or the port is
 *   already setup
 */
__rte_experimental
int
rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
			       uint8_t port_id,
			       enum dlb_token_pop_mode mode);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_PMD_DLB_H_ */
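The ordering rules in the declaration above are strict: the call must land after rte_event_dev_configure() but before rte_event_port_setup() for that port, while the device is stopped. A minimal call-sequence sketch; the device and port ids are placeholders, the helper name is invented for illustration, and ALLOW_EXPERIMENTAL_API is needed because the symbol is experimental:

#define ALLOW_EXPERIMENTAL_API
#include <rte_eventdev.h>
#include <rte_pmd_dlb.h>

static int
setup_port_with_delayed_pop(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf port_conf;
	int ret;

	/* 1. The device was already configured via rte_event_dev_configure(). */

	/* 2. Select the pop mode while the device is stopped and the port
	 *    has not been set up yet.
	 */
	ret = rte_pmd_dlb_set_token_pop_mode(dev_id, port_id, DELAYED_POP);
	if (ret != 0)
		return ret;

	/* 3. Only now set up the port. */
	ret = rte_event_port_default_conf_get(dev_id, port_id, &port_conf);
	if (ret != 0)
		return ret;

	return rte_event_port_setup(dev_id, port_id, &port_conf);
}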
@ -1,9 +0,0 @@
DPDK_21 {
	local: *;
};

EXPERIMENTAL {
	global:

	rte_pmd_dlb_set_token_pop_mode;
};
@ -5,7 +5,7 @@ if is_windows
 subdir_done()
 endif

-drivers = ['dlb', 'dlb2', 'dpaa', 'dpaa2', 'octeontx2', 'opdl', 'skeleton', 'sw',
+drivers = ['dlb2', 'dpaa', 'dpaa2', 'octeontx2', 'opdl', 'skeleton', 'sw',
	   'dsw']
 if not (toolchain == 'gcc' and cc.version().version_compare('<4.8.6') and
	 dpdk_conf.has('RTE_ARCH_ARM64'))