Add qat(4)

This provides an OpenCrypto driver for Intel QuickAssist devices.  The
driver was initially ported from NetBSD and comes with a few
improvements:
- support for GMAC/AES-GCM, AES-CTR and AES-XTS, and support for
  SHA/HMAC-authenticated encryption
- support for detaching the driver
- various bug fixes
- DH895X support

Discussed with:	jhb
MFC after:	3 days
Sponsored by:	Rubicon Communications, LLC (Netgate)
Differential Revision:	https://reviews.freebsd.org/D26963
commit 72143e89bb (parent 2dee296a3d)
Author: Mark Johnston
Date:   2020-11-05 15:55:23 +00:00

27 changed files with 15644 additions and 0 deletions
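
Not part of this change, but for context: with device cryptodev in the kernel, the OpenCrypto framework that qat(4) plugs into is reachable from userland through /dev/crypto. The sketch below is a minimal illustration, assuming a recent FreeBSD where the session ioctls can be issued directly on /dev/crypto; the key, IV, and plaintext are placeholders and all error handling is omitted. It encrypts a single AES-128-CBC block; whether the request lands on QAT hardware or a software driver is up to the framework's driver selection.

#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        char key[16] = { 0 };                   /* placeholder key */
        char iv[16] = { 0 };                    /* placeholder IV */
        char buf[16] = "0123456789abcde";       /* exactly one AES block */
        struct session_op sess;
        struct crypt_op cop;
        int fd;

        fd = open("/dev/crypto", O_RDWR);       /* error handling omitted */

        /* Create a symmetric session; the kernel picks a backing driver. */
        memset(&sess, 0, sizeof(sess));
        sess.cipher = CRYPTO_AES_CBC;
        sess.keylen = sizeof(key);
        sess.key = key;
        ioctl(fd, CIOCGSESSION, &sess);

        /* Encrypt the buffer in place through the session. */
        memset(&cop, 0, sizeof(cop));
        cop.ses = sess.ses;
        cop.op = COP_ENCRYPT;
        cop.len = sizeof(buf);
        cop.src = buf;
        cop.dst = buf;
        cop.iv = iv;
        ioctl(fd, CIOCCRYPT, &cop);

        ioctl(fd, CIOCFSESSION, &sess.ses);
        close(fd);
        return (0);
}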


@@ -431,6 +431,7 @@ MAN= aac.4 \
pty.4 \
puc.4 \
pwmc.4 \
${_qat.4} \
${_qlxge.4} \
${_qlxgb.4} \
${_qlxgbe.4} \
@@ -823,6 +824,7 @@ _nvram.4= nvram.4
_ossl.4= ossl.4
_padlock.4= padlock.4
_pchtherm.4= pchtherm.4
_qat.4= qat.4
_rr232x.4= rr232x.4
_speaker.4= speaker.4
_spkr.4= spkr.4

share/man/man4/qat.4 (new file, 99 lines)

@@ -0,0 +1,99 @@
.\"-
.\" Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd November 5, 2020
.Dt QAT 4
.Os
.Sh NAME
.Nm qat
.Nd Intel QuickAssist Technology (QAT) driver
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device crypto"
.Cd "device cryptodev"
.Cd "device qat"
.Ed
.Pp
Alternatively, to load the driver as a
module at boot time, place the following lines in
.Xr loader.conf 5 :
.Bd -literal -offset indent
qat_load="YES"
qat_c2xxxfw_load="YES"
qat_c3xxxfw_load="YES"
qat_c63xfw_load="YES"
qat_d15xxfw_load="YES"
qat_dh895xcc_load="YES"
.Ed
.Sh DESCRIPTION
The
.Nm
driver implements
.Xr crypto 4
support for some of the cryptographic acceleration functions of the Intel
QuickAssist device.
The
.Nm
driver supports the QAT devices integrated with Atom C2000 and C3000 and Xeon
C620 and D-1500 chipsets, and the Intel QAT Adapter 8950.
It can accelerate AES in CBC, CTR, XTS (except for the C2000) and GCM modes,
and can perform authenticated encryption combining the CBC, CTR and XTS modes
with SHA1-HMAC and SHA2-HMAC.
The
.Nm
driver can also compute SHA1 and SHA2 digests.
.Sh SEE ALSO
.Xr crypto 4 ,
.Xr ipsec 4 ,
.Xr pci 4 ,
.Xr random 4 ,
.Xr crypto 7 ,
.Xr crypto 9
.Sh HISTORY
The
.Nm
driver first appeared in
.Fx 13.0 .
.Sh AUTHORS
The
.Nm
driver was written for
.Nx
by
.An Hikaru Abe Aq Mt hikaru@iij.ad.jp
and ported to
.Fx
by
.An Mark Johnston Aq Mt markj@FreeBSD.org .
.Sh BUGS
Some Atom C2000 QAT devices have two acceleration engines instead of one.
The
.Nm
driver currently misbehaves when both are enabled and thus does not enable
the second acceleration engine if one is present.


@@ -470,6 +470,10 @@ device vmd_bus # bus for VMD children
# PMC-Sierra SAS/SATA controller
device pmspcv
#
# Intel QuickAssist
device qat
#
# SafeNet crypto driver: can be moved to the MI NOTES as soon as
# it's tested on a big-endian machine


@@ -291,6 +291,15 @@ dev/isci/scil/scif_sas_task_request_state_handlers.c optional isci
dev/isci/scil/scif_sas_task_request_states.c optional isci
dev/isci/scil/scif_sas_timer.c optional isci
dev/itwd/itwd.c optional itwd
dev/qat/qat.c optional qat
dev/qat/qat_ae.c optional qat
dev/qat/qat_c2xxx.c optional qat
dev/qat/qat_c3xxx.c optional qat
dev/qat/qat_c62x.c optional qat
dev/qat/qat_d15xx.c optional qat
dev/qat/qat_dh895xcc.c optional qat
dev/qat/qat_hw15.c optional qat
dev/qat/qat_hw17.c optional qat
libkern/x86/crc32_sse42.c standard
#
# x86 shared code between IA32 and AMD64 architectures

sys/dev/qat/qat.c (new file, 2140 lines)

(File diff suppressed because it is too large.)

sys/dev/qat/qat_ae.c (new file, 3456 lines)

(File diff suppressed because it is too large.)

sys/dev/qat/qat_aevar.h (new file, 73 lines)

@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_aevar.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_AEVAR_H_
#define _DEV_PCI_QAT_AEVAR_H_
int qat_ae_init(struct qat_softc *);
int qat_ae_start(struct qat_softc *);
void qat_ae_cluster_intr(void *);
int qat_aefw_load(struct qat_softc *);
void qat_aefw_unload(struct qat_softc *);
int qat_aefw_start(struct qat_softc *, u_char, u_int);
#endif

sys/dev/qat/qat_c2xxx.c (new file, 217 lines)

@@ -0,0 +1,217 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c2xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_c2xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw15reg.h"
#include "qat_c2xxxreg.h"
#include "qatvar.h"
#include "qat_hw15var.h"

static uint32_t
qat_c2xxx_get_accel_mask(struct qat_softc *sc)
{
        uint32_t fusectl;

        fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
        return ((~fusectl) & ACCEL_MASK_C2XXX);
}
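
/*
 * Added commentary: a set bit in the FUSECTL word means the corresponding
 * unit is fused off, so the enabled-unit masks here and below are derived
 * from the complement of the register.
 */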
static uint32_t
qat_c2xxx_get_ae_mask(struct qat_softc *sc)
{
        uint32_t fusectl;

        fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
        if (fusectl & (
            FUSECTL_C2XXX_PKE_DISABLE |
            FUSECTL_C2XXX_ATH_DISABLE |
            FUSECTL_C2XXX_CPH_DISABLE)) {
                return 0;
        } else {
                if ((~fusectl & AE_MASK_C2XXX) == 0x3) {
                        /*
                         * With both AEs enabled we get spurious completions on
                         * ETR rings.  Work around that for now by simply
                         * disabling the second AE.
                         */
                        device_printf(sc->sc_dev, "disabling second AE\n");
                        fusectl |= 0x2;
                }
                return ((~fusectl) & AE_MASK_C2XXX);
        }
}
static enum qat_sku
qat_c2xxx_get_sku(struct qat_softc *sc)
{
        uint32_t fusectl;

        fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);

        switch (sc->sc_ae_num) {
        case 1:
                if (fusectl & FUSECTL_C2XXX_LOW_SKU)
                        return QAT_SKU_3;
                else if (fusectl & FUSECTL_C2XXX_MID_SKU)
                        return QAT_SKU_2;
                break;
        case MAX_AE_C2XXX:
                return QAT_SKU_1;
        }

        return QAT_SKU_UNKNOWN;
}

static uint32_t
qat_c2xxx_get_accel_cap(struct qat_softc *sc)
{
        return QAT_ACCEL_CAP_CRYPTO_SYMMETRIC |
            QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC |
            QAT_ACCEL_CAP_CIPHER |
            QAT_ACCEL_CAP_AUTHENTICATION;
}

static const char *
qat_c2xxx_get_fw_uof_name(struct qat_softc *sc)
{
        if (sc->sc_rev < QAT_REVID_C2XXX_B0)
                return AE_FW_UOF_NAME_C2XXX_A0;

        /* QAT_REVID_C2XXX_B0 and QAT_REVID_C2XXX_C0 */
        return AE_FW_UOF_NAME_C2XXX_B0;
}

static void
qat_c2xxx_enable_intr(struct qat_softc *sc)
{
        qat_misc_write_4(sc, EP_SMIA_C2XXX, EP_SMIA_MASK_C2XXX);
}

static void
qat_c2xxx_init_etr_intr(struct qat_softc *sc, int bank)
{
        /*
         * For now, all rings within the bank are set up such that flag
         * interrupts are generated when a ring leaves the empty state.
         * Note that in order for the ring interrupt to generate an IRQ
         * the interrupt must also be enabled for the ring.
         */
        qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL,
            ETR_INT_SRCSEL_MASK_0_C2XXX);
        qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL_2,
            ETR_INT_SRCSEL_MASK_X_C2XXX);
}
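
/*
 * Added commentary: struct qat_hw is the per-chipset table of constants
 * and hooks consumed by the common qat(4) core; each device family added
 * by this commit (C2XXX, C3XXX, C62x, D15xx, DH895XCC) supplies one.
 */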
const struct qat_hw qat_hw_c2xxx = {
        .qhw_sram_bar_id = BAR_SRAM_ID_C2XXX,
        .qhw_misc_bar_id = BAR_PMISC_ID_C2XXX,
        .qhw_etr_bar_id = BAR_ETR_ID_C2XXX,
        .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C2XXX,
        .qhw_ae_offset = AE_OFFSET_C2XXX,
        .qhw_ae_local_offset = AE_LOCAL_OFFSET_C2XXX,
        .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C2XXX,
        .qhw_num_banks = ETR_MAX_BANKS_C2XXX,
        .qhw_num_ap_banks = ETR_MAX_AP_BANKS_C2XXX,
        .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
        .qhw_num_accel = MAX_ACCEL_C2XXX,
        .qhw_num_engines = MAX_AE_C2XXX,
        .qhw_tx_rx_gap = ETR_TX_RX_GAP_C2XXX,
        .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C2XXX,
        .qhw_msix_ae_vec_gap = MSIX_AE_VEC_GAP_C2XXX,
        .qhw_fw_auth = false,
        .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW15,
        .qhw_fw_resp_size = FW_REQ_DEFAULT_SZ_HW15,
        .qhw_ring_asym_tx = 2,
        .qhw_ring_asym_rx = 3,
        .qhw_ring_sym_tx = 4,
        .qhw_ring_sym_rx = 5,
        .qhw_mof_fwname = AE_FW_MOF_NAME_C2XXX,
        .qhw_mmp_fwname = AE_FW_MMP_NAME_C2XXX,
        .qhw_prod_type = AE_FW_PROD_TYPE_C2XXX,
        .qhw_get_accel_mask = qat_c2xxx_get_accel_mask,
        .qhw_get_ae_mask = qat_c2xxx_get_ae_mask,
        .qhw_get_sku = qat_c2xxx_get_sku,
        .qhw_get_accel_cap = qat_c2xxx_get_accel_cap,
        .qhw_get_fw_uof_name = qat_c2xxx_get_fw_uof_name,
        .qhw_enable_intr = qat_c2xxx_enable_intr,
        .qhw_init_etr_intr = qat_c2xxx_init_etr_intr,
        .qhw_init_admin_comms = qat_adm_ring_init,
        .qhw_send_admin_init = qat_adm_ring_send_init,
        .qhw_crypto_setup_desc = qat_hw15_crypto_setup_desc,
        .qhw_crypto_setup_req_params = qat_hw15_crypto_setup_req_params,
        .qhw_crypto_opaque_offset =
            offsetof(struct fw_la_resp, comn_resp.opaque_data),
};

sys/dev/qat/qat_c2xxxreg.h (new file, 177 lines)

@@ -0,0 +1,177 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c2xxxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_C2XXXREG_H_
#define _DEV_PCI_QAT_C2XXXREG_H_
/* PCI revision IDs */
#define QAT_REVID_C2XXX_A0 0x00
#define QAT_REVID_C2XXX_B0 0x02
#define QAT_REVID_C2XXX_C0 0x03
/* Max number of accelerators and engines */
#define MAX_ACCEL_C2XXX 1
#define MAX_AE_C2XXX 2
/* PCIe BAR index */
#define BAR_SRAM_ID_C2XXX NO_PCI_REG
#define BAR_PMISC_ID_C2XXX 0
#define BAR_ETR_ID_C2XXX 1
#define ACCEL_MASK_C2XXX 0x1
#define AE_MASK_C2XXX 0x3
#define MSIX_AE_VEC_GAP_C2XXX 8
/* PCIe configuration space registers */
/* PESRAM: 512K eSRAM */
#define BAR_PESRAM_C2XXX NO_PCI_REG
#define BAR_PESRAM_SIZE_C2XXX 0
/*
* PMISC: 16K CAP, 16K Scratch, 32K SSU(QATs),
* 32K AE CSRs and transfer registers, 8K CHAP/PMU,
* 4K EP CSRs, 4K MSI-X Tables
*/
#define BAR_PMISC_C2XXX 0x18
#define BAR_PMISC_SIZE_C2XXX 0x20000 /* 128K */
/* PETRINGCSR: 8K 16 bundles of ET Ring CSRs */
#define BAR_PETRINGCSR_C2XXX 0x20
#define BAR_PETRINGCSR_SIZE_C2XXX 0x4000 /* 16K */
/* Fuse Control */
#define FUSECTL_C2XXX_PKE_DISABLE (1 << 6)
#define FUSECTL_C2XXX_ATH_DISABLE (1 << 5)
#define FUSECTL_C2XXX_CPH_DISABLE (1 << 4)
#define FUSECTL_C2XXX_LOW_SKU (1 << 3)
#define FUSECTL_C2XXX_MID_SKU (1 << 2)
#define FUSECTL_C2XXX_AE1_DISABLE (1 << 1)
/* SINT: Signal Target Raw Interrupt Register */
#define EP_SINTPF_C2XXX 0x1A024
/* SMIA: Signal Target IA Mask Register */
#define EP_SMIA_C2XXX 0x1A028
#define EP_SMIA_BUNDLES_IRQ_MASK_C2XXX 0xFF
#define EP_SMIA_AE_IRQ_MASK_C2XXX 0x10000
#define EP_SMIA_MASK_C2XXX \
(EP_SMIA_BUNDLES_IRQ_MASK_C2XXX | EP_SMIA_AE_IRQ_MASK_C2XXX)
#define EP_RIMISCCTL_C2XXX 0x1A0C4
#define EP_RIMISCCTL_MASK_C2XXX 0x40000000
#define PFCGCIOSFPRIR_REG_C2XXX 0x2C0
#define PFCGCIOSFPRIR_MASK_C2XXX 0XFFFF7FFF
/* BAR sub-regions */
#define PESRAM_BAR_C2XXX NO_PCI_REG
#define PESRAM_OFFSET_C2XXX 0x0
#define PESRAM_SIZE_C2XXX 0x0
#define CAP_GLOBAL_BAR_C2XXX BAR_PMISC_C2XXX
#define CAP_GLOBAL_OFFSET_C2XXX 0x00000
#define CAP_GLOBAL_SIZE_C2XXX 0x04000
#define CAP_HASH_OFFSET 0x900
#define SCRATCH_BAR_C2XXX NO_PCI_REG
#define SCRATCH_OFFSET_C2XXX NO_REG_OFFSET
#define SCRATCH_SIZE_C2XXX 0x0
#define SSU_BAR_C2XXX BAR_PMISC_C2XXX
#define SSU_OFFSET_C2XXX 0x08000
#define SSU_SIZE_C2XXX 0x08000
#define AE_BAR_C2XXX BAR_PMISC_C2XXX
#define AE_OFFSET_C2XXX 0x10000
#define AE_LOCAL_OFFSET_C2XXX 0x10800
#define PMU_BAR_C2XXX NO_PCI_REG
#define PMU_OFFSET_C2XXX NO_REG_OFFSET
#define PMU_SIZE_C2XXX 0x0
#define EP_BAR_C2XXX BAR_PMISC_C2XXX
#define EP_OFFSET_C2XXX 0x1A000
#define EP_SIZE_C2XXX 0x01000
#define MSIX_TAB_BAR_C2XXX NO_PCI_REG /* mapped by pci(9) */
#define MSIX_TAB_OFFSET_C2XXX 0x1B000
#define MSIX_TAB_SIZE_C2XXX 0x01000
#define PETRINGCSR_BAR_C2XXX BAR_PETRINGCSR_C2XXX
#define PETRINGCSR_OFFSET_C2XXX 0x0
#define PETRINGCSR_SIZE_C2XXX 0x0 /* use size of BAR */
/* ETR */
#define ETR_MAX_BANKS_C2XXX 8
#define ETR_MAX_ET_RINGS_C2XXX \
(ETR_MAX_BANKS_C2XXX * ETR_MAX_RINGS_PER_BANK_C2XXX)
#define ETR_MAX_AP_BANKS_C2XXX 4
#define ETR_TX_RX_GAP_C2XXX 1
#define ETR_TX_RINGS_MASK_C2XXX 0x51
#define ETR_BUNDLE_SIZE_C2XXX 0x0200
/* Initial bank Interrupt Source mask */
#define ETR_INT_SRCSEL_MASK_0_C2XXX 0x4444444CUL
#define ETR_INT_SRCSEL_MASK_X_C2XXX 0x44444444UL
/* AE firmware */
#define AE_FW_PROD_TYPE_C2XXX 0x00800000
#define AE_FW_MOF_NAME_C2XXX "mof_firmware_c2xxx"
#define AE_FW_MMP_NAME_C2XXX "mmp_firmware_c2xxx"
#define AE_FW_UOF_NAME_C2XXX_A0 "icp_qat_nae.uof"
#define AE_FW_UOF_NAME_C2XXX_B0 "icp_qat_nae_b0.uof"
#endif

sys/dev/qat/qat_c3xxx.c (new file, 298 lines)

@@ -0,0 +1,298 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qat_c3xxxreg.h"
#include "qatvar.h"
#include "qat_hw17var.h"

static uint32_t
qat_c3xxx_get_accel_mask(struct qat_softc *sc)
{
        uint32_t fusectl, strap;

        fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
        strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);

        return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C3XXX) &
            ACCEL_MASK_C3XXX);
}
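
/*
 * Added commentary: each SSM (accelerator) on this device feeds a pair of
 * MEs, which is why the strap-off loop below walks two mask bits
 * (me_disable starts at 0x3) for every disabled SSM.
 */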
static uint32_t
qat_c3xxx_get_ae_mask(struct qat_softc *sc)
{
        uint32_t fusectl, me_strap, me_disable, ssms_disabled;

        fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
        me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);

        /* If SSMs are disabled, then disable the corresponding MEs */
        ssms_disabled = (~qat_c3xxx_get_accel_mask(sc)) & ACCEL_MASK_C3XXX;
        me_disable = 0x3;
        while (ssms_disabled) {
                if (ssms_disabled & 1)
                        me_strap |= me_disable;
                ssms_disabled >>= 1;
                me_disable <<= 2;
        }

        return (~(fusectl | me_strap)) & AE_MASK_C3XXX;
}
static enum qat_sku
qat_c3xxx_get_sku(struct qat_softc *sc)
{
        switch (sc->sc_ae_num) {
        case MAX_AE_C3XXX:
                return QAT_SKU_4;
        }

        return QAT_SKU_UNKNOWN;
}

static uint32_t
qat_c3xxx_get_accel_cap(struct qat_softc *sc)
{
        uint32_t cap, legfuse, strap;

        legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
        strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);

        cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
            QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
            QAT_ACCEL_CAP_CIPHER +
            QAT_ACCEL_CAP_AUTHENTICATION +
            QAT_ACCEL_CAP_COMPRESSION +
            QAT_ACCEL_CAP_ZUC +
            QAT_ACCEL_CAP_SHA3;

        if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
                cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
                cap &= ~QAT_ACCEL_CAP_CIPHER;
        }
        if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
                cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
        if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
                cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
        if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
                cap &= ~QAT_ACCEL_CAP_COMPRESSION;
        if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
                cap &= ~QAT_ACCEL_CAP_ZUC;

        if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C3XXX)
                cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
        if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C3XXX)
                cap &= ~QAT_ACCEL_CAP_COMPRESSION;

        return cap;
}

static const char *
qat_c3xxx_get_fw_uof_name(struct qat_softc *sc)
{
        return AE_FW_UOF_NAME_C3XXX;
}

static void
qat_c3xxx_enable_intr(struct qat_softc *sc)
{
        /* Enable bundle and misc interrupts */
        qat_misc_write_4(sc, SMIAPF0_C3XXX, SMIA0_MASK_C3XXX);
        qat_misc_write_4(sc, SMIAPF1_C3XXX, SMIA1_MASK_C3XXX);
}

/* Worker thread to service arbiter mappings */
static uint32_t thrd_to_arb_map[] = {
        0x12222AAA, 0x11222AAA, 0x12222AAA,
        0x11222AAA, 0x12222AAA, 0x11222AAA
};
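
/*
 * Added commentary: one map entry per ME; entries for MEs that are fused
 * off are zeroed below so that no arbiter work is routed to them.
 */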
static void
qat_c3xxx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
{
        int i;

        for (i = 1; i < MAX_AE_C3XXX; i++) {
                if ((~sc->sc_ae_mask) & (1 << i))
                        thrd_to_arb_map[i] = 0;
        }
        *arb_map_config = thrd_to_arb_map;
}

static void
qat_c3xxx_enable_error_interrupts(struct qat_softc *sc)
{
        qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C3XXX); /* ME0-ME3 */
        qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C3XXX); /* ME4-ME5 */
        qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C3XXX); /* SSM2 */

        /* Reset everything except VFtoPF1_16. */
        qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C3XXX);

        /* RI CPP bus interface error detection and reporting. */
        qat_misc_write_4(sc, RICPPINTCTL_C3XXX, RICPP_EN_C3XXX);

        /* TI CPP bus interface error detection and reporting. */
        qat_misc_write_4(sc, TICPPINTCTL_C3XXX, TICPP_EN_C3XXX);

        /* Enable CFC Error interrupts and logging. */
        qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C3XXX, CPP_CFC_UE_C3XXX);
}

static void
qat_c3xxx_disable_error_interrupts(struct qat_softc *sc)
{
        /* ME0-ME3 */
        qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C3XXX | ERRMSK0_CERR_C3XXX);
        /* ME4-ME5 */
        qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C3XXX | ERRMSK1_CERR_C3XXX);
        /* CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
        qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C3XXX);
        /* SSM2 */
        qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C3XXX);
}

static void
qat_c3xxx_enable_error_correction(struct qat_softc *sc)
{
        u_int i, mask;

        /* Enable Accel Engine error detection & correction */
        for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
                if (!(mask & 1))
                        continue;
                qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C3XXX(i),
                    ENABLE_AE_ECC_ERR_C3XXX);
                qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C3XXX(i),
                    ENABLE_AE_ECC_PARITY_CORR_C3XXX);
        }

        /* Enable shared memory error detection & correction */
        for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
                if (!(mask & 1))
                        continue;
                qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C3XXX);
                qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C3XXX);
                qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C3XXX);
        }

        qat_c3xxx_enable_error_interrupts(sc);
}
const struct qat_hw qat_hw_c3xxx = {
        .qhw_sram_bar_id = BAR_SRAM_ID_C3XXX,
        .qhw_misc_bar_id = BAR_PMISC_ID_C3XXX,
        .qhw_etr_bar_id = BAR_ETR_ID_C3XXX,
        .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C3XXX,
        .qhw_ae_offset = AE_OFFSET_C3XXX,
        .qhw_ae_local_offset = AE_LOCAL_OFFSET_C3XXX,
        .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C3XXX,
        .qhw_num_banks = ETR_MAX_BANKS_C3XXX,
        .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
        .qhw_num_accel = MAX_ACCEL_C3XXX,
        .qhw_num_engines = MAX_AE_C3XXX,
        .qhw_tx_rx_gap = ETR_TX_RX_GAP_C3XXX,
        .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C3XXX,
        .qhw_clock_per_sec = CLOCK_PER_SEC_C3XXX,
        .qhw_fw_auth = true,
        .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
        .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
        .qhw_ring_asym_tx = 0,
        .qhw_ring_asym_rx = 8,
        .qhw_ring_sym_tx = 2,
        .qhw_ring_sym_rx = 10,
        .qhw_mof_fwname = AE_FW_MOF_NAME_C3XXX,
        .qhw_mmp_fwname = AE_FW_MMP_NAME_C3XXX,
        .qhw_prod_type = AE_FW_PROD_TYPE_C3XXX,
        .qhw_get_accel_mask = qat_c3xxx_get_accel_mask,
        .qhw_get_ae_mask = qat_c3xxx_get_ae_mask,
        .qhw_get_sku = qat_c3xxx_get_sku,
        .qhw_get_accel_cap = qat_c3xxx_get_accel_cap,
        .qhw_get_fw_uof_name = qat_c3xxx_get_fw_uof_name,
        .qhw_enable_intr = qat_c3xxx_enable_intr,
        .qhw_init_admin_comms = qat_adm_mailbox_init,
        .qhw_send_admin_init = qat_adm_mailbox_send_init,
        .qhw_init_arb = qat_arb_init,
        .qhw_get_arb_mapping = qat_c3xxx_get_arb_mapping,
        .qhw_enable_error_correction = qat_c3xxx_enable_error_correction,
        .qhw_disable_error_interrupts = qat_c3xxx_disable_error_interrupts,
        .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
        .qhw_check_slice_hang = qat_check_slice_hang,
        .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
        .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
        .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};

sys/dev/qat/qat_c3xxxreg.h (new file, 178 lines)

@@ -0,0 +1,178 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c3xxxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_C3XXXREG_H_
#define _DEV_PCI_QAT_C3XXXREG_H_
/* Max number of accelerators and engines */
#define MAX_ACCEL_C3XXX 3
#define MAX_AE_C3XXX 6
/* PCIe BAR index */
#define BAR_SRAM_ID_C3XXX NO_PCI_REG
#define BAR_PMISC_ID_C3XXX 0
#define BAR_ETR_ID_C3XXX 1
/* BAR PMISC sub-regions */
#define AE_OFFSET_C3XXX 0x20000
#define AE_LOCAL_OFFSET_C3XXX 0x20800
#define CAP_GLOBAL_OFFSET_C3XXX 0x30000
#define SOFTSTRAP_REG_C3XXX 0x2EC
#define SOFTSTRAP_SS_POWERGATE_CY_C3XXX __BIT(23)
#define SOFTSTRAP_SS_POWERGATE_PKE_C3XXX __BIT(24)
#define ACCEL_REG_OFFSET_C3XXX 16
#define ACCEL_MASK_C3XXX 0x7
#define AE_MASK_C3XXX 0x3F
#define SMIAPF0_C3XXX 0x3A028
#define SMIAPF1_C3XXX 0x3A030
#define SMIA0_MASK_C3XXX 0xFFFF
#define SMIA1_MASK_C3XXX 0x1
/* Error detection and correction */
#define AE_CTX_ENABLES_C3XXX(i) ((i) * 0x1000 + 0x20818)
#define AE_MISC_CONTROL_C3XXX(i) ((i) * 0x1000 + 0x20960)
#define ENABLE_AE_ECC_ERR_C3XXX __BIT(28)
#define ENABLE_AE_ECC_PARITY_CORR_C3XXX (__BIT(24) | __BIT(12))
#define ERRSSMSH_EN_C3XXX __BIT(3)
/* BIT(2) enables the logging of push/pull data errors. */
#define PPERR_EN_C3XXX (__BIT(2))
/* Mask for VF2PF interrupts */
#define VF2PF1_16_C3XXX (0xFFFF << 9)
#define ERRSOU3_VF2PF_C3XXX(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
#define ERRMSK3_VF2PF_C3XXX(vf_mask) (((vf_mask) & 0xFFFF) << 9)
/* Masks for correctable error interrupts. */
#define ERRMSK0_CERR_C3XXX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK1_CERR_C3XXX (__BIT(8) | __BIT(0))
#define ERRMSK5_CERR_C3XXX (0)
/* Masks for uncorrectable error interrupts. */
#define ERRMSK0_UERR_C3XXX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK1_UERR_C3XXX (__BIT(9) | __BIT(1))
#define ERRMSK3_UERR_C3XXX (__BIT(6) | __BIT(5) | __BIT(4) | __BIT(3) | \
__BIT(2) | __BIT(0))
#define ERRMSK5_UERR_C3XXX (__BIT(16))
/* RI CPP control */
#define RICPPINTCTL_C3XXX (0x3A000 + 0x110)
/*
* BIT(2) enables error detection and reporting on the RI Parity Error.
* BIT(1) enables error detection and reporting on the RI CPP Pull interface.
* BIT(0) enables error detection and reporting on the RI CPP Push interface.
*/
#define RICPP_EN_C3XXX (__BIT(2) | __BIT(1) | __BIT(0))
/* TI CPP control */
#define TICPPINTCTL_C3XXX (0x3A400 + 0x138)
/*
* BIT(3) enables error detection and reporting on the ETR Parity Error.
* BIT(2) enables error detection and reporting on the TI Parity Error.
* BIT(1) enables error detection and reporting on the TI CPP Pull interface.
* BIT(0) enables error detection and reporting on the TI CPP Push interface.
*/
#define TICPP_EN_C3XXX \
(__BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
/* CFC Uncorrectable Errors */
#define CPP_CFC_ERR_CTRL_C3XXX (0x30000 + 0xC00)
/*
* BIT(1) enables interrupt.
* BIT(0) enables detecting and logging of push/pull data errors.
*/
#define CPP_CFC_UE_C3XXX (__BIT(1) | __BIT(0))
#define SLICEPWRDOWN_C3XXX(i) ((i) * 0x4000 + 0x2C)
/* Enabling PKE4-PKE0. */
#define MMP_PWR_UP_MSK_C3XXX \
(__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
/* CPM Uncorrectable Errors */
#define INTMASKSSM_C3XXX(i) ((i) * 0x4000 + 0x0)
/* Disabling interrupts for correctable errors. */
#define INTMASKSSM_UERR_C3XXX \
(__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
/* MMP */
/* BIT(3) enables correction. */
#define CERRSSMMMP_EN_C3XXX (__BIT(3))
/* BIT(3) enables logging. */
#define UERRSSMMMP_EN_C3XXX (__BIT(3))
/* ETR */
#define ETR_MAX_BANKS_C3XXX 16
#define ETR_TX_RX_GAP_C3XXX 8
#define ETR_TX_RINGS_MASK_C3XXX 0xFF
#define ETR_BUNDLE_SIZE_C3XXX 0x1000
/* AE firmware */
#define AE_FW_PROD_TYPE_C3XXX 0x02000000
#define AE_FW_MOF_NAME_C3XXX "qat_c3xxx"
#define AE_FW_MMP_NAME_C3XXX "qat_c3xxx_mmp"
#define AE_FW_UOF_NAME_C3XXX "icp_qat_ae.suof"
/* Clock frequency */
#define CLOCK_PER_SEC_C3XXX (685 * 1000000 / 16)
#endif

sys/dev/qat/qat_c62x.c (new file, 314 lines)

@@ -0,0 +1,314 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qat_c62xreg.h"
#include "qatvar.h"
#include "qat_hw17var.h"

static uint32_t
qat_c62x_get_accel_mask(struct qat_softc *sc)
{
        uint32_t fusectl, strap;

        fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
        strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);

        return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C62X) &
            ACCEL_MASK_C62X);
}
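
/*
 * Added commentary: as on the C3XXX, SSMs and MEs are paired two MEs per
 * SSM (here 5 SSMs, 10 MEs), so the loop below straps off two MEs for
 * every disabled SSM.
 */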
static uint32_t
qat_c62x_get_ae_mask(struct qat_softc *sc)
{
        uint32_t fusectl, me_strap, me_disable, ssms_disabled;

        fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
        me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);

        /* If SSMs are disabled, then disable the corresponding MEs */
        ssms_disabled = (~qat_c62x_get_accel_mask(sc)) & ACCEL_MASK_C62X;
        me_disable = 0x3;
        while (ssms_disabled) {
                if (ssms_disabled & 1)
                        me_strap |= me_disable;
                ssms_disabled >>= 1;
                me_disable <<= 2;
        }

        return (~(fusectl | me_strap)) & AE_MASK_C62X;
}
static enum qat_sku
qat_c62x_get_sku(struct qat_softc *sc)
{
        switch (sc->sc_ae_num) {
        case 8:
                return QAT_SKU_2;
        case MAX_AE_C62X:
                return QAT_SKU_4;
        }

        return QAT_SKU_UNKNOWN;
}

static uint32_t
qat_c62x_get_accel_cap(struct qat_softc *sc)
{
        uint32_t cap, legfuse, strap;

        legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
        strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);

        cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
            QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
            QAT_ACCEL_CAP_CIPHER +
            QAT_ACCEL_CAP_AUTHENTICATION +
            QAT_ACCEL_CAP_COMPRESSION +
            QAT_ACCEL_CAP_ZUC +
            QAT_ACCEL_CAP_SHA3;

        if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
                cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
                cap &= ~QAT_ACCEL_CAP_CIPHER;
        }
        if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
                cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
        if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
                cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
        if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
                cap &= ~QAT_ACCEL_CAP_COMPRESSION;
        if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
                cap &= ~QAT_ACCEL_CAP_ZUC;

        if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C62X)
                cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
        if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C62X)
                cap &= ~QAT_ACCEL_CAP_COMPRESSION;

        return cap;
}

static const char *
qat_c62x_get_fw_uof_name(struct qat_softc *sc)
{
        return AE_FW_UOF_NAME_C62X;
}

static void
qat_c62x_enable_intr(struct qat_softc *sc)
{
        /* Enable bundle and misc interrupts */
        qat_misc_write_4(sc, SMIAPF0_C62X, SMIA0_MASK_C62X);
        qat_misc_write_4(sc, SMIAPF1_C62X, SMIA1_MASK_C62X);
}

/* Worker thread to service arbiter mappings */
static uint32_t thrd_to_arb_map[] = {
        0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
        0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};

static void
qat_c62x_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
{
        int i;

        for (i = 1; i < MAX_AE_C62X; i++) {
                if ((~sc->sc_ae_mask) & (1 << i))
                        thrd_to_arb_map[i] = 0;
        }
        *arb_map_config = thrd_to_arb_map;
}

static void
qat_c62x_enable_error_interrupts(struct qat_softc *sc)
{
        qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C62X); /* ME0-ME3 */
        qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C62X); /* ME4-ME7 */
        qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_C62X); /* ME8-ME9 */
        qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C62X); /* SSM2-SSM4 */

        /* Reset everything except VFtoPF1_16. */
        qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C62X);
        /* Disable Secure RAM correctable error interrupt */
        qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_C62X);

        /* RI CPP bus interface error detection and reporting. */
        qat_misc_write_4(sc, RICPPINTCTL_C62X, RICPP_EN_C62X);

        /* TI CPP bus interface error detection and reporting. */
        qat_misc_write_4(sc, TICPPINTCTL_C62X, TICPP_EN_C62X);

        /* Enable CFC Error interrupts and logging. */
        qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C62X, CPP_CFC_UE_C62X);

        /* Enable SecureRAM to fix and log Correctable errors */
        qat_misc_write_4(sc, SECRAMCERR_C62X, SECRAM_CERR_C62X);

        /* Enable SecureRAM Uncorrectable error interrupts and logging */
        qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_C62X);

        /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
        qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_C62X);
}

static void
qat_c62x_disable_error_interrupts(struct qat_softc *sc)
{
        /* ME0-ME3 */
        qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C62X | ERRMSK0_CERR_C62X);
        /* ME4-ME7 */
        qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C62X | ERRMSK1_CERR_C62X);
        /* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
        qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C62X | ERRMSK3_CERR_C62X);
        /* ME8-ME9 */
        qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_C62X | ERRMSK4_CERR_C62X);
        /* SSM2-SSM4 */
        qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C62X | ERRMSK5_CERR_C62X);
}

static void
qat_c62x_enable_error_correction(struct qat_softc *sc)
{
        u_int i, mask;

        /* Enable Accel Engine error detection & correction */
        for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
                if (!(mask & 1))
                        continue;
                qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C62X(i),
                    ENABLE_AE_ECC_ERR_C62X);
                qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C62X(i),
                    ENABLE_AE_ECC_PARITY_CORR_C62X);
        }

        /* Enable shared memory error detection & correction */
        for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
                if (!(mask & 1))
                        continue;
                qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C62X);
                qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C62X);
                qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C62X);
        }

        qat_c62x_enable_error_interrupts(sc);
}
const struct qat_hw qat_hw_c62x = {
        .qhw_sram_bar_id = BAR_SRAM_ID_C62X,
        .qhw_misc_bar_id = BAR_PMISC_ID_C62X,
        .qhw_etr_bar_id = BAR_ETR_ID_C62X,
        .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C62X,
        .qhw_ae_offset = AE_OFFSET_C62X,
        .qhw_ae_local_offset = AE_LOCAL_OFFSET_C62X,
        .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C62X,
        .qhw_num_banks = ETR_MAX_BANKS_C62X,
        .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
        .qhw_num_accel = MAX_ACCEL_C62X,
        .qhw_num_engines = MAX_AE_C62X,
        .qhw_tx_rx_gap = ETR_TX_RX_GAP_C62X,
        .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C62X,
        .qhw_clock_per_sec = CLOCK_PER_SEC_C62X,
        .qhw_fw_auth = true,
        .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
        .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
        .qhw_ring_asym_tx = 0,
        .qhw_ring_asym_rx = 8,
        .qhw_ring_sym_tx = 2,
        .qhw_ring_sym_rx = 10,
        .qhw_mof_fwname = AE_FW_MOF_NAME_C62X,
        .qhw_mmp_fwname = AE_FW_MMP_NAME_C62X,
        .qhw_prod_type = AE_FW_PROD_TYPE_C62X,
        .qhw_get_accel_mask = qat_c62x_get_accel_mask,
        .qhw_get_ae_mask = qat_c62x_get_ae_mask,
        .qhw_get_sku = qat_c62x_get_sku,
        .qhw_get_accel_cap = qat_c62x_get_accel_cap,
        .qhw_get_fw_uof_name = qat_c62x_get_fw_uof_name,
        .qhw_enable_intr = qat_c62x_enable_intr,
        .qhw_init_admin_comms = qat_adm_mailbox_init,
        .qhw_send_admin_init = qat_adm_mailbox_send_init,
        .qhw_init_arb = qat_arb_init,
        .qhw_get_arb_mapping = qat_c62x_get_arb_mapping,
        .qhw_enable_error_correction = qat_c62x_enable_error_correction,
        .qhw_disable_error_interrupts = qat_c62x_disable_error_interrupts,
        .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
        .qhw_check_slice_hang = qat_check_slice_hang,
        .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
        .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
        .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};

sys/dev/qat/qat_c62xreg.h (new file, 201 lines)

@@ -0,0 +1,201 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c62xreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_C62XREG_H_
#define _DEV_PCI_QAT_C62XREG_H_
/* Max number of accelerators and engines */
#define MAX_ACCEL_C62X 5
#define MAX_AE_C62X 10
/* PCIe BAR index */
#define BAR_SRAM_ID_C62X 0
#define BAR_PMISC_ID_C62X 1
#define BAR_ETR_ID_C62X 2
/* BAR PMISC sub-regions */
#define AE_OFFSET_C62X 0x20000
#define AE_LOCAL_OFFSET_C62X 0x20800
#define CAP_GLOBAL_OFFSET_C62X 0x30000
#define SOFTSTRAP_REG_C62X 0x2EC
#define SOFTSTRAP_SS_POWERGATE_CY_C62X __BIT(23)
#define SOFTSTRAP_SS_POWERGATE_PKE_C62X __BIT(24)
#define ACCEL_REG_OFFSET_C62X 16
#define ACCEL_MASK_C62X 0x1F
#define AE_MASK_C62X 0x3FF
#define SMIAPF0_C62X 0x3A028
#define SMIAPF1_C62X 0x3A030
#define SMIA0_MASK_C62X 0xFFFF
#define SMIA1_MASK_C62X 0x1
/* Error detection and correction */
#define AE_CTX_ENABLES_C62X(i) ((i) * 0x1000 + 0x20818)
#define AE_MISC_CONTROL_C62X(i) ((i) * 0x1000 + 0x20960)
#define ENABLE_AE_ECC_ERR_C62X __BIT(28)
#define ENABLE_AE_ECC_PARITY_CORR_C62X (__BIT(24) | __BIT(12))
#define ERRSSMSH_EN_C62X __BIT(3)
/* BIT(2) enables the logging of push/pull data errors. */
#define PPERR_EN_C62X (__BIT(2))
/* Mask for VF2PF interrupts */
#define VF2PF1_16_C62X (0xFFFF << 9)
#define ERRSOU3_VF2PF_C62X(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
#define ERRMSK3_VF2PF_C62X(vf_mask) (((vf_mask) & 0xFFFF) << 9)
/* Masks for correctable error interrupts. */
#define ERRMSK0_CERR_C62X (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK1_CERR_C62X (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK3_CERR_C62X (__BIT(7))
#define ERRMSK4_CERR_C62X (__BIT(8) | __BIT(0))
#define ERRMSK5_CERR_C62X (0)
/* Masks for uncorrectable error interrupts. */
#define ERRMSK0_UERR_C62X (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK1_UERR_C62X (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK3_UERR_C62X (__BIT(8) | __BIT(6) | __BIT(5) | __BIT(4) | \
__BIT(3) | __BIT(2) | __BIT(0))
#define ERRMSK4_UERR_C62X (__BIT(9) | __BIT(1))
#define ERRMSK5_UERR_C62X (__BIT(18) | __BIT(17) | __BIT(16))
/* RI CPP control */
#define RICPPINTCTL_C62X (0x3A000 + 0x110)
/*
* BIT(2) enables error detection and reporting on the RI Parity Error.
* BIT(1) enables error detection and reporting on the RI CPP Pull interface.
* BIT(0) enables error detection and reporting on the RI CPP Push interface.
*/
#define RICPP_EN_C62X (__BIT(2) | __BIT(1) | __BIT(0))
/* TI CPP control */
#define TICPPINTCTL_C62X (0x3A400 + 0x138)
/*
* BIT(3) enables error detection and reporting on the ETR Parity Error.
* BIT(2) enables error detection and reporting on the TI Parity Error.
* BIT(1) enables error detection and reporting on the TI CPP Pull interface.
* BIT(0) enables error detection and reporting on the TI CPP Push interface.
*/
#define TICPP_EN_C62X \
(__BIT(4) | __BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
/* CFC Uncorrectable Errors */
#define CPP_CFC_ERR_CTRL_C62X (0x30000 + 0xC00)
/*
* BIT(1) enables interrupt.
* BIT(0) enables detecting and logging of push/pull data errors.
*/
#define CPP_CFC_UE_C62X (__BIT(1) | __BIT(0))
/* Correctable SecureRAM Error Reg */
#define SECRAMCERR_C62X (0x3AC00 + 0x00)
/* BIT(3) enables fixing and logging of correctable errors. */
#define SECRAM_CERR_C62X (__BIT(3))
/* Uncorrectable SecureRAM Error Reg */
/*
* BIT(17) enables interrupt.
* BIT(3) enables detecting and logging of uncorrectable errors.
*/
#define SECRAM_UERR_C62X (__BIT(17) | __BIT(3))
/* Miscellaneous Memory Target Errors Register */
/*
* BIT(3) enables detecting and logging push/pull data errors.
* BIT(2) enables interrupt.
*/
#define TGT_UERR_C62X (__BIT(3) | __BIT(2))
#define SLICEPWRDOWN_C62X(i) ((i) * 0x4000 + 0x2C)
/* Enabling PKE4-PKE0. */
#define MMP_PWR_UP_MSK_C62X \
(__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
/* CPM Uncorrectable Errors */
#define INTMASKSSM_C62X(i) ((i) * 0x4000 + 0x0)
/* Disabling interrupts for correctable errors. */
#define INTMASKSSM_UERR_C62X \
(__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
/* MMP */
/* BIT(3) enables correction. */
#define CERRSSMMMP_EN_C62X (__BIT(3))
/* BIT(3) enables logging. */
#define UERRSSMMMP_EN_C62X (__BIT(3))
/* ETR */
#define ETR_MAX_BANKS_C62X 16
#define ETR_TX_RX_GAP_C62X 8
#define ETR_TX_RINGS_MASK_C62X 0xFF
#define ETR_BUNDLE_SIZE_C62X 0x1000
/* AE firmware */
#define AE_FW_PROD_TYPE_C62X 0x01000000
#define AE_FW_MOF_NAME_C62X "qat_c62x"
#define AE_FW_MMP_NAME_C62X "qat_c62x_mmp"
#define AE_FW_UOF_NAME_C62X "icp_qat_ae.suof"
/* Clock frequency */
#define CLOCK_PER_SEC_C62X (685 * 1000000 / 16)
#endif

314
sys/dev/qat/qat_d15xx.c Normal file

@ -0,0 +1,314 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_d15xx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_d15xx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qat_d15xxreg.h"
#include "qatvar.h"
#include "qat_hw17var.h"
static uint32_t
qat_d15xx_get_accel_mask(struct qat_softc *sc)
{
uint32_t fusectl, strap;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
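/*
 * A fuse or soft-strap bit set to 1 disables the corresponding accelerator,
 * so invert the union of the two registers and extract the accelerator field.
 */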
return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_D15XX) &
ACCEL_MASK_D15XX);
}
static uint32_t
qat_d15xx_get_ae_mask(struct qat_softc *sc)
{
uint32_t fusectl, me_strap, me_disable, ssms_disabled;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
/* If SSMs are disabled, then disable the corresponding MEs */
ssms_disabled = (~qat_d15xx_get_accel_mask(sc)) & ACCEL_MASK_D15XX;
me_disable = 0x3;
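/* Each SSM feeds a pair of MEs, hence the two-bit stride below. */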
while (ssms_disabled) {
if (ssms_disabled & 1)
me_strap |= me_disable;
ssms_disabled >>= 1;
me_disable <<= 2;
}
return (~(fusectl | me_strap)) & AE_MASK_D15XX;
}
static enum qat_sku
qat_d15xx_get_sku(struct qat_softc *sc)
{
switch (sc->sc_ae_num) {
case 8:
return QAT_SKU_2;
case MAX_AE_D15XX:
return QAT_SKU_4;
}
return QAT_SKU_UNKNOWN;
}
static uint32_t
qat_d15xx_get_accel_cap(struct qat_softc *sc)
{
uint32_t cap, legfuse, strap;
legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
QAT_ACCEL_CAP_CIPHER +
QAT_ACCEL_CAP_AUTHENTICATION +
QAT_ACCEL_CAP_COMPRESSION +
QAT_ACCEL_CAP_ZUC +
QAT_ACCEL_CAP_SHA3;
if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
cap &= ~QAT_ACCEL_CAP_CIPHER;
}
if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
cap &= ~QAT_ACCEL_CAP_ZUC;
if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_D15XX)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_D15XX)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
return cap;
}
static const char *
qat_d15xx_get_fw_uof_name(struct qat_softc *sc)
{
return AE_FW_UOF_NAME_D15XX;
}
static void
qat_d15xx_enable_intr(struct qat_softc *sc)
{
/* Enable bundle and misc interrupts */
qat_misc_write_4(sc, SMIAPF0_D15XX, SMIA0_MASK_D15XX);
qat_misc_write_4(sc, SMIAPF1_D15XX, SMIA1_MASK_D15XX);
}
/* Per-AE mapping of worker threads to service arbiters */
static uint32_t thrd_to_arb_map[] = {
0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};
static void
qat_d15xx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
{
int i;
for (i = 1; i < MAX_AE_D15XX; i++) {
if ((~sc->sc_ae_mask) & (1 << i))
thrd_to_arb_map[i] = 0;
}
*arb_map_config = thrd_to_arb_map;
}
static void
qat_d15xx_enable_error_interrupts(struct qat_softc *sc)
{
qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_D15XX); /* ME0-ME3 */
qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_D15XX); /* ME4-ME7 */
qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_D15XX); /* ME8-ME9 */
qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_D15XX); /* SSM2-SSM4 */
/* Reset everything except VFtoPF1_16. */
qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_D15XX);
/* Disable Secure RAM correctable error interrupt */
qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_D15XX);
/* RI CPP bus interface error detection and reporting. */
qat_misc_write_4(sc, RICPPINTCTL_D15XX, RICPP_EN_D15XX);
/* TI CPP bus interface error detection and reporting. */
qat_misc_write_4(sc, TICPPINTCTL_D15XX, TICPP_EN_D15XX);
/* Enable CFC Error interrupts and logging. */
qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_D15XX, CPP_CFC_UE_D15XX);
/* Enable SecureRAM to fix and log Correctable errors */
qat_misc_write_4(sc, SECRAMCERR_D15XX, SECRAM_CERR_D15XX);
/* Enable SecureRAM Uncorrectable error interrupts and logging */
qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_D15XX);
/* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_D15XX);
}
static void
qat_d15xx_disable_error_interrupts(struct qat_softc *sc)
{
/* ME0-ME3 */
qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_D15XX | ERRMSK0_CERR_D15XX);
/* ME4-ME7 */
qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_D15XX | ERRMSK1_CERR_D15XX);
/* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_D15XX | ERRMSK3_CERR_D15XX);
/* ME8-ME9 */
qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_D15XX | ERRMSK4_CERR_D15XX);
/* SSM2-SSM4 */
qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_D15XX | ERRMSK5_CERR_D15XX);
}
static void
qat_d15xx_enable_error_correction(struct qat_softc *sc)
{
u_int i, mask;
/* Enable Accel Engine error detection & correction */
for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_D15XX(i),
ENABLE_AE_ECC_ERR_D15XX);
qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_D15XX(i),
ENABLE_AE_ECC_PARITY_CORR_D15XX);
}
/* Enable shared memory error detection & correction */
for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_D15XX);
qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_D15XX);
qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_D15XX);
}
qat_d15xx_enable_error_interrupts(sc);
}
const struct qat_hw qat_hw_d15xx = {
.qhw_sram_bar_id = BAR_SRAM_ID_D15XX,
.qhw_misc_bar_id = BAR_PMISC_ID_D15XX,
.qhw_etr_bar_id = BAR_ETR_ID_D15XX,
.qhw_cap_global_offset = CAP_GLOBAL_OFFSET_D15XX,
.qhw_ae_offset = AE_OFFSET_D15XX,
.qhw_ae_local_offset = AE_LOCAL_OFFSET_D15XX,
.qhw_etr_bundle_size = ETR_BUNDLE_SIZE_D15XX,
.qhw_num_banks = ETR_MAX_BANKS_D15XX,
.qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
.qhw_num_accel = MAX_ACCEL_D15XX,
.qhw_num_engines = MAX_AE_D15XX,
.qhw_tx_rx_gap = ETR_TX_RX_GAP_D15XX,
.qhw_tx_rings_mask = ETR_TX_RINGS_MASK_D15XX,
.qhw_clock_per_sec = CLOCK_PER_SEC_D15XX,
.qhw_fw_auth = true,
.qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
.qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
.qhw_ring_asym_tx = 0,
.qhw_ring_asym_rx = 8,
.qhw_ring_sym_tx = 2,
.qhw_ring_sym_rx = 10,
.qhw_mof_fwname = AE_FW_MOF_NAME_D15XX,
.qhw_mmp_fwname = AE_FW_MMP_NAME_D15XX,
.qhw_prod_type = AE_FW_PROD_TYPE_D15XX,
.qhw_get_accel_mask = qat_d15xx_get_accel_mask,
.qhw_get_ae_mask = qat_d15xx_get_ae_mask,
.qhw_get_sku = qat_d15xx_get_sku,
.qhw_get_accel_cap = qat_d15xx_get_accel_cap,
.qhw_get_fw_uof_name = qat_d15xx_get_fw_uof_name,
.qhw_enable_intr = qat_d15xx_enable_intr,
.qhw_init_admin_comms = qat_adm_mailbox_init,
.qhw_send_admin_init = qat_adm_mailbox_send_init,
.qhw_init_arb = qat_arb_init,
.qhw_get_arb_mapping = qat_d15xx_get_arb_mapping,
.qhw_enable_error_correction = qat_d15xx_enable_error_correction,
.qhw_disable_error_interrupts = qat_d15xx_disable_error_interrupts,
.qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
.qhw_check_slice_hang = qat_check_slice_hang,
.qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
.qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
.qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};

201
sys/dev/qat/qat_d15xxreg.h Normal file

@ -0,0 +1,201 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_d15xxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_D15XXREG_H_
#define _DEV_PCI_QAT_D15XXREG_H_
/* Max number of accelerators and engines */
#define MAX_ACCEL_D15XX 5
#define MAX_AE_D15XX 10
/* PCIe BAR index */
#define BAR_SRAM_ID_D15XX 0
#define BAR_PMISC_ID_D15XX 1
#define BAR_ETR_ID_D15XX 2
/* BAR PMISC sub-regions */
#define AE_OFFSET_D15XX 0x20000
#define AE_LOCAL_OFFSET_D15XX 0x20800
#define CAP_GLOBAL_OFFSET_D15XX 0x30000
#define SOFTSTRAP_REG_D15XX 0x2EC
#define SOFTSTRAP_SS_POWERGATE_CY_D15XX __BIT(23)
#define SOFTSTRAP_SS_POWERGATE_PKE_D15XX __BIT(24)
#define ACCEL_REG_OFFSET_D15XX 16
#define ACCEL_MASK_D15XX 0x1F
#define AE_MASK_D15XX 0x3FF
#define SMIAPF0_D15XX 0x3A028
#define SMIAPF1_D15XX 0x3A030
#define SMIA0_MASK_D15XX 0xFFFF
#define SMIA1_MASK_D15XX 0x1
/* Error detection and correction */
#define AE_CTX_ENABLES_D15XX(i) ((i) * 0x1000 + 0x20818)
#define AE_MISC_CONTROL_D15XX(i) ((i) * 0x1000 + 0x20960)
#define ENABLE_AE_ECC_ERR_D15XX __BIT(28)
#define ENABLE_AE_ECC_PARITY_CORR_D15XX (__BIT(24) | __BIT(12))
#define ERRSSMSH_EN_D15XX __BIT(3)
/* BIT(2) enables the logging of push/pull data errors. */
#define PPERR_EN_D15XX (__BIT(2))
/* Mask for VF2PF interrupts */
#define VF2PF1_16_D15XX (0xFFFF << 9)
#define ERRSOU3_VF2PF_D15XX(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
#define ERRMSK3_VF2PF_D15XX(vf_mask) (((vf_mask) & 0xFFFF) << 9)
/* Masks for correctable error interrupts. */
#define ERRMSK0_CERR_D15XX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK1_CERR_D15XX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK3_CERR_D15XX (__BIT(7))
#define ERRMSK4_CERR_D15XX (__BIT(8) | __BIT(0))
#define ERRMSK5_CERR_D15XX (0)
/* Masks for uncorrectable error interrupts. */
#define ERRMSK0_UERR_D15XX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK1_UERR_D15XX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK3_UERR_D15XX (__BIT(8) | __BIT(6) | __BIT(5) | __BIT(4) | \
__BIT(3) | __BIT(2) | __BIT(0))
#define ERRMSK4_UERR_D15XX (__BIT(9) | __BIT(1))
#define ERRMSK5_UERR_D15XX (__BIT(18) | __BIT(17) | __BIT(16))
/* RI CPP control */
#define RICPPINTCTL_D15XX (0x3A000 + 0x110)
/*
* BIT(2) enables error detection and reporting on the RI Parity Error.
* BIT(1) enables error detection and reporting on the RI CPP Pull interface.
* BIT(0) enables error detection and reporting on the RI CPP Push interface.
*/
#define RICPP_EN_D15XX (__BIT(2) | __BIT(1) | __BIT(0))
/* TI CPP control */
#define TICPPINTCTL_D15XX (0x3A400 + 0x138)
/*
* BIT(3) enables error detection and reporting on the ETR Parity Error.
* BIT(2) enables error detection and reporting on the TI Parity Error.
* BIT(1) enables error detection and reporting on the TI CPP Pull interface.
* BIT(0) enables error detection and reporting on the TI CPP Push interface.
*/
#define TICPP_EN_D15XX \
(__BIT(4) | __BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
/* CFC Uncorrectable Errors */
#define CPP_CFC_ERR_CTRL_D15XX (0x30000 + 0xC00)
/*
* BIT(1) enables interrupt.
* BIT(0) enables detecting and logging of push/pull data errors.
*/
#define CPP_CFC_UE_D15XX (__BIT(1) | __BIT(0))
/* Correctable SecureRAM Error Reg */
#define SECRAMCERR_D15XX (0x3AC00 + 0x00)
/* BIT(3) enables fixing and logging of correctable errors. */
#define SECRAM_CERR_D15XX (__BIT(3))
/* Uncorrectable SecureRAM Error Reg */
/*
* BIT(17) enables interrupt.
* BIT(3) enables detecting and logging of uncorrectable errors.
*/
#define SECRAM_UERR_D15XX (__BIT(17) | __BIT(3))
/* Miscellaneous Memory Target Errors Register */
/*
* BIT(3) enables detecting and logging push/pull data errors.
* BIT(2) enables interrupt.
*/
#define TGT_UERR_D15XX (__BIT(3) | __BIT(2))
#define SLICEPWRDOWN_D15XX(i) ((i) * 0x4000 + 0x2C)
/* Enabling PKE4-PKE0. */
#define MMP_PWR_UP_MSK_D15XX \
(__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
/* CPM Uncorrectable Errors */
#define INTMASKSSM_D15XX(i) ((i) * 0x4000 + 0x0)
/* Disabling interrupts for correctable errors. */
#define INTMASKSSM_UERR_D15XX \
(__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
/* MMP */
/* BIT(3) enables correction. */
#define CERRSSMMMP_EN_D15XX (__BIT(3))
/* BIT(3) enables logging. */
#define UERRSSMMMP_EN_D15XX (__BIT(3))
/* ETR */
#define ETR_MAX_BANKS_D15XX 16
#define ETR_TX_RX_GAP_D15XX 8
#define ETR_TX_RINGS_MASK_D15XX 0xFF
#define ETR_BUNDLE_SIZE_D15XX 0x1000
/* AE firmware */
#define AE_FW_PROD_TYPE_D15XX 0x01000000
#define AE_FW_MOF_NAME_D15XX "qat_d15xx"
#define AE_FW_MMP_NAME_D15XX "qat_d15xx_mmp"
#define AE_FW_UOF_NAME_D15XX "icp_qat_ae.suof"
/* Clock frequency */
#define CLOCK_PER_SEC_D15XX (685 * 1000000 / 16)
#endif

271
sys/dev/qat/qat_dh895xcc.c Normal file

@ -0,0 +1,271 @@
/* SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause */
/*
* Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 - 2020 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qatvar.h"
#include "qat_hw17reg.h"
#include "qat_hw17var.h"
#include "qat_dh895xccreg.h"
static uint32_t
qat_dh895xcc_get_accel_mask(struct qat_softc *sc)
{
uint32_t fusectl, strap;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_DH895XCC, 4);
return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_DH895XCC) &
ACCEL_MASK_DH895XCC);
}
static uint32_t
qat_dh895xcc_get_ae_mask(struct qat_softc *sc)
{
uint32_t fusectl, strap;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_DH895XCC, 4);
return (~(fusectl | strap)) & AE_MASK_DH895XCC;
}
static enum qat_sku
qat_dh895xcc_get_sku(struct qat_softc *sc)
{
uint32_t fusectl, sku;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
sku = (fusectl & FUSECTL_SKU_MASK_DH895XCC) >>
FUSECTL_SKU_SHIFT_DH895XCC;
switch (sku) {
case FUSECTL_SKU_1_DH895XCC:
return QAT_SKU_1;
case FUSECTL_SKU_2_DH895XCC:
return QAT_SKU_2;
case FUSECTL_SKU_3_DH895XCC:
return QAT_SKU_3;
case FUSECTL_SKU_4_DH895XCC:
return QAT_SKU_4;
default:
return QAT_SKU_UNKNOWN;
}
}
static uint32_t
qat_dh895xcc_get_accel_cap(struct qat_softc *sc)
{
uint32_t cap, legfuse;
legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
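/* Start from the full capability set and strip whatever the legacy fuses disable. */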
cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
QAT_ACCEL_CAP_CIPHER +
QAT_ACCEL_CAP_AUTHENTICATION +
QAT_ACCEL_CAP_COMPRESSION +
QAT_ACCEL_CAP_ZUC +
QAT_ACCEL_CAP_SHA3;
if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
cap &= ~QAT_ACCEL_CAP_CIPHER;
}
if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
cap &= ~QAT_ACCEL_CAP_ZUC;
return cap;
}
static const char *
qat_dh895xcc_get_fw_uof_name(struct qat_softc *sc)
{
return AE_FW_UOF_NAME_DH895XCC;
}
static void
qat_dh895xcc_enable_intr(struct qat_softc *sc)
{
/* Enable bundle and misc interrupts */
qat_misc_write_4(sc, SMIAPF0_DH895XCC, SMIA0_MASK_DH895XCC);
qat_misc_write_4(sc, SMIAPF1_DH895XCC, SMIA1_MASK_DH895XCC);
}
/* Per-AE mappings of worker threads to service arbiters, selected by device SKU */
static uint32_t thrd_to_arb_map_sku4[] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
static uint32_t thrd_to_arb_map_sku6[] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
};
static void
qat_dh895xcc_get_arb_mapping(struct qat_softc *sc,
const uint32_t **arb_map_config)
{
uint32_t *map, sku;
int i;
sku = qat_dh895xcc_get_sku(sc);
switch (sku) {
case QAT_SKU_1:
map = thrd_to_arb_map_sku4;
break;
case QAT_SKU_2:
case QAT_SKU_4:
map = thrd_to_arb_map_sku6;
break;
default:
*arb_map_config = NULL;
return;
}
for (i = 1; i < MAX_AE_DH895XCC; i++) {
if ((~sc->sc_ae_mask) & (1 << i))
map[i] = 0;
}
*arb_map_config = map;
}
static void
qat_dh895xcc_enable_error_correction(struct qat_softc *sc)
{
uint32_t mask;
u_int i;
/* Enable Accel Engine error detection & correction */
for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_DH895XCC(i),
ENABLE_AE_ECC_ERR_DH895XCC);
qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_DH895XCC(i),
ENABLE_AE_ECC_PARITY_CORR_DH895XCC);
}
/* Enable shared memory error detection & correction */
for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_DH895XCC);
qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_DH895XCC);
qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_DH895XCC);
}
}
const struct qat_hw qat_hw_dh895xcc = {
.qhw_sram_bar_id = BAR_SRAM_ID_DH895XCC,
.qhw_misc_bar_id = BAR_PMISC_ID_DH895XCC,
.qhw_etr_bar_id = BAR_ETR_ID_DH895XCC,
.qhw_cap_global_offset = CAP_GLOBAL_OFFSET_DH895XCC,
.qhw_ae_offset = AE_OFFSET_DH895XCC,
.qhw_ae_local_offset = AE_LOCAL_OFFSET_DH895XCC,
.qhw_etr_bundle_size = ETR_BUNDLE_SIZE_DH895XCC,
.qhw_num_banks = ETR_MAX_BANKS_DH895XCC,
.qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
.qhw_num_accel = MAX_ACCEL_DH895XCC,
.qhw_num_engines = MAX_AE_DH895XCC,
.qhw_tx_rx_gap = ETR_TX_RX_GAP_DH895XCC,
.qhw_tx_rings_mask = ETR_TX_RINGS_MASK_DH895XCC,
.qhw_clock_per_sec = CLOCK_PER_SEC_DH895XCC,
.qhw_fw_auth = false,
.qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
.qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
.qhw_ring_asym_tx = 0,
.qhw_ring_asym_rx = 8,
.qhw_ring_sym_tx = 2,
.qhw_ring_sym_rx = 10,
.qhw_mof_fwname = AE_FW_MOF_NAME_DH895XCC,
.qhw_mmp_fwname = AE_FW_MMP_NAME_DH895XCC,
.qhw_prod_type = AE_FW_PROD_TYPE_DH895XCC,
.qhw_get_accel_mask = qat_dh895xcc_get_accel_mask,
.qhw_get_ae_mask = qat_dh895xcc_get_ae_mask,
.qhw_get_sku = qat_dh895xcc_get_sku,
.qhw_get_accel_cap = qat_dh895xcc_get_accel_cap,
.qhw_get_fw_uof_name = qat_dh895xcc_get_fw_uof_name,
.qhw_enable_intr = qat_dh895xcc_enable_intr,
.qhw_init_admin_comms = qat_adm_mailbox_init,
.qhw_send_admin_init = qat_adm_mailbox_send_init,
.qhw_init_arb = qat_arb_init,
.qhw_get_arb_mapping = qat_dh895xcc_get_arb_mapping,
.qhw_enable_error_correction = qat_dh895xcc_enable_error_correction,
.qhw_check_slice_hang = qat_check_slice_hang,
.qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
.qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
.qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};

119
sys/dev/qat/qat_dh895xccreg.h Normal file

@ -0,0 +1,119 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014-2020 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_DH895XCCREG_H_
#define _DEV_PCI_QAT_DH895XCCREG_H_
/* Max number of accelerators and engines */
#define MAX_ACCEL_DH895XCC 6
#define MAX_AE_DH895XCC 12
/* PCIe BAR index */
#define BAR_SRAM_ID_DH895XCC 0
#define BAR_PMISC_ID_DH895XCC 1
#define BAR_ETR_ID_DH895XCC 2
/* BAR PMISC sub-regions */
#define AE_OFFSET_DH895XCC 0x20000
#define AE_LOCAL_OFFSET_DH895XCC 0x20800
#define CAP_GLOBAL_OFFSET_DH895XCC 0x30000
#define SOFTSTRAP_REG_DH895XCC 0x2EC
#define FUSECTL_SKU_MASK_DH895XCC 0x300000
#define FUSECTL_SKU_SHIFT_DH895XCC 20
#define FUSECTL_SKU_1_DH895XCC 0
#define FUSECTL_SKU_2_DH895XCC 1
#define FUSECTL_SKU_3_DH895XCC 2
#define FUSECTL_SKU_4_DH895XCC 3
#define ACCEL_REG_OFFSET_DH895XCC 13
#define ACCEL_MASK_DH895XCC 0x3F
#define AE_MASK_DH895XCC 0xFFF
#define SMIAPF0_DH895XCC 0x3A028
#define SMIAPF1_DH895XCC 0x3A030
#define SMIA0_MASK_DH895XCC 0xFFFFFFFF
#define SMIA1_MASK_DH895XCC 0x1
/* Error detection and correction */
#define AE_CTX_ENABLES_DH895XCC(i) ((i) * 0x1000 + 0x20818)
#define AE_MISC_CONTROL_DH895XCC(i) ((i) * 0x1000 + 0x20960)
#define ENABLE_AE_ECC_ERR_DH895XCC __BIT(28)
#define ENABLE_AE_ECC_PARITY_CORR_DH895XCC (__BIT(24) | __BIT(12))
#define ERRSSMSH_EN_DH895XCC __BIT(3)
/* BIT(2) enables the logging of push/pull data errors. */
#define PPERR_EN_DH895XCC (__BIT(2))
/* ETR */
#define ETR_MAX_BANKS_DH895XCC 32
#define ETR_TX_RX_GAP_DH895XCC 8
#define ETR_TX_RINGS_MASK_DH895XCC 0xFF
#define ETR_BUNDLE_SIZE_DH895XCC 0x1000
/* AE firmware */
#define AE_FW_PROD_TYPE_DH895XCC 0x00400000
#define AE_FW_MOF_NAME_DH895XCC "qat_895xcc"
#define AE_FW_MMP_NAME_DH895XCC "qat_895xcc_mmp"
#define AE_FW_UOF_NAME_DH895XCC "icp_qat_ae.uof"
/* Clock frequency */
#define CLOCK_PER_SEC_DH895XCC (685 * 1000000 / 16)
#endif

953
sys/dev/qat/qat_hw15.c Normal file

@ -0,0 +1,953 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <opencrypto/xform.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qat_hw15reg.h"
#include "qatvar.h"
#include "qat_hw15var.h"
static int qat_adm_ring_init_ring_table(struct qat_softc *);
static void qat_adm_ring_build_slice_mask(uint16_t *, uint32_t, uint32_t);
static void qat_adm_ring_build_shram_mask(uint64_t *, uint32_t, uint32_t);
static int qat_adm_ring_build_ring_table(struct qat_softc *, uint32_t);
static int qat_adm_ring_build_init_msg(struct qat_softc *,
struct fw_init_req *, enum fw_init_cmd_id, uint32_t,
struct qat_accel_init_cb *);
static int qat_adm_ring_send_init_msg_sync(struct qat_softc *,
enum fw_init_cmd_id, uint32_t);
static int qat_adm_ring_send_init_msg(struct qat_softc *,
enum fw_init_cmd_id);
static int qat_adm_ring_intr(struct qat_softc *, void *, void *);
void
qat_msg_req_type_populate(struct arch_if_req_hdr *msg, enum arch_if_req type,
uint32_t rxring)
{
memset(msg, 0, sizeof(struct arch_if_req_hdr));
msg->flags = ARCH_IF_FLAGS_VALID_FLAG |
ARCH_IF_FLAGS_RESP_RING_TYPE_ET | ARCH_IF_FLAGS_RESP_TYPE_S;
msg->req_type = type;
msg->resp_pipe_id = rxring;
}
void
qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *msg, bus_addr_t desc_paddr,
uint8_t hdrsz, uint8_t hwblksz, uint16_t comn_req_flags, uint32_t flow_id)
{
struct fw_comn_req_hdr *hdr = &msg->comn_hdr;
hdr->comn_req_flags = comn_req_flags;
hdr->content_desc_params_sz = hwblksz;
hdr->content_desc_hdr_sz = hdrsz;
hdr->content_desc_addr = desc_paddr;
msg->flow_id = flow_id;
}
void
qat_msg_service_cmd_populate(struct fw_la_bulk_req *msg, enum fw_la_cmd_id cmdid,
uint16_t cmd_flags)
{
msg->comn_la_req.la_cmd_id = cmdid;
msg->comn_la_req.u.la_flags = cmd_flags;
}
void
qat_msg_cmn_mid_populate(struct fw_comn_req_mid *msg, void *cookie,
uint64_t src, uint64_t dst)
{
msg->opaque_data = (uint64_t)(uintptr_t)cookie;
msg->src_data_addr = src;
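/* A zero destination address requests in-place processing. */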
if (dst == 0)
msg->dest_data_addr = src;
else
msg->dest_data_addr = dst;
}
void
qat_msg_req_params_populate(struct fw_la_bulk_req *msg,
bus_addr_t req_params_paddr, uint8_t req_params_sz)
{
msg->req_params_addr = req_params_paddr;
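/* The firmware measures the request parameter block in 8-byte (quadword) units. */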
msg->comn_la_req.u1.req_params_blk_sz = req_params_sz / 8;
}
void
qat_msg_cmn_footer_populate(union fw_comn_req_ftr *msg, uint64_t next_addr)
{
msg->next_request_addr = next_addr;
}
void
qat_msg_params_populate(struct fw_la_bulk_req *msg,
struct qat_crypto_desc *desc, uint8_t req_params_sz,
uint16_t service_cmd_flags, uint16_t comn_req_flags)
{
qat_msg_cmn_hdr_populate(msg, desc->qcd_desc_paddr,
desc->qcd_hdr_sz, desc->qcd_hw_blk_sz, comn_req_flags, 0);
qat_msg_service_cmd_populate(msg, desc->qcd_cmd_id, service_cmd_flags);
qat_msg_cmn_mid_populate(&msg->comn_mid, NULL, 0, 0);
qat_msg_req_params_populate(msg, 0, req_params_sz);
qat_msg_cmn_footer_populate(&msg->comn_ftr, 0);
}
static int
qat_adm_ring_init_ring_table(struct qat_softc *sc)
{
struct qat_admin_rings *qadr = &sc->sc_admin_rings;
if (sc->sc_ae_num == 1) {
qadr->qadr_cya_ring_tbl =
&qadr->qadr_master_ring_tbl[0];
qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
} else if (sc->sc_ae_num == 2 || sc->sc_ae_num == 4) {
qadr->qadr_cya_ring_tbl =
&qadr->qadr_master_ring_tbl[0];
qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
qadr->qadr_cyb_ring_tbl =
&qadr->qadr_master_ring_tbl[1];
qadr->qadr_srv_mask[1] = QAT_SERVICE_CRYPTO_B;
}
return 0;
}
int
qat_adm_ring_init(struct qat_softc *sc)
{
struct qat_admin_rings *qadr = &sc->sc_admin_rings;
int error, i, j;
error = qat_alloc_dmamem(sc, &qadr->qadr_dma, 1, PAGE_SIZE, PAGE_SIZE);
if (error)
return error;
qadr->qadr_master_ring_tbl = qadr->qadr_dma.qdm_dma_vaddr;
MPASS(sc->sc_ae_num *
sizeof(struct fw_init_ring_table) <= PAGE_SIZE);
/* Initialize the Master Ring Table */
for (i = 0; i < sc->sc_ae_num; i++) {
struct fw_init_ring_table *firt =
&qadr->qadr_master_ring_tbl[i];
for (j = 0; j < INIT_RING_TABLE_SZ; j++) {
struct fw_init_ring_params *firp =
&firt->firt_bulk_rings[j];
firp->firp_reserved = 0;
firp->firp_curr_weight = QAT_DEFAULT_RING_WEIGHT;
firp->firp_init_weight = QAT_DEFAULT_RING_WEIGHT;
firp->firp_ring_pvl = QAT_DEFAULT_PVL;
}
memset(firt->firt_ring_mask, 0, sizeof(firt->firt_ring_mask));
}
error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_TX,
ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_req_size,
NULL, NULL, "admin_tx", &qadr->qadr_admin_tx);
if (error)
return error;
error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_RX,
ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_resp_size,
qat_adm_ring_intr, qadr, "admin_rx", &qadr->qadr_admin_rx);
if (error)
return error;
/*
* Finally set up the service indices into the Master Ring Table
* and convenient ring table pointers for each service enabled.
* Only the Admin rings are initialized.
*/
error = qat_adm_ring_init_ring_table(sc);
if (error)
return error;
/*
 * Count the active AEs per accelerator; this count drives the
 * shram partitioning in qat_adm_ring_build_shram_mask().
 */
for (i = 0; i < sc->sc_ae_num; i++) {
if (qadr->qadr_srv_mask[i])
qadr->qadr_active_aes_per_accel++;
}
return 0;
}
static void
qat_adm_ring_build_slice_mask(uint16_t *slice_mask, uint32_t srv_mask,
uint32_t init_shram)
{
uint16_t shram = 0, comn_req = 0;
if (init_shram)
shram = COMN_REQ_SHRAM_INIT_REQUIRED;
if (srv_mask & QAT_SERVICE_CRYPTO_A)
comn_req |= COMN_REQ_CY0_ONLY(shram);
if (srv_mask & QAT_SERVICE_CRYPTO_B)
comn_req |= COMN_REQ_CY1_ONLY(shram);
*slice_mask = comn_req;
}
static void
qat_adm_ring_build_shram_mask(uint64_t *shram_mask, uint32_t active_aes,
uint32_t ae)
{
*shram_mask = 0;
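/*
 * Partition the 64 shared-RAM units among the active AEs: a single AE
 * owns all of them, two AEs get 32 each, and three AEs split them
 * 23/23/18.
 */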
if (active_aes == 1) {
*shram_mask = ~(*shram_mask);
} else if (active_aes == 2) {
if (ae == 1)
*shram_mask = ((~(*shram_mask)) & 0xffffffff);
else
*shram_mask = ((~(*shram_mask)) & 0xffffffff00000000ull);
} else if (active_aes == 3) {
if (ae == 0)
*shram_mask = ((~(*shram_mask)) & 0x7fffff);
else if (ae == 1)
*shram_mask = ((~(*shram_mask)) & 0x3fffff800000ull);
else
*shram_mask = ((~(*shram_mask)) & 0xffffc00000000000ull);
} else {
panic("Only three services are supported in current version");
}
}
static int
qat_adm_ring_build_ring_table(struct qat_softc *sc, uint32_t ae)
{
struct qat_admin_rings *qadr = &sc->sc_admin_rings;
struct fw_init_ring_table *tbl;
struct fw_init_ring_params *param;
uint8_t srv_mask = sc->sc_admin_rings.qadr_srv_mask[ae];
if ((srv_mask & QAT_SERVICE_CRYPTO_A)) {
tbl = qadr->qadr_cya_ring_tbl;
} else if ((srv_mask & QAT_SERVICE_CRYPTO_B)) {
tbl = qadr->qadr_cyb_ring_tbl;
} else {
device_printf(sc->sc_dev,
"Invalid execution engine %d\n", ae);
return EINVAL;
}
param = &tbl->firt_bulk_rings[sc->sc_hw.qhw_ring_sym_tx];
param->firp_curr_weight = QAT_HI_PRIO_RING_WEIGHT;
param->firp_init_weight = QAT_HI_PRIO_RING_WEIGHT;
FW_INIT_RING_MASK_SET(tbl, sc->sc_hw.qhw_ring_sym_tx);
return 0;
}
static int
qat_adm_ring_build_init_msg(struct qat_softc *sc,
struct fw_init_req *initmsg, enum fw_init_cmd_id cmd, uint32_t ae,
struct qat_accel_init_cb *cb)
{
struct fw_init_set_ae_info_hdr *aehdr;
struct fw_init_set_ae_info *aeinfo;
struct fw_init_set_ring_info_hdr *ringhdr;
struct fw_init_set_ring_info *ringinfo;
int init_shram = 0, tgt_id, cluster_id;
uint32_t srv_mask;
srv_mask = sc->sc_admin_rings.qadr_srv_mask[
ae % sc->sc_ae_num];
memset(initmsg, 0, sizeof(struct fw_init_req));
qat_msg_req_type_populate(&initmsg->comn_hdr.arch_if,
ARCH_IF_REQ_QAT_FW_INIT,
sc->sc_admin_rings.qadr_admin_rx->qr_ring_id);
qat_msg_cmn_mid_populate(&initmsg->comn_mid, cb, 0, 0);
switch (cmd) {
case FW_INIT_CMD_SET_AE_INFO:
if (ae % sc->sc_ae_num == 0)
init_shram = 1;
if (ae >= sc->sc_ae_num) {
tgt_id = 1;
cluster_id = 1;
} else {
cluster_id = 0;
if (sc->sc_ae_mask)
tgt_id = 0;
else
tgt_id = 1;
}
aehdr = &initmsg->u.set_ae_info;
aeinfo = &initmsg->u1.set_ae_info;
aehdr->init_cmd_id = cmd;
/* XXX: does not support a sparse ae_mask. */
aehdr->init_trgt_id = ae;
aehdr->init_ring_cluster_id = cluster_id;
aehdr->init_qat_id = tgt_id;
qat_adm_ring_build_slice_mask(&aehdr->init_slice_mask, srv_mask,
init_shram);
qat_adm_ring_build_shram_mask(&aeinfo->init_shram_mask,
sc->sc_admin_rings.qadr_active_aes_per_accel,
ae % sc->sc_ae_num);
break;
case FW_INIT_CMD_SET_RING_INFO:
ringhdr = &initmsg->u.set_ring_info;
ringinfo = &initmsg->u1.set_ring_info;
ringhdr->init_cmd_id = cmd;
/* XXX: does not support a sparse ae_mask. */
ringhdr->init_trgt_id = ae;
/* XXX */
qat_adm_ring_build_ring_table(sc,
ae % sc->sc_ae_num);
ringhdr->init_ring_tbl_sz = sizeof(struct fw_init_ring_table);
ringinfo->init_ring_table_ptr =
sc->sc_admin_rings.qadr_dma.qdm_dma_seg.ds_addr +
((ae % sc->sc_ae_num) *
sizeof(struct fw_init_ring_table));
break;
default:
return ENOTSUP;
}
return 0;
}
static int
qat_adm_ring_send_init_msg_sync(struct qat_softc *sc,
enum fw_init_cmd_id cmd, uint32_t ae)
{
struct fw_init_req initmsg;
struct qat_accel_init_cb cb;
int error;
error = qat_adm_ring_build_init_msg(sc, &initmsg, cmd, ae, &cb);
if (error)
return error;
error = qat_etr_put_msg(sc, sc->sc_admin_rings.qadr_admin_tx,
(uint32_t *)&initmsg);
if (error)
return error;
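/* Wait up to 1.5 seconds for qat_adm_ring_intr() to post the response. */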
error = tsleep(&cb, PZERO, "qat_init", hz * 3 / 2);
if (error) {
device_printf(sc->sc_dev,
"Timed out initialization firmware: %d\n", error);
return error;
}
if (cb.qaic_status) {
device_printf(sc->sc_dev, "Failed to initialize firmware\n");
return EIO;
}
return error;
}
static int
qat_adm_ring_send_init_msg(struct qat_softc *sc,
enum fw_init_cmd_id cmd)
{
struct qat_admin_rings *qadr = &sc->sc_admin_rings;
int error;
uint32_t ae;
for (ae = 0; ae < sc->sc_ae_num; ae++) {
uint8_t srv_mask = qadr->qadr_srv_mask[ae];
switch (cmd) {
case FW_INIT_CMD_SET_AE_INFO:
case FW_INIT_CMD_SET_RING_INFO:
if (!srv_mask)
continue;
break;
case FW_INIT_CMD_TRNG_ENABLE:
case FW_INIT_CMD_TRNG_DISABLE:
if (!(srv_mask & QAT_SERVICE_CRYPTO_A))
continue;
break;
default:
return ENOTSUP;
}
error = qat_adm_ring_send_init_msg_sync(sc, cmd, ae);
if (error)
return error;
}
return 0;
}
int
qat_adm_ring_send_init(struct qat_softc *sc)
{
int error;
error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_AE_INFO);
if (error)
return error;
error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_RING_INFO);
if (error)
return error;
return 0;
}
static int
qat_adm_ring_intr(struct qat_softc *sc, void *arg, void *msg)
{
struct arch_if_resp_hdr *resp;
struct fw_init_resp *init_resp;
struct qat_accel_init_cb *init_cb;
int handled = 0;
resp = (struct arch_if_resp_hdr *)msg;
switch (resp->resp_type) {
case ARCH_IF_REQ_QAT_FW_INIT:
init_resp = (struct fw_init_resp *)msg;
init_cb = (struct qat_accel_init_cb *)
(uintptr_t)init_resp->comn_resp.opaque_data;
init_cb->qaic_status =
__SHIFTOUT(init_resp->comn_resp.comn_status,
COMN_RESP_INIT_ADMIN_STATUS);
wakeup(init_cb);
break;
default:
device_printf(sc->sc_dev,
"unknown resp type %d\n", resp->resp_type);
break;
}
return handled;
}
static inline uint16_t
qat_hw15_get_comn_req_flags(uint8_t ae)
{
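/*
 * Even-numbered crypto banks use the first cipher/auth slice pair,
 * odd-numbered banks the second; callers pass the bank index mod 2.
 */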
if (ae == 0) {
return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
COMN_REQ_AUTH0_SLICE_REQUIRED |
COMN_REQ_CIPHER0_SLICE_REQUIRED;
} else {
return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
COMN_REQ_AUTH1_SLICE_REQUIRED |
COMN_REQ_CIPHER1_SLICE_REQUIRED;
}
}
static uint32_t
qat_hw15_crypto_setup_cipher_desc(struct qat_crypto_desc *desc,
struct qat_session *qs, struct fw_cipher_hdr *cipher_hdr,
uint32_t hw_blk_offset, enum fw_slice next_slice)
{
desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
cipher_hdr->state_padding_sz = 0;
cipher_hdr->key_sz = qs->qs_cipher_klen / 8;
cipher_hdr->state_sz = desc->qcd_cipher_blk_sz / 8;
cipher_hdr->next_id = next_slice;
cipher_hdr->curr_id = FW_SLICE_CIPHER;
cipher_hdr->offset = hw_blk_offset / 8;
cipher_hdr->resrvd = 0;
return sizeof(struct hw_cipher_config) + qs->qs_cipher_klen;
}
static void
qat_hw15_crypto_setup_cipher_config(const struct qat_crypto_desc *desc,
const struct qat_session *qs, const struct cryptop *crp,
struct hw_cipher_config *cipher_config)
{
const uint8_t *key;
uint8_t *cipher_key;
cipher_config->val = qat_crypto_load_cipher_session(desc, qs);
cipher_config->reserved = 0;
cipher_key = (uint8_t *)(cipher_config + 1);
if (crp != NULL && crp->crp_cipher_key != NULL)
key = crp->crp_cipher_key;
else
key = qs->qs_cipher_key;
memcpy(cipher_key, key, qs->qs_cipher_klen);
}
static uint32_t
qat_hw15_crypto_setup_auth_desc(struct qat_crypto_desc *desc,
struct qat_session *qs, struct fw_auth_hdr *auth_hdr,
uint32_t ctrl_blk_offset, uint32_t hw_blk_offset,
enum fw_slice next_slice)
{
const struct qat_sym_hash_def *hash_def;
(void)qat_crypto_load_auth_session(desc, qs, &hash_def);
auth_hdr->next_id = next_slice;
auth_hdr->curr_id = FW_SLICE_AUTH;
auth_hdr->offset = hw_blk_offset / 8;
auth_hdr->resrvd = 0;
auth_hdr->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
auth_hdr->u.inner_prefix_sz = 0;
auth_hdr->outer_prefix_sz = 0;
auth_hdr->final_sz = hash_def->qshd_alg->qshai_digest_len;
auth_hdr->inner_state1_sz =
roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
auth_hdr->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
auth_hdr->inner_state2_sz =
roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
auth_hdr->inner_state2_off = auth_hdr->offset +
((sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz) / 8);
auth_hdr->outer_config_off = 0;
auth_hdr->outer_state1_sz = 0;
auth_hdr->outer_res_sz = 0;
auth_hdr->outer_prefix_off = 0;
desc->qcd_auth_sz = hash_def->qshd_alg->qshai_sah->hashsize;
desc->qcd_state_storage_sz = (sizeof(struct hw_auth_counter) +
roundup(hash_def->qshd_alg->qshai_state_size, 8)) / 8;
desc->qcd_gcm_aad_sz_offset1 = desc->qcd_auth_offset +
sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
AES_BLOCK_LEN;
desc->qcd_gcm_aad_sz_offset2 = ctrl_blk_offset +
offsetof(struct fw_auth_hdr, u.aad_sz);
return sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
auth_hdr->inner_state2_sz;
}
static void
qat_hw15_crypto_setup_auth_setup(const struct qat_crypto_desc *desc,
const struct qat_session *qs, const struct cryptop *crp,
struct hw_auth_setup *auth_setup)
{
const struct qat_sym_hash_def *hash_def;
const uint8_t *key;
uint8_t *state1, *state2;
uint32_t state_sz, state1_sz, state2_sz, state1_pad_len, state2_pad_len;
auth_setup->auth_config.config = qat_crypto_load_auth_session(desc, qs,
&hash_def);
auth_setup->auth_config.reserved = 0;
auth_setup->auth_counter.counter =
htobe32(hash_def->qshd_qat->qshqi_auth_counter);
auth_setup->auth_counter.reserved = 0;
state1 = (uint8_t *)(auth_setup + 1);
state2 = state1 + roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
switch (qs->qs_auth_algo) {
case HW_AUTH_ALGO_GALOIS_128:
qat_crypto_gmac_precompute(desc, qs->qs_cipher_key,
qs->qs_cipher_klen, hash_def, state2);
break;
case HW_AUTH_ALGO_SHA1:
case HW_AUTH_ALGO_SHA256:
case HW_AUTH_ALGO_SHA384:
case HW_AUTH_ALGO_SHA512:
state_sz = hash_def->qshd_alg->qshai_state_size;
state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
state2_sz = roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
if (qs->qs_auth_mode == HW_AUTH_MODE1) {
/* Zero the padding introduced by rounding up the state sizes. */
state1_pad_len = state1_sz - state_sz;
state2_pad_len = state2_sz - state_sz;
if (state1_pad_len > 0)
memset(state1 + state_sz, 0, state1_pad_len);
if (state2_pad_len > 0)
memset(state2 + state_sz, 0, state2_pad_len);
}
switch (qs->qs_auth_mode) {
case HW_AUTH_MODE0:
memcpy(state1, hash_def->qshd_alg->qshai_init_state,
state1_sz);
/* Override for mode 0 hashes. */
auth_setup->auth_counter.counter = 0;
break;
case HW_AUTH_MODE1:
if (crp != NULL && crp->crp_auth_key != NULL)
key = crp->crp_auth_key;
else
key = qs->qs_auth_key;
if (key != NULL) {
qat_crypto_hmac_precompute(desc, key,
qs->qs_auth_klen, hash_def, state1, state2);
}
break;
default:
panic("%s: unhandled auth mode %d", __func__,
qs->qs_auth_mode);
}
break;
default:
panic("%s: unhandled auth algorithm %d", __func__,
qs->qs_auth_algo);
}
}
void
qat_hw15_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
struct qat_crypto_desc *desc)
{
struct fw_cipher_hdr *cipher_hdr;
struct fw_auth_hdr *auth_hdr;
struct fw_la_bulk_req *req_cache;
struct hw_auth_setup *auth_setup;
struct hw_cipher_config *cipher_config;
uint32_t ctrl_blk_sz, ctrl_blk_offset, hw_blk_offset;
int i;
uint16_t la_cmd_flags;
uint8_t req_params_sz;
uint8_t *ctrl_blk_ptr, *hw_blk_ptr;
ctrl_blk_sz = 0;
if (qs->qs_cipher_algo != HW_CIPHER_ALGO_NULL)
ctrl_blk_sz += sizeof(struct fw_cipher_hdr);
if (qs->qs_auth_algo != HW_AUTH_ALGO_NULL)
ctrl_blk_sz += sizeof(struct fw_auth_hdr);
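/*
 * The content descriptor starts with the control block (cipher and/or
 * auth headers) followed by the hardware configuration blocks (cipher
 * config plus key, auth setup plus hash state).
 */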
ctrl_blk_ptr = desc->qcd_content_desc;
ctrl_blk_offset = 0;
hw_blk_ptr = ctrl_blk_ptr + ctrl_blk_sz;
hw_blk_offset = 0;
la_cmd_flags = 0;
req_params_sz = 0;
for (i = 0; i < MAX_FW_SLICE; i++) {
switch (desc->qcd_slices[i]) {
case FW_SLICE_CIPHER:
cipher_hdr = (struct fw_cipher_hdr *)(ctrl_blk_ptr +
ctrl_blk_offset);
cipher_config = (struct hw_cipher_config *)(hw_blk_ptr +
hw_blk_offset);
desc->qcd_cipher_offset = ctrl_blk_sz + hw_blk_offset;
hw_blk_offset += qat_hw15_crypto_setup_cipher_desc(desc,
qs, cipher_hdr, hw_blk_offset,
desc->qcd_slices[i + 1]);
qat_hw15_crypto_setup_cipher_config(desc, qs, NULL,
cipher_config);
ctrl_blk_offset += sizeof(struct fw_cipher_hdr);
req_params_sz += sizeof(struct fw_la_cipher_req_params);
break;
case FW_SLICE_AUTH:
auth_hdr = (struct fw_auth_hdr *)(ctrl_blk_ptr +
ctrl_blk_offset);
auth_setup = (struct hw_auth_setup *)(hw_blk_ptr +
hw_blk_offset);
desc->qcd_auth_offset = ctrl_blk_sz + hw_blk_offset;
hw_blk_offset += qat_hw15_crypto_setup_auth_desc(desc,
qs, auth_hdr, ctrl_blk_offset, hw_blk_offset,
desc->qcd_slices[i + 1]);
qat_hw15_crypto_setup_auth_setup(desc, qs, NULL,
auth_setup);
ctrl_blk_offset += sizeof(struct fw_auth_hdr);
req_params_sz += sizeof(struct fw_la_auth_req_params);
la_cmd_flags |= LA_FLAGS_RET_AUTH_RES;
/* The digest is returned rather than verified in hardware. */
break;
case FW_SLICE_DRAM_WR:
i = MAX_FW_SLICE; /* end of chain */
break;
default:
MPASS(0);
break;
}
}
desc->qcd_hdr_sz = ctrl_blk_offset / 8;
desc->qcd_hw_blk_sz = hw_blk_offset / 8;
req_cache = (struct fw_la_bulk_req *)desc->qcd_req_cache;
qat_msg_req_type_populate(
&req_cache->comn_hdr.arch_if,
ARCH_IF_REQ_QAT_FW_LA, 0);
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
la_cmd_flags |= LA_FLAGS_PROTO_GCM | LA_FLAGS_GCM_IV_LEN_FLAG;
else
la_cmd_flags |= LA_FLAGS_PROTO_NO;
qat_msg_params_populate(req_cache, desc, req_params_sz,
la_cmd_flags, 0);
bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
}
static void
qat_hw15_crypto_req_setkey(const struct qat_crypto_desc *desc,
const struct qat_session *qs, struct qat_sym_cookie *qsc,
struct fw_la_bulk_req *bulk_req, struct cryptop *crp)
{
struct hw_auth_setup *auth_setup;
struct hw_cipher_config *cipher_config;
uint8_t *cdesc;
int i;
cdesc = qsc->qsc_content_desc;
memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
for (i = 0; i < MAX_FW_SLICE; i++) {
switch (desc->qcd_slices[i]) {
case FW_SLICE_CIPHER:
cipher_config = (struct hw_cipher_config *)
(cdesc + desc->qcd_cipher_offset);
qat_hw15_crypto_setup_cipher_config(desc, qs, crp,
cipher_config);
break;
case FW_SLICE_AUTH:
auth_setup = (struct hw_auth_setup *)
(cdesc + desc->qcd_auth_offset);
qat_hw15_crypto_setup_auth_setup(desc, qs, crp,
auth_setup);
break;
case FW_SLICE_DRAM_WR:
i = MAX_FW_SLICE; /* end of chain */
break;
default:
MPASS(0);
}
}
bulk_req->comn_hdr.content_desc_addr = qsc->qsc_content_desc_paddr;
}
void
qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
struct qat_session *qs, struct qat_crypto_desc const *desc,
struct qat_sym_cookie *qsc, struct cryptop *crp)
{
struct qat_sym_bulk_cookie *qsbc;
struct fw_la_bulk_req *bulk_req;
struct fw_la_cipher_req_params *cipher_req;
struct fw_la_auth_req_params *auth_req;
bus_addr_t digest_paddr;
uint8_t *aad_szp2, *req_params_ptr;
uint32_t aad_sz, *aad_szp1;
enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
enum fw_slice next_slice;
qsbc = &qsc->u.qsc_bulk_cookie;
bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE);
bulk_req->comn_hdr.arch_if.resp_pipe_id = qcb->qcb_sym_rx->qr_ring_id;
bulk_req->comn_hdr.comn_req_flags =
qat_hw15_get_comn_req_flags(qcb->qcb_bank % 2);
bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;
bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr;
bulk_req->comn_ftr.next_request_addr = 0;
bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
if (__predict_false(crp->crp_cipher_key != NULL ||
crp->crp_auth_key != NULL)) {
qat_hw15_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
}
digest_paddr = 0;
if (desc->qcd_auth_sz != 0)
digest_paddr = qsc->qsc_auth_res_paddr;
req_params_ptr = qsbc->qsbc_req_params_buf;
memset(req_params_ptr, 0, sizeof(qsbc->qsbc_req_params_buf));
/*
* The SG list layout is a bit different for GCM and GMAC, it's simpler
* to handle those cases separately.
*/
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr;
auth_req = (struct fw_la_auth_req_params *)
(req_params_ptr + sizeof(struct fw_la_cipher_req_params));
cipher_req->cipher_state_sz = desc->qcd_cipher_blk_sz / 8;
cipher_req->curr_id = FW_SLICE_CIPHER;
if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
cipher_req->next_id = FW_SLICE_DRAM_WR;
else
cipher_req->next_id = FW_SLICE_AUTH;
cipher_req->state_address = qsc->qsc_iv_buf_paddr;
if (cmd_id != FW_LA_CMD_AUTH) {
/*
* Don't fill out the cipher block if we're doing GMAC
* only.
*/
cipher_req->cipher_off = 0;
cipher_req->cipher_len = crp->crp_payload_length;
}
auth_req->curr_id = FW_SLICE_AUTH;
if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
auth_req->next_id = FW_SLICE_CIPHER;
else
auth_req->next_id = FW_SLICE_DRAM_WR;
auth_req->auth_res_address = digest_paddr;
auth_req->auth_res_sz = desc->qcd_auth_sz;
auth_req->auth_off = 0;
auth_req->auth_len = crp->crp_payload_length;
auth_req->hash_state_sz =
roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) >> 3;
auth_req->u1.aad_addr = crp->crp_aad_length > 0 ?
qsc->qsc_gcm_aad_paddr : 0;
/*
* Update the hash state block if necessary. This only occurs
* when the AAD length changes between requests in a session and
* is synchronized by qat_process().
*/
aad_sz = htobe32(crp->crp_aad_length);
aad_szp1 = (uint32_t *)(
__DECONST(uint8_t *, desc->qcd_content_desc) +
desc->qcd_gcm_aad_sz_offset1);
aad_szp2 = __DECONST(uint8_t *, desc->qcd_content_desc) +
desc->qcd_gcm_aad_sz_offset2;
if (__predict_false(*aad_szp1 != aad_sz)) {
*aad_szp1 = aad_sz;
*aad_szp2 = (uint8_t)roundup2(crp->crp_aad_length,
QAT_AES_GCM_AAD_ALIGN);
bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
qs->qs_desc_mem.qdm_dma_map,
BUS_DMASYNC_PREWRITE);
}
} else {
cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr;
if (cmd_id != FW_LA_CMD_AUTH) {
if (cmd_id == FW_LA_CMD_CIPHER ||
cmd_id == FW_LA_CMD_HASH_CIPHER)
next_slice = FW_SLICE_DRAM_WR;
else
next_slice = FW_SLICE_AUTH;
cipher_req->cipher_state_sz =
desc->qcd_cipher_blk_sz / 8;
cipher_req->curr_id = FW_SLICE_CIPHER;
cipher_req->next_id = next_slice;
cipher_req->cipher_off = crp->crp_aad_length == 0 ? 0 :
crp->crp_payload_start - crp->crp_aad_start;
cipher_req->cipher_len = crp->crp_payload_length;
cipher_req->state_address = qsc->qsc_iv_buf_paddr;
}
if (cmd_id != FW_LA_CMD_CIPHER) {
if (cmd_id == FW_LA_CMD_AUTH)
auth_req = (struct fw_la_auth_req_params *)
req_params_ptr;
else
auth_req = (struct fw_la_auth_req_params *)
(cipher_req + 1);
if (cmd_id == FW_LA_CMD_HASH_CIPHER)
next_slice = FW_SLICE_CIPHER;
else
next_slice = FW_SLICE_DRAM_WR;
auth_req->curr_id = FW_SLICE_AUTH;
auth_req->next_id = next_slice;
auth_req->auth_res_address = digest_paddr;
auth_req->auth_res_sz = desc->qcd_auth_sz;
auth_req->auth_len =
crp->crp_payload_length + crp->crp_aad_length;
auth_req->auth_off = 0;
auth_req->hash_state_sz = 0;
auth_req->u1.prefix_addr = desc->qcd_hash_state_paddr +
desc->qcd_state_storage_sz;
}
}
}
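
The slice chaining built above is easier to see in isolation. Below is a
minimal sketch (illustrative values, not driver code) of the curr_id/next_id
hand-off for a cipher-then-hash request; the last parameter block in the
chain always points at the DRAM-write slice so the firmware writes the
result back to memory:

/* Sketch: chain cipher and auth request parameter blocks. */
struct fw_la_cipher_req_params *c = (void *)req_params_ptr;
struct fw_la_auth_req_params *a = (void *)(c + 1);

c->curr_id = FW_SLICE_CIPHER;
c->next_id = FW_SLICE_AUTH;	/* hash the ciphertext next */
a->curr_id = FW_SLICE_AUTH;
a->next_id = FW_SLICE_DRAM_WR;	/* end of chain: write back */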

sys/dev/qat/qat_hw15reg.h Normal file

@ -0,0 +1,635 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_hw15reg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_HW15REG_H_
#define _DEV_PCI_QAT_HW15REG_H_
/* Default message size in bytes */
#define FW_REQ_DEFAULT_SZ_HW15 64
#define FW_RESP_DEFAULT_SZ_HW15 64
#define ADMIN_RING_SIZE 256
#define RING_NUM_ADMIN_TX 0
#define RING_NUM_ADMIN_RX 1
/* -------------------------------------------------------------------------- */
/* accel */
#define ARCH_IF_FLAGS_VALID_FLAG __BIT(7)
#define ARCH_IF_FLAGS_RESP_RING_TYPE __BITS(4, 3)
#define ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT 3
#define ARCH_IF_FLAGS_RESP_RING_TYPE_SCRATCH (0 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT)
#define ARCH_IF_FLAGS_RESP_RING_TYPE_NN (1 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT)
#define ARCH_IF_FLAGS_RESP_RING_TYPE_ET (2 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT)
#define ARCH_IF_FLAGS_RESP_TYPE __BITS(2, 0)
#define ARCH_IF_FLAGS_RESP_TYPE_SHIFT 0
#define ARCH_IF_FLAGS_RESP_TYPE_A (0 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT)
#define ARCH_IF_FLAGS_RESP_TYPE_B (1 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT)
#define ARCH_IF_FLAGS_RESP_TYPE_C (2 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT)
#define ARCH_IF_FLAGS_RESP_TYPE_S (3 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT)
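
As a hedged illustration (this particular combination is chosen for
exposition and does not appear in the driver), a flags byte for a valid
request routed to an ET response ring with a type-S response would be
composed as:

/* Illustrative only: compose an arch_if request flags byte. */
uint8_t flags = ARCH_IF_FLAGS_VALID_FLAG |
    ARCH_IF_FLAGS_RESP_RING_TYPE_ET |
    ARCH_IF_FLAGS_RESP_TYPE_S;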
enum arch_if_req {
ARCH_IF_REQ_NULL, /* NULL request type */
/* QAT-AE Service Request Type IDs - 01 to 20 */
ARCH_IF_REQ_QAT_FW_INIT, /* QAT-FW Initialization Request */
ARCH_IF_REQ_QAT_FW_ADMIN, /* QAT-FW Administration Request */
ARCH_IF_REQ_QAT_FW_PKE, /* QAT-FW PKE Request */
ARCH_IF_REQ_QAT_FW_LA, /* QAT-FW Lookaside Request */
ARCH_IF_REQ_QAT_FW_IPSEC, /* QAT-FW IPSec Request */
ARCH_IF_REQ_QAT_FW_SSL, /* QAT-FW SSL Request */
ARCH_IF_REQ_QAT_FW_DMA, /* QAT-FW DMA Request */
ARCH_IF_REQ_QAT_FW_STORAGE, /* QAT-FW Storage Request */
ARCH_IF_REQ_QAT_FW_COMPRESS, /* QAT-FW Compression Request */
ARCH_IF_REQ_QAT_FW_PATMATCH, /* QAT-FW Pattern Matching Request */
/* IP Service (Range Match and Exception) Blocks Request Type IDs 21 - 30 */
ARCH_IF_REQ_RM_FLOW_MISS = 21, /* RM flow miss request */
ARCH_IF_REQ_RM_FLOW_TIMER_EXP, /* RM flow timer exp Request */
ARCH_IF_REQ_IP_SERVICES_RFC_LOOKUP_UPDATE, /* RFC Lookup request */
ARCH_IF_REQ_IP_SERVICES_CONFIG_UPDATE, /* Config Update request */
ARCH_IF_REQ_IP_SERVICES_FCT_CONFIG, /* FCT Config request */
ARCH_IF_REQ_IP_SERVICES_NEXT_HOP_TIMER_EXPIRY, /* NH Timer expiry request */
ARCH_IF_REQ_IP_SERVICES_EXCEPTION, /* Exception processing request */
ARCH_IF_REQ_IP_SERVICES_STACK_DRIVER, /* Send to SD request */
ARCH_IF_REQ_IP_SERVICES_ACTION_HANDLER, /* Send to AH request */
ARCH_IF_REQ_IP_SERVICES_EVENT_HANDLER, /* Send to EH request */
ARCH_IF_REQ_DELIMITER /* End delimiter */
};
struct arch_if_req_hdr {
uint8_t resp_dest_id;
/* Opaque identifier passed from the request to the response to allow
* the response handler to perform any further processing */
uint8_t resp_pipe_id;
/* Response pipe to write the response associated with this request to */
uint8_t req_type;
/* Definition of the service described by the request */
uint8_t flags;
/* Request and response control flags */
};
struct arch_if_resp_hdr {
uint8_t dest_id;
/* Opaque identifier passed from the request to the response to allow
* the response handler to perform any further processing */
uint8_t serv_id;
/* Definition of the service id generating the response */
uint8_t resp_type;
/* Definition of the service described by the request */
uint8_t flags;
/* Request and response control flags */
};
struct fw_comn_req_hdr {
struct arch_if_req_hdr arch_if;
/* Common arch fields used by all ICP interface requests. Remaining
* fields are specific to the common QAT FW service. */
uint16_t comn_req_flags;
/* Flags used to describe common processing required by the request and
* the meaning of parameters in it i.e. differentiating between a buffer
* descriptor and a flat buffer pointer in the source (src) and destination
* (dest) data address fields. Full definition of the fields is given
* below */
uint8_t content_desc_params_sz;
/* Size of the content descriptor parameters in quad words. These
* parameters describe the session setup configuration info for the
* slices that this request relies upon i.e. the configuration word and
* cipher key needed by the cipher slice if there is a request for cipher
* processing. The format of the parameters are contained in icp_qat_hw.h
* and vary depending on the algorithm and mode being used. It is the
* client's responsibility to ensure this structure is correctly packed */
uint8_t content_desc_hdr_sz;
/* Size of the content descriptor header in quad words. This information
* is read into the QAT AE xfr registers */
uint64_t content_desc_addr;
/* Address of the content descriptor containing both the content header
* the size of which is defined by content_desc_hdr_sz followed by the
* content parameters whose size is described by content_desc_params_sz
*/
};
struct fw_comn_req_mid {
uint64_t opaque_data;
/* Opaque data passed unmodified from the request to response messages
* by firmware (fw) */
uint64_t src_data_addr;
/* Generic definition of the source data supplied to the QAT AE. The
* common flags are used to further describe the attributes of this
* field */
uint64_t dest_data_addr;
/* Generic definition of the destination data supplied to the QAT AE.
* The common flags are used to further describe the attributes of this
* field */
};
union fw_comn_req_ftr {
uint64_t next_request_addr;
/* Overloaded field, for stateful requests, this field is the pointer to
next request descriptor */
struct {
uint32_t src_length;
/* Length of the source flat buffer in case the src buffer type is flat */
uint32_t dst_length;
/* Length of the destination flat buffer in case the dst buffer type is flat */
} s;
};
union fw_comn_error {
struct {
uint8_t resrvd; /* 8 bit reserved field */
uint8_t comn_err_code; /* 8 bit common error code */
} s;
/* Structure which is used for non-compression responses */
struct {
uint8_t xlat_err_code; /* 8 bit translator error field */
uint8_t cmp_err_code; /* 8 bit compression error field */
} s1;
/* Structure which is used for compression responses */
};
struct fw_comn_resp_hdr {
struct arch_if_resp_hdr arch_if;
/* Common arch fields used by all ICP interface response messages. The
* remaining fields are specific to the QAT FW */
union fw_comn_error comn_error;
/* This field is overloaded to allow for one 8 bit common error field
* or two 8 bit error fields from compression and translator */
uint8_t comn_status;
/* Status field which specifies which slice(s) report an error */
uint8_t serv_cmd_id;
/* For services that define multiple commands this field represents the
* command. If only 1 command is supported then this field will be 0 */
uint64_t opaque_data;
/* Opaque data passed from the request to the response message */
};
#define RING_MASK_TABLE_ENTRY_LOG_SZ (5)
#define FW_INIT_RING_MASK_SET(table, id) \
table->firt_ring_mask[id >> RING_MASK_TABLE_ENTRY_LOG_SZ] =\
table->firt_ring_mask[id >> RING_MASK_TABLE_ENTRY_LOG_SZ] | \
(1 << (id & 0x1f))
struct fw_init_ring_params {
uint8_t firp_curr_weight; /* Current ring weight (working copy),
* has to be equal to init_weight */
uint8_t firp_init_weight; /* Initial ring weight: -1 ... 0
* -1 is equal to FF, -2 is equal to FE,
* the weighting uses negative logic
* where FF means poll the ring once,
* -2 is poll the ring twice,
* 0 is poll the ring 255 times */
uint8_t firp_ring_pvl; /* Ring Privilege Level. */
uint8_t firp_reserved; /* Reserved field which must be set
* to 0 by the client */
};
#define INIT_RING_TABLE_SZ 128
#define INIT_RING_TABLE_LW_SZ 4
struct fw_init_ring_table {
struct fw_init_ring_params firt_bulk_rings[INIT_RING_TABLE_SZ];
/* array of ring parameters */
uint32_t firt_ring_mask[INIT_RING_TABLE_LW_SZ];
/* Structure to hold the bit masks for
* 128 rings. */
};
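
For illustration only (the ring id and weight are hypothetical), enabling a
single ring in a zeroed table looks like this; note the negative-logic
weight encoding described above and that firp_curr_weight must equal
firp_init_weight:

/* Sketch: enable ring 10 in a zeroed fw_init_ring_table. */
struct fw_init_ring_table tbl = { 0 }, *table = &tbl;
int id = 10;

table->firt_bulk_rings[id].firp_init_weight = 0xfe; /* -2: poll twice */
table->firt_bulk_rings[id].firp_curr_weight = 0xfe; /* must match init */
FW_INIT_RING_MASK_SET(table, id); /* sets bit 10 of firt_ring_mask[0] */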
struct fw_init_set_ae_info_hdr {
uint16_t init_slice_mask; /* Init time flags to set the ownership of the slices */
uint16_t resrvd; /* Reserved field and must be set to 0 by the client */
uint8_t init_qat_id; /* Init time qat id described in the request */
uint8_t init_ring_cluster_id; /* Init time ring cluster Id */
uint8_t init_trgt_id; /* Init time target AE id described in the request */
uint8_t init_cmd_id; /* Init time command that is described in the request */
};
struct fw_init_set_ae_info {
uint64_t init_shram_mask; /* Init time shram mask to set the page ownership in page pool of AE*/
uint64_t resrvd; /* Reserved field and must be set to 0 by the client */
};
struct fw_init_set_ring_info_hdr {
uint32_t resrvd; /* Reserved field and must be set to 0 by the client */
uint16_t init_ring_tbl_sz; /* Init time information to state size of the ring table */
uint8_t init_trgt_id; /* Init time target AE id described in the request */
uint8_t init_cmd_id; /* Init time command that is described in the request */
};
struct fw_init_set_ring_info {
uint64_t init_ring_table_ptr; /* Pointer to weighting information for 128 rings */
uint64_t resrvd; /* Reserved field and must be set to 0 by the client */
};
struct fw_init_trng_hdr {
uint32_t resrvd; /* Reserved field and must be set to 0 by the client */
union {
uint8_t resrvd; /* Reserved field set to 0 if cmd type is trng disable */
uint8_t init_trng_cfg_sz; /* Size of the trng config word in QW*/
} u;
uint8_t resrvd1; /* Reserved field and must be set to 0 by the client */
uint8_t init_trgt_id; /* Init time target AE id described in the request */
uint8_t init_cmd_id; /* Init time command that is described in the request */
};
struct fw_init_trng {
union {
uint64_t resrvd; /* Reserved field set to 0 if cmd type is trng disable */
uint64_t init_trng_cfg_ptr; /* Pointer to TRNG Slice config word*/
} u;
uint64_t resrvd; /* Reserved field and must be set to 0 by the client */
};
struct fw_init_req {
struct fw_comn_req_hdr comn_hdr; /* Common request header */
union {
struct fw_init_set_ae_info_hdr set_ae_info;
/* INIT SET_AE_INFO request header structure */
struct fw_init_set_ring_info_hdr set_ring_info;
/* INIT SET_RING_INFO request header structure */
struct fw_init_trng_hdr init_trng;
/* INIT TRNG ENABLE/DISABLE request header structure */
} u;
struct fw_comn_req_mid comn_mid; /* Common request middle section */
union {
struct fw_init_set_ae_info set_ae_info;
/* INIT SET_AE_INFO request data structure */
struct fw_init_set_ring_info set_ring_info;
/* INIT SET_RING_INFO request data structure */
struct fw_init_trng init_trng;
/* INIT TRNG ENABLE/DISABLE request data structure */
} u1;
};
enum fw_init_cmd_id {
FW_INIT_CMD_SET_AE_INFO, /* Setup AE Info command type */
FW_INIT_CMD_SET_RING_INFO, /* Setup Ring Info command type */
FW_INIT_CMD_TRNG_ENABLE, /* TRNG Enable command type */
FW_INIT_CMD_TRNG_DISABLE, /* TRNG Disable command type */
FW_INIT_CMD_DELIMITER /* Delimiter type */
};
struct fw_init_resp {
struct fw_comn_resp_hdr comn_resp; /* Common interface response */
uint8_t resrvd[64 - sizeof(struct fw_comn_resp_hdr)];
/* XXX FW_RESP_DEFAULT_SZ_HW15 */
/* Reserved padding out to the default response size */
};
/* -------------------------------------------------------------------------- */
/* look aside */
#define COMN_REQ_ORD UINT16_C(0x8000)
#define COMN_REQ_ORD_SHIFT 15
#define COMN_REQ_ORD_NONE (0 << COMN_REQ_ORD_SHIFT)
#define COMN_REQ_ORD_STRICT (1 << COMN_REQ_ORD_SHIFT)
#define COMN_REQ_PTR_TYPE UINT16_C(0x4000)
#define COMN_REQ_PTR_TYPE_SHIFT 14
#define COMN_REQ_PTR_TYPE_FLAT (0 << COMN_REQ_PTR_TYPE_SHIFT)
#define COMN_REQ_PTR_TYPE_SGL (1 << COMN_REQ_PTR_TYPE_SHIFT)
#define COMN_REQ_RESERVED UINT16_C(0x2000)
#define COMN_REQ_SHRAM_INIT UINT16_C(0x1000)
#define COMN_REQ_SHRAM_INIT_SHIFT 12
#define COMN_REQ_SHRAM_INIT_REQUIRED (1 << COMN_REQ_SHRAM_INIT_SHIFT)
#define COMN_REQ_REGEX_SLICE UINT16_C(0x0800)
#define COMN_REQ_REGEX_SLICE_SHIFT 11
#define COMN_REQ_REGEX_SLICE_REQUIRED (1 << COMN_REQ_REGEX_SLICE_SHIFT)
#define COMN_REQ_XLAT_SLICE UINT16_C(0x0400)
#define COMN_REQ_XLAT_SLICE_SHIFT 10
#define COMN_REQ_XLAT_SLICE_REQUIRED (1 << COMN_REQ_XLAT_SLICE_SHIFT)
#define COMN_REQ_CPR_SLICE UINT16_C(0x0200)
#define COMN_REQ_CPR_SLICE_SHIFT 9
#define COMN_REQ_CPR_SLICE_REQUIRED (1 << COMN_REQ_CPR_SLICE_SHIFT)
#define COMN_REQ_BULK_SLICE UINT16_C(0x0100)
#define COMN_REQ_BULK_SLICE_SHIFT 8
#define COMN_REQ_BULK_SLICE_REQUIRED (1 << COMN_REQ_BULK_SLICE_SHIFT)
#define COMN_REQ_STORAGE_SLICE UINT16_C(0x0080)
#define COMN_REQ_STORAGE_SLICE_SHIFT 7
#define COMN_REQ_STORAGE_SLICE_REQUIRED (1 << COMN_REQ_STORAGE_SLICE_SHIFT)
#define COMN_REQ_RND_SLICE UINT16_C(0x0040)
#define COMN_REQ_RND_SLICE_SHIFT 6
#define COMN_REQ_RND_SLICE_REQUIRED (1 << COMN_REQ_RND_SLICE_SHIFT)
#define COMN_REQ_PKE1_SLICE UINT16_C(0x0020)
#define COMN_REQ_PKE1_SLICE_SHIFT 5
#define COMN_REQ_PKE1_SLICE_REQUIRED (1 << COMN_REQ_PKE1_SLICE_SHIFT)
#define COMN_REQ_PKE0_SLICE UINT16_C(0x0010)
#define COMN_REQ_PKE0_SLICE_SHIFT 4
#define COMN_REQ_PKE0_SLICE_REQUIRED (1 << COMN_REQ_PKE0_SLICE_SHIFT)
#define COMN_REQ_AUTH1_SLICE UINT16_C(0x0008)
#define COMN_REQ_AUTH1_SLICE_SHIFT 3
#define COMN_REQ_AUTH1_SLICE_REQUIRED (1 << COMN_REQ_AUTH1_SLICE_SHIFT)
#define COMN_REQ_AUTH0_SLICE UINT16_C(0x0004)
#define COMN_REQ_AUTH0_SLICE_SHIFT 2
#define COMN_REQ_AUTH0_SLICE_REQUIRED (1 << COMN_REQ_AUTH0_SLICE_SHIFT)
#define COMN_REQ_CIPHER1_SLICE UINT16_C(0x0002)
#define COMN_REQ_CIPHER1_SLICE_SHIFT 1
#define COMN_REQ_CIPHER1_SLICE_REQUIRED (1 << COMN_REQ_CIPHER1_SLICE_SHIFT)
#define COMN_REQ_CIPHER0_SLICE UINT16_C(0x0001)
#define COMN_REQ_CIPHER0_SLICE_SHIFT 0
#define COMN_REQ_CIPHER0_SLICE_REQUIRED (1 << COMN_REQ_CIPHER0_SLICE_SHIFT)
#define COMN_REQ_CY0_ONLY(shram) \
(COMN_REQ_ORD_STRICT | \
COMN_REQ_PTR_TYPE_FLAT | \
(shram) | \
COMN_REQ_RND_SLICE_REQUIRED | \
COMN_REQ_PKE0_SLICE_REQUIRED | \
COMN_REQ_AUTH0_SLICE_REQUIRED | \
COMN_REQ_CIPHER0_SLICE_REQUIRED)
#define COMN_REQ_CY1_ONLY(shram) \
(COMN_REQ_ORD_STRICT | \
COMN_REQ_PTR_TYPE_FLAT | \
(shram) | \
COMN_REQ_PKE1_SLICE_REQUIRED | \
COMN_REQ_AUTH1_SLICE_REQUIRED | \
COMN_REQ_CIPHER1_SLICE_REQUIRED)
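
A hedged sketch of how these compose, mirroring the per-bank flag selection
seen in qat_hw15_crypto_setup_req_params above (the helper name and the
assumption that even banks map to the CY0 slice set are illustrative):

/* Sketch: pick the common request flags for a crypto bank. */
static uint16_t
example_comn_req_flags(uint8_t bank)
{
	if (bank % 2 == 0)
		return (COMN_REQ_CY0_ONLY(COMN_REQ_SHRAM_INIT_REQUIRED));
	return (COMN_REQ_CY1_ONLY(COMN_REQ_SHRAM_INIT_REQUIRED));
}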
#define COMN_RESP_CRYPTO_STATUS __BIT(7)
#define COMN_RESP_PKE_STATUS __BIT(6)
#define COMN_RESP_CMP_STATUS __BIT(5)
#define COMN_RESP_XLAT_STATUS __BIT(4)
#define COMN_RESP_PM_STATUS __BIT(3)
#define COMN_RESP_INIT_ADMIN_STATUS __BIT(2)
#define COMN_STATUS_FLAG_OK 0
#define COMN_STATUS_FLAG_ERROR 1
struct fw_la_ssl_tls_common {
uint8_t out_len; /* Number of bytes of key material to output. */
uint8_t label_len; /* Number of bytes of label for SSL and bytes
* for TLS key generation */
};
struct fw_la_mgf_common {
uint8_t hash_len;
/* Number of bytes of hash output by the QAT per iteration */
uint8_t seed_len;
/* Number of bytes of seed provided in src buffer for MGF1 */
};
struct fw_cipher_hdr {
uint8_t state_sz;
/* State size in quad words of the cipher algorithm used in this session.
* Set to zero if the algorithm doesn't provide any state */
uint8_t offset;
/* Quad word offset from the content descriptor parameters address i.e.
* (content_address + (cd_hdr_sz << 3)) to the parameters for the cipher
* processing */
uint8_t curr_id;
/* Initialised with the cipher slice type */
uint8_t next_id;
/* Set to the next slice to pass the ciphered data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after cipher */
uint16_t resrvd;
/* Reserved padding byte to bring the struct to the word boundary. MUST be
* set to 0 */
uint8_t state_padding_sz;
/* State padding size in quad words. Set to 0 if no padding is required. */
uint8_t key_sz;
/* Key size in quad words of the cipher algorithm used in this session */
};
struct fw_auth_hdr {
uint8_t hash_flags;
/* General flags defining the processing to perform. 0 is normal processing
* and 1 means there is a nested hash processing loop to go through */
uint8_t offset;
/* Quad word offset from the content descriptor parameters address to the
* parameters for the auth processing */
uint8_t curr_id;
/* Initialised with the auth slice type */
uint8_t next_id;
/* Set to the next slice to pass data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after auth */
union {
uint8_t inner_prefix_sz;
/* Size in bytes of the inner prefix data */
uint8_t aad_sz;
/* Size in bytes of padded AAD data to prefix to the packet for CCM
* or GCM processing */
} u;
uint8_t outer_prefix_sz;
/* Size in bytes of outer prefix data */
uint8_t final_sz;
/* Size in bytes of digest to be returned to the client if requested */
uint8_t inner_res_sz;
/* Size in bytes of the digest from the inner hash algorithm */
uint8_t resrvd;
/* This field is unused, assumed value is zero. */
uint8_t inner_state1_sz;
/* Size in bytes of inner hash state1 data. Must be a qword multiple */
uint8_t inner_state2_off;
/* Quad word offset from the content descriptor parameters pointer to the
* inner state2 value */
uint8_t inner_state2_sz;
/* Size in bytes of inner hash state2 data. Must be a qword multiple */
uint8_t outer_config_off;
/* Quad word offset from the content descriptor parameters pointer to the
* outer configuration information */
uint8_t outer_state1_sz;
/* Size in bytes of the outer state1 value */
uint8_t outer_res_sz;
/* Size in bytes of digest from the outer auth algorithm */
uint8_t outer_prefix_off;
/* Quad word offset from the start of the inner prefix data to the outer
* prefix information. Should equal the rounded inner prefix size, converted
* to qwords */
};
#define FW_AUTH_HDR_FLAG_DO_NESTED 1
#define FW_AUTH_HDR_FLAG_NO_NESTED 0
struct fw_la_comn_req {
union {
uint16_t la_flags;
/* Definition of the common LA processing flags used for the
* bulk processing */
union {
struct fw_la_ssl_tls_common ssl_tls_common;
/* For TLS or SSL Key Generation, this field is
* overloaded with ssl_tls common information */
struct fw_la_mgf_common mgf_common;
/* For MGF Key Generation, this field is overloaded with
mgf information */
} u;
} u;
union {
uint8_t resrvd;
/* If not used by a request this field must be set to 0 */
uint8_t tls_seed_len;
/* Byte Len of tls seed */
uint8_t req_params_blk_sz;
/* For bulk processing this field represents the request
* parameters block size */
uint8_t trng_cfg_sz;
/* This field is used for TRNG_ENABLE requests to indicate the
* size of the TRNG Slice configuration word. Size is in QW's */
} u1;
uint8_t la_cmd_id;
/* Definition of the LA command defined by this request */
};
#define LA_FLAGS_GCM_IV_LEN_FLAG __BIT(9)
#define LA_FLAGS_PROTO __BITS(8, 6)
#define LA_FLAGS_PROTO_SNOW_3G __SHIFTIN(4, LA_FLAGS_PROTO)
#define LA_FLAGS_PROTO_GCM __SHIFTIN(2, LA_FLAGS_PROTO)
#define LA_FLAGS_PROTO_CCM __SHIFTIN(1, LA_FLAGS_PROTO)
#define LA_FLAGS_PROTO_NO __SHIFTIN(0, LA_FLAGS_PROTO)
#define LA_FLAGS_DIGEST_IN_BUFFER __BIT(5)
#define LA_FLAGS_CMP_AUTH_RES __BIT(4)
#define LA_FLAGS_RET_AUTH_RES __BIT(3)
#define LA_FLAGS_UPDATE_STATE __BIT(2)
#define LA_FLAGS_PARTIAL __BITS(1, 0)
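
Restating the flag selection from qat_hw15_crypto_setup_desc earlier in this
commit as a standalone sketch: GCM sessions get the GCM protocol bits plus
the IV-length flag, and all other sessions run with no protocol layer:

/* Sketch: LA command flag selection for a session. */
uint16_t la_cmd_flags = 0;

if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
	la_cmd_flags |= LA_FLAGS_PROTO_GCM | LA_FLAGS_GCM_IV_LEN_FLAG;
else
	la_cmd_flags |= LA_FLAGS_PROTO_NO;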
struct fw_la_bulk_req {
struct fw_comn_req_hdr comn_hdr;
/* Common request header */
uint32_t flow_id;
/* Field used by Firmware to limit the number of stateful requests
* for a session being processed at a given point of time */
struct fw_la_comn_req comn_la_req;
/* Common LA request parameters */
struct fw_comn_req_mid comn_mid;
/* Common request middle section */
uint64_t req_params_addr;
/* Memory address of the request parameters */
union fw_comn_req_ftr comn_ftr;
/* Common request footer */
};
struct fw_la_resp {
struct fw_comn_resp_hdr comn_resp;
uint8_t resrvd[64 - sizeof(struct fw_comn_resp_hdr)];
/* FW_RESP_DEFAULT_SZ_HW15 */
};
struct fw_la_cipher_req_params {
uint8_t resrvd;
/* Reserved field and assumed set to 0 */
uint8_t cipher_state_sz;
/* Number of quad words of state data for the cipher algorithm */
uint8_t curr_id;
/* Initialised with the cipher slice type */
uint8_t next_id;
/* Set to the next slice to pass the ciphered data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after cipher */
uint16_t resrvd1;
/* Reserved field, should be set to zero*/
uint8_t resrvd2;
/* Reserved field, should be set to zero*/
uint8_t next_offset;
/* Offset in bytes to the next request parameter block */
uint32_t cipher_off;
/* Byte offset from the start of packet to the cipher data region */
uint32_t cipher_len;
/* Byte length of the cipher data region */
uint64_t state_address;
/* Flat buffer address in memory of the cipher state information. Unused
* if the state size is 0 */
};
struct fw_la_auth_req_params {
uint8_t auth_res_sz;
/* Size in quad words of digest information to validate */
uint8_t hash_state_sz;
/* Number of quad words of inner and outer hash prefix data to process */
uint8_t curr_id;
/* Initialised with the auth slice type */
uint8_t next_id;
/* Set to the next slice to pass the auth data through.
* Set to ICP_QAT_FW_SLICE_NULL for in-place auth-only requests
* Set to ICP_QAT_FW_SLICE_DRAM_WR for all other request types
* if the data is not to go through any more slices after auth */
union {
uint16_t resrvd;
/* Reserved field should be set to zero for bulk services */
uint16_t tls_secret_len;
/* Length of Secret information for TLS. */
} u;
uint8_t resrvd;
/* Reserved field, should be set to zero*/
uint8_t next_offset;
/* offset in bytes to the next request parameter block */
uint32_t auth_off;
/* Byte offset from the start of packet to the auth data region */
uint32_t auth_len;
/* Byte length of the auth data region */
union {
uint64_t prefix_addr;
/* Address of the prefix information */
uint64_t aad_addr;
/* Address of the AAD info in DRAM. Used for the CCM and GCM
* protocols */
} u1;
uint64_t auth_res_address;
/* Address of the auth result information to validate or the location to
* writeback the digest information to */
};
#endif

sys/dev/qat/qat_hw15var.h Normal file

@ -0,0 +1,105 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_hw15var.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_HW15VAR_H_
#define _DEV_PCI_QAT_HW15VAR_H_
CTASSERT(HASH_CONTENT_DESC_SIZE >=
sizeof(struct fw_auth_hdr) + MAX_HASH_SETUP_BLK_SZ);
CTASSERT(CIPHER_CONTENT_DESC_SIZE >=
sizeof(struct fw_cipher_hdr) + MAX_CIPHER_SETUP_BLK_SZ);
CTASSERT(CONTENT_DESC_MAX_SIZE >=
roundup(HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE,
QAT_OPTIMAL_ALIGN));
CTASSERT(QAT_SYM_REQ_PARAMS_SIZE_PADDED >=
roundup(sizeof(struct fw_la_cipher_req_params) +
sizeof(struct fw_la_auth_req_params), QAT_OPTIMAL_ALIGN));
/* Length of the five long words of the request that are stored in the
* session. This is rounded up to 32 in order to use the fast memcpy path. */
#define QAT_HW15_SESSION_REQ_CACHE_SIZE (32)
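
This constant is why qat_hw15_crypto_setup_req_params above copies a fixed
32 bytes of cached request template per request rather than a whole
struct fw_la_bulk_req:

/* Per-request copy of the session's cached request header. */
memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE);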
void qat_msg_req_type_populate(struct arch_if_req_hdr *,
enum arch_if_req, uint32_t);
void qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *, bus_addr_t,
uint8_t, uint8_t, uint16_t, uint32_t);
void qat_msg_service_cmd_populate(struct fw_la_bulk_req *,
enum fw_la_cmd_id, uint16_t);
void qat_msg_cmn_mid_populate(struct fw_comn_req_mid *, void *,
uint64_t , uint64_t);
void qat_msg_req_params_populate(struct fw_la_bulk_req *, bus_addr_t,
uint8_t);
void qat_msg_cmn_footer_populate(union fw_comn_req_ftr *, uint64_t);
void qat_msg_params_populate(struct fw_la_bulk_req *,
struct qat_crypto_desc *, uint8_t, uint16_t,
uint16_t);
int qat_adm_ring_init(struct qat_softc *);
int qat_adm_ring_send_init(struct qat_softc *);
void qat_hw15_crypto_setup_desc(struct qat_crypto *,
struct qat_session *, struct qat_crypto_desc *);
void qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *,
struct qat_session *, struct qat_crypto_desc const *,
struct qat_sym_cookie *, struct cryptop *);
#endif

sys/dev/qat/qat_hw17.c Normal file

@ -0,0 +1,662 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <machine/bus.h>
#include <opencrypto/xform.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qatvar.h"
#include "qat_hw17var.h"
int qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t,
void *, void *);
int qat_adm_mailbox_send(struct qat_softc *,
struct fw_init_admin_req *, struct fw_init_admin_resp *);
int qat_adm_mailbox_send_init_me(struct qat_softc *);
int qat_adm_mailbox_send_hb_timer(struct qat_softc *);
int qat_adm_mailbox_send_fw_status(struct qat_softc *);
int qat_adm_mailbox_send_constants(struct qat_softc *);
int
qat_adm_mailbox_init(struct qat_softc *sc)
{
uint64_t addr;
int error;
struct qat_dmamem *qdm;
error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma, 1,
PAGE_SIZE, PAGE_SIZE);
if (error)
return error;
qdm = &sc->sc_admin_comms.qadc_const_tbl_dma;
error = qat_alloc_dmamem(sc, qdm, 1, PAGE_SIZE, PAGE_SIZE);
if (error)
return error;
memcpy(qdm->qdm_dma_vaddr,
mailbox_const_tab, sizeof(mailbox_const_tab));
bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
BUS_DMASYNC_PREWRITE);
error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma, 1,
PAGE_SIZE, PAGE_SIZE);
if (error)
return error;
addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr;
qat_misc_write_4(sc, ADMINMSGUR, addr >> 32);
qat_misc_write_4(sc, ADMINMSGLR, addr);
return 0;
}
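/*
 * Synchronously exchange one admin message with AE "ae": copy the request
 * into that AE's slot in the admin DMA buffer, ring the mailbox doorbell by
 * writing 1, then poll for up to a second for the firmware to clear it
 * before copying the response back out.
 */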
int
qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae,
void *in, void *out)
{
struct qat_dmamem *qdm;
uint32_t mailbox;
bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE);
int offset = ae * ADMINMSG_LEN * 2;
int times, received;
uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset;
mailbox = qat_misc_read_4(sc, mb_offset);
if (mailbox == 1)
return EAGAIN;
qdm = &sc->sc_admin_comms.qadc_dma;
memcpy(buf, in, ADMINMSG_LEN);
bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
qat_misc_write_4(sc, mb_offset, 1);
received = 0;
for (times = 0; times < 50; times++) {
DELAY(20000);
if (qat_misc_read_4(sc, mb_offset) == 0) {
received = 1;
break;
}
}
if (received) {
bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN);
} else {
device_printf(sc->sc_dev,
"Failed to send admin msg to accelerator\n");
}
return received ? 0 : EFAULT;
}
int
qat_adm_mailbox_send(struct qat_softc *sc,
struct fw_init_admin_req *req, struct fw_init_admin_resp *resp)
{
int error;
uint32_t mask;
uint8_t ae;
for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
if (!(mask & 1))
continue;
error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp);
if (error)
return error;
if (resp->init_resp_hdr.status) {
device_printf(sc->sc_dev,
"Failed to send admin msg: cmd %d\n",
req->init_admin_cmd_id);
return EFAULT;
}
}
return 0;
}
int
qat_adm_mailbox_send_init_me(struct qat_softc *sc)
{
struct fw_init_admin_req req;
struct fw_init_admin_resp resp;
memset(&req, 0, sizeof(req));
req.init_admin_cmd_id = FW_INIT_ME;
return qat_adm_mailbox_send(sc, &req, &resp);
}
int
qat_adm_mailbox_send_hb_timer(struct qat_softc *sc)
{
struct fw_init_admin_req req;
struct fw_init_admin_resp resp;
memset(&req, 0, sizeof(req));
req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET;
req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr;
req.heartbeat_ticks =
sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL;
return qat_adm_mailbox_send(sc, &req, &resp);
}
int
qat_adm_mailbox_send_fw_status(struct qat_softc *sc)
{
int error;
struct fw_init_admin_req req;
struct fw_init_admin_resp resp;
memset(&req, 0, sizeof(req));
req.init_admin_cmd_id = FW_STATUS_GET;
error = qat_adm_mailbox_send(sc, &req, &resp);
if (error)
return error;
return 0;
}
int
qat_adm_mailbox_send_constants(struct qat_softc *sc)
{
struct fw_init_admin_req req;
struct fw_init_admin_resp resp;
memset(&req, 0, sizeof(req));
req.init_admin_cmd_id = FW_CONSTANTS_CFG;
req.init_cfg_sz = 1024;
req.init_cfg_ptr =
sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr;
return qat_adm_mailbox_send(sc, &req, &resp);
}
int
qat_adm_mailbox_send_init(struct qat_softc *sc)
{
int error;
error = qat_adm_mailbox_send_init_me(sc);
if (error)
return error;
error = qat_adm_mailbox_send_hb_timer(sc);
if (error)
return error;
error = qat_adm_mailbox_send_fw_status(sc);
if (error)
return error;
return qat_adm_mailbox_send_constants(sc);
}
int
qat_arb_init(struct qat_softc *sc)
{
uint32_t arb_cfg = 0x1U << 31 | 0x4 << 4 | 0x1;
uint32_t arb, i;
const uint32_t *thd_2_arb_cfg;
/* Service arb configured for 32 bytes responses and
* ring flow control check enabled. */
for (arb = 0; arb < MAX_ARB; arb++)
qat_arb_sarconfig_write_4(sc, arb, arb_cfg);
/* Map worker threads to service arbiters */
sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg);
if (!thd_2_arb_cfg)
return EINVAL;
for (i = 0; i < sc->sc_hw.qhw_num_engines; i++)
qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i));
return 0;
}
int
qat_set_ssm_wdtimer(struct qat_softc *sc)
{
uint32_t timer;
u_int mask;
int i;
timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT;
for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_write_4(sc, SSMWDT(i), timer);
qat_misc_write_4(sc, SSMWDTPKE(i), timer);
}
return 0;
}
int
qat_check_slice_hang(struct qat_softc *sc)
{
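/* Slice-hang detection is not implemented; report nothing handled. */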
int handled = 0;
return handled;
}
static uint32_t
qat_hw17_crypto_setup_cipher_ctrl(struct qat_crypto_desc *desc,
struct qat_session *qs, uint32_t cd_blk_offset,
struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
(struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
desc->qcd_cipher_offset = cd_blk_offset;
cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3;
cipher_cd_ctrl->cipher_key_sz = qs->qs_cipher_klen >> 3;
cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3;
FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER);
FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice);
return roundup(sizeof(struct hw_cipher_config) + qs->qs_cipher_klen, 8);
}
static void
qat_hw17_crypto_setup_cipher_cdesc(const struct qat_crypto_desc *desc,
const struct qat_session *qs, const struct cryptop *crp,
union hw_cipher_algo_blk *cipher)
{
const uint8_t *key;
cipher->max.cipher_config.val =
qat_crypto_load_cipher_session(desc, qs);
if (crp != NULL && crp->crp_cipher_key != NULL)
key = crp->crp_cipher_key;
else
key = qs->qs_cipher_key;
memcpy(cipher->max.key, key, qs->qs_cipher_klen);
}
static uint32_t
qat_hw17_crypto_setup_auth_ctrl(struct qat_crypto_desc *desc,
struct qat_session *qs, uint32_t cd_blk_offset,
struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl =
(struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
struct qat_sym_hash_def const *hash_def;
(void)qat_crypto_load_auth_session(desc, qs, &hash_def);
auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3;
auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
auth_cd_ctrl->final_sz = hash_def->qshd_alg->qshai_sah->hashsize;
auth_cd_ctrl->inner_state1_sz =
roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
auth_cd_ctrl->inner_state2_sz =
roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
auth_cd_ctrl->inner_state2_offset =
auth_cd_ctrl->hash_cfg_offset +
((sizeof(struct hw_auth_setup) +
auth_cd_ctrl->inner_state1_sz) >> 3);
FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH);
FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice);
desc->qcd_auth_sz = auth_cd_ctrl->final_sz;
desc->qcd_auth_offset = cd_blk_offset;
desc->qcd_gcm_aad_sz_offset1 =
cd_blk_offset + offsetof(union hw_auth_algo_blk, max.state1) +
auth_cd_ctrl->inner_state1_sz + AES_BLOCK_LEN;
return roundup(auth_cd_ctrl->inner_state1_sz +
auth_cd_ctrl->inner_state2_sz +
sizeof(struct hw_auth_setup), 8);
}
static void
qat_hw17_crypto_setup_auth_cdesc(const struct qat_crypto_desc *desc,
const struct qat_session *qs, const struct cryptop *crp,
union hw_auth_algo_blk *auth)
{
struct qat_sym_hash_def const *hash_def;
uint8_t inner_state1_sz, *state1, *state2;
const uint8_t *key;
auth->max.inner_setup.auth_config.config =
qat_crypto_load_auth_session(desc, qs, &hash_def);
auth->max.inner_setup.auth_counter.counter =
htobe32(hash_def->qshd_qat->qshqi_auth_counter);
inner_state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
state1 = auth->max.state1;
state2 = auth->max.state1 + inner_state1_sz;
switch (qs->qs_auth_algo) {
case HW_AUTH_ALGO_GALOIS_128:
key = NULL;
if (crp != NULL && crp->crp_cipher_key != NULL)
key = crp->crp_cipher_key;
else if (qs->qs_cipher_key != NULL)
key = qs->qs_cipher_key;
if (key != NULL) {
qat_crypto_gmac_precompute(desc, key,
qs->qs_cipher_klen, hash_def, state2);
}
break;
case HW_AUTH_ALGO_SHA1:
case HW_AUTH_ALGO_SHA256:
case HW_AUTH_ALGO_SHA384:
case HW_AUTH_ALGO_SHA512:
switch (qs->qs_auth_mode) {
case HW_AUTH_MODE0:
memcpy(state1, hash_def->qshd_alg->qshai_init_state,
inner_state1_sz);
/* Override for mode 0 hashes. */
auth->max.inner_setup.auth_counter.counter = 0;
break;
case HW_AUTH_MODE1:
if (crp != NULL && crp->crp_auth_key != NULL)
key = crp->crp_auth_key;
else
key = qs->qs_auth_key;
if (key != NULL) {
qat_crypto_hmac_precompute(desc, key,
qs->qs_auth_klen, hash_def, state1, state2);
}
break;
default:
panic("%s: unhandled auth mode %d", __func__,
qs->qs_auth_mode);
}
break;
default:
panic("%s: unhandled auth algorithm %d", __func__,
qs->qs_auth_algo);
}
}
static void
qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc,
struct fw_la_bulk_req *req)
{
union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct fw_comn_req_hdr *req_hdr = &req->comn_hdr;
req_hdr->service_cmd_id = desc->qcd_cmd_id;
req_hdr->hdr_flags = FW_COMN_VALID;
req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA;
req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD(
COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL);
req_hdr->serv_specif_flags = 0;
cd_pars->s.content_desc_addr = desc->qcd_desc_paddr;
}
void
qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
struct qat_crypto_desc *desc)
{
union hw_cipher_algo_blk *cipher;
union hw_auth_algo_blk *auth;
struct fw_la_bulk_req *req_tmpl;
struct fw_comn_req_hdr *req_hdr;
uint32_t cd_blk_offset = 0;
int i;
uint8_t *cd_blk_ptr;
req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache;
req_hdr = &req_tmpl->comn_hdr;
cd_blk_ptr = desc->qcd_content_desc;
memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req));
qat_hw17_init_comn_req_hdr(desc, req_tmpl);
for (i = 0; i < MAX_FW_SLICE; i++) {
switch (desc->qcd_slices[i]) {
case FW_SLICE_CIPHER:
cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr +
cd_blk_offset);
cd_blk_offset += qat_hw17_crypto_setup_cipher_ctrl(desc,
qs, cd_blk_offset, req_tmpl,
desc->qcd_slices[i + 1]);
qat_hw17_crypto_setup_cipher_cdesc(desc, qs, NULL,
cipher);
break;
case FW_SLICE_AUTH:
auth = (union hw_auth_algo_blk *)(cd_blk_ptr +
cd_blk_offset);
cd_blk_offset += qat_hw17_crypto_setup_auth_ctrl(desc,
qs, cd_blk_offset, req_tmpl,
desc->qcd_slices[i + 1]);
qat_hw17_crypto_setup_auth_cdesc(desc, qs, NULL, auth);
req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES;
break;
case FW_SLICE_DRAM_WR:
i = MAX_FW_SLICE; /* end of chain */
break;
default:
MPASS(0);
break;
}
}
req_tmpl->cd_pars.s.content_desc_params_sz =
roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3;
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
req_hdr->serv_specif_flags |=
FW_LA_PROTO_GCM | FW_LA_GCM_IV_LEN_12_OCTETS;
bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
}
static void
qat_hw17_crypto_req_setkey(const struct qat_crypto_desc *desc,
const struct qat_session *qs, struct qat_sym_cookie *qsc,
struct fw_la_bulk_req *bulk_req, const struct cryptop *crp)
{
union hw_auth_algo_blk *auth;
union hw_cipher_algo_blk *cipher;
uint8_t *cdesc;
int i;
cdesc = qsc->qsc_content_desc;
memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
for (i = 0; i < MAX_FW_SLICE; i++) {
switch (desc->qcd_slices[i]) {
case FW_SLICE_CIPHER:
cipher = (union hw_cipher_algo_blk *)
(cdesc + desc->qcd_cipher_offset);
qat_hw17_crypto_setup_cipher_cdesc(desc, qs, crp,
cipher);
break;
case FW_SLICE_AUTH:
auth = (union hw_auth_algo_blk *)
(cdesc + desc->qcd_auth_offset);
qat_hw17_crypto_setup_auth_cdesc(desc, qs, crp, auth);
break;
case FW_SLICE_DRAM_WR:
i = MAX_FW_SLICE; /* end of chain */
break;
default:
MPASS(0);
}
}
bulk_req->cd_pars.s.content_desc_addr = qsc->qsc_content_desc_paddr;
}
void
qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb __unused,
struct qat_session *qs, const struct qat_crypto_desc *desc,
struct qat_sym_cookie *qsc, struct cryptop *crp)
{
struct qat_sym_bulk_cookie *qsbc;
struct fw_la_bulk_req *bulk_req;
struct fw_la_cipher_req_params *cipher_param;
struct fw_la_auth_req_params *auth_param;
bus_addr_t digest_paddr;
uint32_t aad_sz, *aad_szp;
uint8_t *req_params_ptr;
enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
qsbc = &qsc->u.qsc_bulk_cookie;
bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req));
bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;
if (__predict_false(crp->crp_cipher_key != NULL ||
crp->crp_auth_key != NULL))
qat_hw17_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
digest_paddr = 0;
if (desc->qcd_auth_sz != 0)
digest_paddr = qsc->qsc_auth_res_paddr;
req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars;
cipher_param = (struct fw_la_cipher_req_params *)req_params_ptr;
auth_param = (struct fw_la_auth_req_params *)
(req_params_ptr + sizeof(struct fw_la_cipher_req_params));
cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr;
/*
* The SG list layout is a bit different for GCM and GMAC; it's simpler
* to handle those cases separately.
*/
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
if (cmd_id != FW_LA_CMD_AUTH) {
/*
* Don't fill out the cipher block if we're doing GMAC
* only.
*/
cipher_param->cipher_offset = 0;
cipher_param->cipher_length = crp->crp_payload_length;
}
auth_param->auth_off = 0;
auth_param->auth_len = crp->crp_payload_length;
auth_param->auth_res_addr = digest_paddr;
auth_param->auth_res_sz = desc->qcd_auth_sz;
auth_param->u1.aad_adr =
crp->crp_aad_length > 0 ? qsc->qsc_gcm_aad_paddr : 0;
auth_param->u2.aad_sz =
roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN);
auth_param->hash_state_sz = auth_param->u2.aad_sz >> 3;
/*
* Update the hash state block if necessary. This only occurs
* when the AAD length changes between requests in a session and
* is synchronized by qat_process().
*/
aad_sz = htobe32(crp->crp_aad_length);
aad_szp = (uint32_t *)(
__DECONST(uint8_t *, desc->qcd_content_desc) +
desc->qcd_gcm_aad_sz_offset1);
if (__predict_false(*aad_szp != aad_sz)) {
*aad_szp = aad_sz;
bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
qs->qs_desc_mem.qdm_dma_map,
BUS_DMASYNC_PREWRITE);
}
} else {
if (cmd_id != FW_LA_CMD_AUTH) {
cipher_param->cipher_offset =
crp->crp_aad_length == 0 ? 0 :
crp->crp_payload_start - crp->crp_aad_start;
cipher_param->cipher_length = crp->crp_payload_length;
}
if (cmd_id != FW_LA_CMD_CIPHER) {
auth_param->auth_off = 0;
auth_param->auth_len =
crp->crp_payload_length + crp->crp_aad_length;
auth_param->auth_res_addr = digest_paddr;
auth_param->auth_res_sz = desc->qcd_auth_sz;
auth_param->u1.aad_adr = 0;
auth_param->u2.aad_sz = 0;
auth_param->hash_state_sz = 0;
}
}
}
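
One detail shared by the hw15 and hw17 request-parameter paths above: GCM
AAD is padded to QAT_AES_GCM_AAD_ALIGN and the hash state size is expressed
in quad words. A worked example, assuming a 16-byte alignment (an
assumption; check qatvar.h) and a hypothetical 20-byte AAD:

/* Sketch: GCM AAD padding math (16-byte alignment assumed). */
uint32_t aad_len = 20;
uint32_t padded = roundup2(aad_len, QAT_AES_GCM_AAD_ALIGN); /* 32 */
uint8_t hash_state_sz = padded >> 3; /* 4 quad words */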

sys/dev/qat/qat_hw17reg.h Normal file

File diff suppressed because it is too large

sys/dev/qat/qat_hw17var.h Normal file

@ -0,0 +1,80 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_hw17var.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_HW17VAR_H_
#define _DEV_PCI_QAT_HW17VAR_H_
CTASSERT(CONTENT_DESC_MAX_SIZE >=
roundup(sizeof(union hw_cipher_algo_blk), 8) +
roundup(sizeof(union hw_auth_algo_blk), 8));
int qat_adm_mailbox_init(struct qat_softc *);
int qat_adm_mailbox_send_init(struct qat_softc *);
int qat_arb_init(struct qat_softc *);
int qat_set_ssm_wdtimer(struct qat_softc *);
int qat_check_slice_hang(struct qat_softc *);
void qat_hw17_crypto_setup_desc(struct qat_crypto *,
struct qat_session *, struct qat_crypto_desc *);
void qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *,
struct qat_session *, struct qat_crypto_desc const *,
struct qat_sym_cookie *, struct cryptop *);
#endif

sys/dev/qat/qatreg.h Normal file

File diff suppressed because it is too large

sys/dev/qat/qatvar.h Normal file

File diff suppressed because it is too large

sys/modules/Makefile

@ -300,6 +300,7 @@ SUBDIR= \
pty \
puc \
pwm \
${_qat} \
${_qlxge} \
${_qlxgb} \
${_qlxgbe} \
@ -630,6 +631,7 @@ _ntb= ntb
_ocs_fc= ocs_fc
_ossl= ossl
_pccard= pccard
_qat= qat
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_rdma= rdma
.endif

sys/modules/qat/Makefile Normal file

@ -0,0 +1,19 @@
# $FreeBSD$
.PATH: ${SRCTOP}/sys/dev/qat
KMOD= qat
SRCS= qat.c \
qat_ae.c \
qat_c2xxx.c \
qat_c3xxx.c \
qat_c62x.c \
qat_d15xx.c \
qat_dh895xcc.c \
qat_hw15.c \
qat_hw17.c
SRCS+= bus_if.h cryptodev_if.h device_if.h pci_if.h
.include <bsd.kmod.mk>