Merge ^/head r305431 through r305622.

dim 2016-09-08 18:15:36 +00:00
commit d96969942d
272 changed files with 69670 additions and 6035 deletions

View File

@ -130,6 +130,9 @@ OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_
OLD_DIRS+=usr/lib/clang/3.8.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.8.0/lib
OLD_DIRS+=usr/lib/clang/3.8.0
# 20160906: libkqueue tests moved to /usr/tests/sys/kqueue/libkqueue
OLD_FILES+=usr/tests/sys/kqueue/kqtest
OLD_FILES+=usr/tests/sys/kqueue/kqueue_test
# 20160901: Remove digi(4)
OLD_FILES+=usr/share/man/man4/digi.4.gz
# 20160819: Remove ie(4)

View File

@ -78,7 +78,11 @@ ATF_TC_BODY(fifo, tc)
RL(n = kevent(kq, NULL, 0, event, 1, NULL));
(void)printf("kevent num %d filt %d flags: %#x, fflags: %#x, "
#ifdef __FreeBSD__
"data: %" PRIdPTR "\n", n, event[0].filter, event[0].flags,
#else
"data: %" PRId64 "\n", n, event[0].filter, event[0].flags,
#endif
event[0].fflags, event[0].data);
ATF_REQUIRE_EQ(event[0].filter, EVFILT_READ);

View File

@ -111,7 +111,11 @@ ATF_TC_BODY(file, tc)
num += n;
(void)printf("kevent num %d flags: %#x, fflags: %#x, data: "
#ifdef __FreeBSD__
"%" PRIdPTR "\n", n, event[0].flags, event[0].fflags,
#else
"%" PRId64 "\n", n, event[0].flags, event[0].fflags,
#endif
event[0].data);
if (event[0].data < 0)

View File

@ -67,7 +67,11 @@ ATF_TC_BODY(pipe, tc)
RL(n = kevent(kq, NULL, 0, event, 1, NULL));
(void)printf("kevent num %d flags: %#x, fflags: %#x, data: "
#ifdef __FreeBSD__
"%" PRIdPTR "\n", n, event[0].flags, event[0].fflags, event[0].data);
#else
"%" PRId64 "\n", n, event[0].flags, event[0].fflags, event[0].data);
#endif
RL(n = read(fds[0], buffer, event[0].data));
buffer[n] = '\0';

View File

@ -103,7 +103,11 @@ h_check(bool check_master)
RL(n = kevent(kq, NULL, 0, event, 1, NULL));
(void)printf("kevent num %d filt %d flags: %#x, fflags: %#x, "
#ifdef __FreeBSD__
"data: %" PRIdPTR "\n", n, event[0].filter, event[0].flags,
#else
"data: %" PRId64 "\n", n, event[0].filter, event[0].flags,
#endif
event[0].fflags, event[0].data);
ATF_REQUIRE_EQ(event[0].filter, EVFILT_READ);

View File

@ -139,7 +139,11 @@ ATF_TC_BODY(proc1, tc)
printf(" NOTE_FORK");
}
if (event[0].fflags & NOTE_CHILD)
#ifdef __FreeBSD__
printf(" NOTE_CHILD, parent = %" PRIdPTR, event[0].data);
#else
printf(" NOTE_CHILD, parent = %" PRId64, event[0].data);
#endif
printf("\n");
}

View File

@ -34,6 +34,9 @@ __COPYRIGHT("@(#) Copyright (c) 2008\
The NetBSD Foundation, inc. All rights reserved.");
__RCSID("$NetBSD: t_proc2.c,v 1.2 2015/01/14 22:22:32 christos Exp $");
#ifdef __FreeBSD__
#include <sys/types.h>
#endif
#include <sys/event.h>
#include <sys/time.h>
#include <sys/types.h>

View File

@ -32,6 +32,9 @@
#include <sys/cdefs.h>
__RCSID("$NetBSD: t_proc3.c,v 1.2 2015/01/14 22:22:32 christos Exp $");
#ifdef __FreeBSD__
#include <sys/types.h>
#endif
#include <sys/event.h>
#include <sys/time.h>
#include <sys/types.h>

View File

@ -34,6 +34,9 @@ __COPYRIGHT("@(#) Copyright (c) 2008\
The NetBSD Foundation, inc. All rights reserved.");
__RCSID("$NetBSD: t_sig.c,v 1.2 2010/11/03 16:10:20 christos Exp $");
#ifdef __FreeBSD__
#include <sys/types.h>
#endif
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/param.h>
@ -60,9 +63,13 @@ ATF_TC_HEAD(sig, tc)
ATF_TC_BODY(sig, tc)
{
struct timespec timeout;
#ifdef __NetBSD__
struct kfilter_mapping km;
#endif
struct kevent event[1];
#ifdef __NetBSD__
char namebuf[32];
#endif
pid_t pid, child;
int kq, n, num, status;
@ -84,16 +91,22 @@ ATF_TC_BODY(sig, tc)
RL(kq = kqueue());
#ifdef __NetBSD__
(void)strlcpy(namebuf, "EVFILT_SIGNAL", sizeof(namebuf));
km.name = namebuf;
RL(ioctl(kq, KFILTER_BYNAME, &km));
(void)printf("got %d as filter number for `%s'.\n", km.filter, km.name);
#endif
/* ignore the signal to avoid taking it for real */
REQUIRE_LIBC(signal(SIGUSR1, SIG_IGN), SIG_ERR);
event[0].ident = SIGUSR1;
#ifdef __NetBSD__
event[0].filter = km.filter;
#else
event[0].filter = EVFILT_SIGNAL;
#endif
event[0].flags = EV_ADD | EV_ENABLE;
RL(kevent(kq, event, 1, NULL, 0, NULL));
@ -117,7 +130,11 @@ ATF_TC_BODY(sig, tc)
if (n == 0)
continue;
#ifdef __FreeBSD__
(void)printf("sig: kevent flags: 0x%x, data: %" PRIdPTR " (# "
#else
(void)printf("sig: kevent flags: 0x%x, data: %" PRId64 " (# "
#endif
"times signal posted)\n", event[0].flags, event[0].data);
}

View File

@ -1,3 +1,6 @@
#ifdef __FreeBSD__
#include <sys/types.h>
#endif
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/time.h>

View File

@ -24,6 +24,13 @@ __RCSID("$NetBSD: t_rpc.c,v 1.9 2015/11/27 13:59:40 christos Exp $");
return; \
} while(/*CONSTCOND*/0)
#ifdef __FreeBSD__
#define SKIPXI(ev, msg, ...) do { \
atf_tc_skip(msg, __VA_ARGS__); \
return ev; \
} while(/*CONSTCOND*/0)
#endif
#else
#define ERRX(ev, msg, ...) errx(ev, msg, __VA_ARGS__)
#define SKIPX(ev, msg, ...) errx(ev, msg, __VA_ARGS__)
@ -188,7 +195,13 @@ regtest(const char *hostname, const char *transp, const char *arg, int p)
svc_fdset_init(p ? SVC_FDSET_POLL : 0);
#endif
if (!svc_create(server, PROGNUM, VERSNUM, transp))
#ifdef __NetBSD__
ERRX(EXIT_FAILURE, "Cannot create server %d", num);
#else
{
SKIPXI(EXIT_FAILURE, "Cannot create server %d", num);
}
#endif
switch ((pid = fork())) {
case 0:
@ -335,6 +348,9 @@ ATF_TC(tcp);
ATF_TC_HEAD(tcp, tc)
{
atf_tc_set_md_var(tc, "descr", "Checks svc tcp (select)");
#ifdef __FreeBSD__
atf_tc_set_md_var(tc, "require.user", "root");
#endif
}
ATF_TC_BODY(tcp, tc)
@ -347,6 +363,9 @@ ATF_TC(udp);
ATF_TC_HEAD(udp, tc)
{
atf_tc_set_md_var(tc, "descr", "Checks svc udp (select)");
#ifdef __FreeBSD__
atf_tc_set_md_var(tc, "require.user", "root");
#endif
}
ATF_TC_BODY(udp, tc)
@ -359,6 +378,9 @@ ATF_TC(tcp_poll);
ATF_TC_HEAD(tcp_poll, tc)
{
atf_tc_set_md_var(tc, "descr", "Checks svc tcp (poll)");
#ifdef __FreeBSD__
atf_tc_set_md_var(tc, "require.user", "root");
#endif
}
ATF_TC_BODY(tcp_poll, tc)
@ -371,6 +393,9 @@ ATF_TC(udp_poll);
ATF_TC_HEAD(udp_poll, tc)
{
atf_tc_set_md_var(tc, "descr", "Checks svc udp (poll)");
#ifdef __FreeBSD__
atf_tc_set_md_var(tc, "require.user", "root");
#endif
}
ATF_TC_BODY(udp_poll, tc)

View File

@ -419,6 +419,8 @@
..
..
kqueue
libkqueue
..
..
mac
bsdextended

View File

@ -5,7 +5,7 @@ PACKAGE= tests
BINDIR= ${TESTSDIR}
PROGS= h_db
PROGS= h_lfsr
PROGS+= h_lfsr
${PACKAGE}FILES+= README

View File

@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/stat.h>
#include <string.h>
#include <stddef.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
@ -50,7 +51,8 @@ __FBSDID("$FreeBSD$");
#define NFS_DEBUGxx
#define NFSREAD_SIZE 1024
#define NFSREAD_MIN_SIZE 1024
#define NFSREAD_MAX_SIZE 4096
/* Define our own NFS attributes without NQNFS stuff. */
#ifdef OLD_NFSV2
@ -83,7 +85,7 @@ struct nfs_read_repl {
n_long errno;
struct nfsv2_fattrs fa;
n_long count;
u_char data[NFSREAD_SIZE];
u_char data[NFSREAD_MAX_SIZE];
};
#ifndef NFS_NOSYMLINK
@ -210,6 +212,8 @@ struct fs_ops nfs_fsops = {
nfs_readdir
};
static int nfs_read_size = NFSREAD_MIN_SIZE;
#ifdef OLD_NFSV2
/*
* Fetch the root file handle (call mount daemon)
@ -264,6 +268,17 @@ nfs_getrootfh(struct iodesc *d, char *path, u_char *fhp)
if (repl->errno)
return (ntohl(repl->errno));
bcopy(repl->fh, fhp, sizeof(repl->fh));
/*
* Improve boot performance over NFS
*/
if (getenv("nfs.read_size") != NULL)
nfs_read_size = strtol(getenv("nfs.read_size"), NULL, 0);
if (nfs_read_size < NFSREAD_MIN_SIZE)
nfs_read_size = NFSREAD_MIN_SIZE;
if (nfs_read_size > NFSREAD_MAX_SIZE)
nfs_read_size = NFSREAD_MAX_SIZE;
return (0);
}
@ -401,11 +416,11 @@ nfs_readdata(struct nfs_iodesc *d, off_t off, void *addr, size_t len)
bcopy(d->fh, args->fh, NFS_FHSIZE);
args->off = htonl((n_long)off);
if (len > NFSREAD_SIZE)
len = NFSREAD_SIZE;
if (len > nfs_read_size)
len = nfs_read_size;
args->len = htonl((n_long)len);
args->xxx = htonl((n_long)0);
hlen = sizeof(*repl) - NFSREAD_SIZE;
hlen = offsetof(struct nfs_read_repl, data[0]);
cc = rpc_call(d->iodesc, NFS_PROG, NFS_VER2, NFSPROC_READ,
args, sizeof(*args),
@ -1025,7 +1040,7 @@ nfs_readdata(struct nfs_iodesc *d, off_t off, void *addr, size_t len)
uint32_t count;
uint32_t eof;
uint32_t len;
u_char data[NFSREAD_SIZE];
u_char data[NFSREAD_MAX_SIZE];
} *repl;
struct {
uint32_t h[RPC_HEADER_WORDS];
@ -1048,10 +1063,10 @@ nfs_readdata(struct nfs_iodesc *d, off_t off, void *addr, size_t len)
pos = roundup(d->fhsize, sizeof(uint32_t)) / sizeof(uint32_t);
args->fhoffcnt[pos++] = 0;
args->fhoffcnt[pos++] = htonl((uint32_t)off);
if (len > NFSREAD_SIZE)
len = NFSREAD_SIZE;
if (len > nfs_read_size)
len = nfs_read_size;
args->fhoffcnt[pos] = htonl((uint32_t)len);
hlen = sizeof(*repl) - NFSREAD_SIZE;
hlen = offsetof(struct repl, data[0]);
cc = rpc_call(d->iodesc, NFS_PROG, NFS_VER3, NFSPROCV3_READ,
args, 4 * sizeof(uint32_t) + roundup(d->fhsize, sizeof(uint32_t)),

View File

@ -24,7 +24,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*

View File

@ -113,6 +113,7 @@ MAN= aac.4 \
cue.4 \
cxgb.4 \
cxgbe.4 \
cxgbev.4 \
cy.4 \
cyapa.4 \
da.4 \
@ -602,6 +603,9 @@ MLINKS+=cxgb.4 if_cxgb.4
MLINKS+=cxgbe.4 if_cxgbe.4 \
cxgbe.4 cxl.4 \
cxgbe.4 if_cxl.4
MLINKS+=cxgbev.4 if_cxgbev.4 \
cxgbev.4 cxlv.4 \
cxgbev.4 if_cxlv.4
MLINKS+=dc.4 if_dc.4
MLINKS+=de.4 if_de.4
MLINKS+=disc.4 if_disc.4

View File

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd March 29, 2016
.Dd September 8, 2016
.Dt AMDSBWD 4
.Os
.Sh NAME
@ -51,7 +51,22 @@ The
driver provides
.Xr watchdog 4
support for the watchdog timers present on
AMD SB600, SB7xx, SB8xx and SB9xx southbridges and Axx FCHs.
the supported chipsets.
.Sh HARDWARE
The
.Nm
driver supports the following chipsets:
.Pp
.Bl -bullet -compact
.It
AMD SB600/7x0/8x0/9x0 southbridges
.It
AMD Axx/Hudson/Bolton FCHs
.It
AMD FCHs integrated into Family 15h Models 60h-6Fh, 70h-7Fh Processors
.It
AMD FCHs integrated into Family 16h Models 00h-0Fh, 30h-3Fh Processors
.El
.Sh SEE ALSO
.Xr watchdog 4 ,
.Xr watchdog 8 ,

View File

@ -77,8 +77,7 @@ For more information on configuring this device, see
.Sh HARDWARE
The
.Nm
driver supports 40Gb, 10Gb and 1Gb Ethernet adapters based on the T5 ASIC
(ports will be named cxl):
driver supports 40Gb, 10Gb and 1Gb Ethernet adapters based on the T5 ASIC:
.Pp
.Bl -bullet -compact
.It
@ -320,6 +319,7 @@ email all the specific information related to the issue to
.Xr altq 4 ,
.Xr arp 4 ,
.Xr cxgb 4 ,
.Xr cxgbev 4 ,
.Xr netintro 4 ,
.Xr ng_ether 4 ,
.Xr ifconfig 8

share/man/man4/cxgbev.4 (new file, 290 lines)
View File

@ -0,0 +1,290 @@
.\" Copyright (c) 2011-2016, Chelsio Inc
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions are met:
.\"
.\" 1. Redistributions of source code must retain the above copyright notice,
.\" this list of conditions and the following disclaimer.
.\"
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" 3. Neither the name of the Chelsio Inc nor the names of its
.\" contributors may be used to endorse or promote products derived from
.\" this software without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.\" * Other names and brands may be claimed as the property of others.
.\"
.\" $FreeBSD$
.\"
.Dd August 22, 2016
.Dt CXGBEV 4
.Os
.Sh NAME
.Nm cxgbev
.Nd "Chelsio T4 and T5 based 40Gb, 10Gb, and 1Gb Ethernet VF driver"
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device cxgbe"
.Cd "device cxgbev"
.Ed
.Pp
To load the driver as a
module at boot time, place the following line in
.Xr loader.conf 5 :
.Bd -literal -offset indent
if_cxgbev_load="YES"
.Ed
.Sh DESCRIPTION
The
.Nm
driver provides support for Virtual Functions on PCI Express Ethernet adapters
based on the Chelsio Terminator 4 and Terminator 5 ASICs (T4 and T5).
The driver supports Jumbo Frames, Transmit/Receive checksum offload,
TCP segmentation offload (TSO), Large Receive Offload (LRO), VLAN
tag insertion/extraction, VLAN checksum offload, VLAN TSO, and
Receive Side Steering (RSS).
For further hardware information and questions related to hardware
requirements, see
.Pa http://www.chelsio.com/ .
.Pp
Note that ports of T5 VFs are named cxlv and attach to a t5vf parent device
(in contrast to ports named cxgbev that attach to a t4vf parent for a T4 VF).
Loader tunables with the hw.cxgbe prefix apply to both T4 and T5 VFs.
The Physical Function driver for T4 and T5 adapters shares these tunables.
The sysctl MIBs are at dev.t5vf and dev.cxlv for T5 cards and at dev.t4vf and
dev.cxgbev for T4 cards.
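For example, on a machine with a single T5 VF the adapter-wide nodes would appear under dev.t5vf.0 and the per-port nodes under dev.cxlv.0 (dev.t4vf.0 and dev.cxgbev.0 for a T4 VF); the unit numbers here are only illustrative.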
.Pp
For more information on configuring this device, see
.Xr ifconfig 8 .
.Sh HARDWARE
The
.Nm
driver supports Virtual Functions on 40Gb, 10Gb and 1Gb Ethernet adapters
based on the T5 ASIC:
.Pp
.Bl -bullet -compact
.It
Chelsio T580-CR
.It
Chelsio T580-LP-CR
.It
Chelsio T580-LP-SO-CR
.It
Chelsio T560-CR
.It
Chelsio T540-CR
.It
Chelsio T540-LP-CR
.It
Chelsio T522-CR
.It
Chelsio T520-LL-CR
.It
Chelsio T520-CR
.It
Chelsio T520-SO
.It
Chelsio T520-BT
.It
Chelsio T504-BT
.El
.Pp
The
.Nm
driver supports Virtual Functions on 10Gb and 1Gb Ethernet adapters based
on the T4 ASIC:
.Pp
.Bl -bullet -compact
.It
Chelsio T420-CR
.It
Chelsio T422-CR
.It
Chelsio T440-CR
.It
Chelsio T420-BCH
.It
Chelsio T440-BCH
.It
Chelsio T440-CH
.It
Chelsio T420-SO
.It
Chelsio T420-CX
.It
Chelsio T420-BT
.It
Chelsio T404-BT
.El
.Sh LOADER TUNABLES
Tunables can be set at the
.Xr loader 8
prompt before booting the kernel or stored in
.Xr loader.conf 5 .
.Bl -tag -width indent
.It Va hw.cxgbe.ntxq10g
The number of tx queues to use for a 10Gb or 40Gb port.
The default is 16 or the number
of CPU cores in the system, whichever is less.
.It Va hw.cxgbe.nrxq10g
The number of rx queues to use for a 10Gb or 40Gb port.
The default is 8 or the number
of CPU cores in the system, whichever is less.
.It Va hw.cxgbe.ntxq1g
The number of tx queues to use for a 1Gb port.
The default is 4 or the number
of CPU cores in the system, whichever is less.
.It Va hw.cxgbe.nrxq1g
The number of rx queues to use for a 1Gb port.
The default is 2 or the number
of CPU cores in the system, whichever is less.
.It Va hw.cxgbe.holdoff_timer_idx_10G
.It Va hw.cxgbe.holdoff_timer_idx_1G
The timer index value to use to delay interrupts.
The holdoff timer list has the values 1, 5, 10, 50, 100, and 200
by default (all values are in microseconds) and the index selects a
value from this list.
The default value is 1 which means the timer value is 5us.
Different interfaces can be assigned different values at any time via the
dev.cxgbev.X.holdoff_tmr_idx or dev.cxlv.X.holdoff_tmr_idx sysctl.
.It Va hw.cxgbe.holdoff_pktc_idx_10G
.It Va hw.cxgbe.holdoff_pktc_idx_1G
The packet-count index value to use to delay interrupts.
The packet-count list has the values 1, 8, 16, and 32 by default
and the index selects a value from this list.
The default value is -1 which means packet counting is disabled and interrupts
are generated based solely on the holdoff timer value.
Different interfaces can be assigned different values via the
dev.cxgbev.X.holdoff_pktc_idx or dev.cxlv.X.holdoff_pktc_idx sysctl.
This sysctl works only when the interface has never been marked up (as done by
ifconfig up).
.It Va hw.cxgbe.qsize_txq
The size, in number of entries, of the descriptor ring used for a tx
queue.
A buf_ring of the same size is also allocated for additional
software queuing.
See
.Xr ifnet 9 .
The default value is 1024.
Different interfaces can be assigned different values via the
dev.cxgbev.X.qsize_txq sysctl or dev.cxlv.X.qsize_txq sysctl.
This sysctl works only when the interface has never been marked up (as done by
ifconfig up).
.It Va hw.cxgbe.qsize_rxq
The size, in number of entries, of the descriptor ring used for an
rx queue.
The default value is 1024.
Different interfaces can be assigned different values via the
dev.cxgbev.X.qsize_rxq or dev.cxlv.X.qsize_rxq sysctl.
This sysctl works only when the interface has never been marked up (as done by
ifconfig up).
.It Va hw.cxgbe.interrupt_types
The interrupt types that the driver is allowed to use.
Bit 0 represents INTx (line interrupts), bit 1 MSI, bit 2 MSI-X.
The default is 7 (all allowed).
The driver will select the best possible type out of the allowed types by
itself.
.It Va hw.cxgbe.fl_pktshift
The number of bytes of padding inserted before the beginning of an Ethernet
frame in the receive buffer.
The default value of 2 ensures that the Ethernet payload (usually the IP header)
is at a 4 byte aligned address.
0-7 are all valid values.
.It Va hw.cxgbe.fl_pad
A non-zero value ensures that writes from the hardware to a receive buffer are
padded up to the specified boundary.
The default is -1 which lets the driver pick a pad boundary.
0 disables trailer padding completely.
.It Va hw.cxgbe.buffer_packing
Allow the hardware to deliver multiple frames in the same receive buffer
opportunistically.
The default is -1 which lets the driver decide.
0 or 1 explicitly disable or enable this feature.
.It Va hw.cxgbe.allow_mbufs_in_cluster
1 allows the driver to lay down one or more mbufs within the receive buffer
opportunistically.
This is the default.
0 prohibits the driver from doing so.
.It Va hw.cxgbe.largest_rx_cluster
.It Va hw.cxgbe.safest_rx_cluster
Sizes of rx clusters.
Each of these must be set to one of the sizes available
(usually 2048, 4096, 9216, and 16384) and largest_rx_cluster must be greater
than or equal to safest_rx_cluster.
The defaults are 16384 and 4096 respectively.
The driver will never attempt to allocate a receive buffer larger than
largest_rx_cluster and will fall back to allocating buffers of
safest_rx_cluster size if an allocation larger than safest_rx_cluster fails.
Note that largest_rx_cluster merely establishes a ceiling -- the driver is
allowed to allocate buffers of smaller sizes.
.El
.Pp
Certain settings and resources for Virtual Functions are dictated
by the parent Physical Function driver.
For example, the Physical Function driver limits the number of queues a
Virtual Function is permitted to use.
Some of these limits can be adjusted in the firmware configuration file
used with the Physical Function driver.
.Pp
The PAUSE settings on the port of a Virtual Function are inherited from
the settings of the same port on the Physical Function.
Virtual Functions cannot modify the setting and track changes made to
the associated port's setting by the Physical Function driver.
.Pp
Receive queues on a Virtual Function always drop packets in response to
congestion
.Po
equivalent to setting
.Va hw.cxgbe.cong_drop
to 1
.Pc .
.Pp
The VF driver currently depends on the PF driver.
As a result, loading the VF driver will also load the PF driver as a
dependency.
.Sh SUPPORT
For general information and support,
go to the Chelsio support website at:
.Pa http://www.chelsio.com/ .
.Pp
If an issue is identified with this driver with a supported adapter,
email all the specific information related to the issue to
.Aq Mt support@chelsio.com .
.Sh SEE ALSO
.Xr altq 4 ,
.Xr arp 4 ,
.Xr cxgbe 4 ,
.Xr netintro 4 ,
.Xr ng_ether 4 ,
.Xr ifconfig 8
.Sh HISTORY
The
.Nm
device driver first appeared in
.Fx 12.0 .
.Sh AUTHORS
.An -nosplit
The
.Nm
driver was written by
.An Navdeep Parhar Aq Mt np@FreeBSD.org
and
.An John Baldwin Aq Mt jhb@FreeBSD.org .

View File

@ -55,7 +55,11 @@ Intel 82371AB/82443MX
.It
ATI IXP400
.It
AMD SB600/700/710/750
AMD SB600/7x0/8x0/9x0 southbridges
.It
AMD Axx/Hudson/Bolton FCHs
.It
AMD FCH integrated into Family 16h Models 00h-0Fh Processors
.El
.Sh SEE ALSO
.Xr amdpm 4 ,

View File

@ -250,7 +250,7 @@ command.
Enable halt keyboard combination.
.It Va kern.vt.kbd_poweroff
Enable power off key combination.
.It Va kern.vt.kbd_reboot.
.It Va kern.vt.kbd_reboot
Enable reboot key combination, usually Ctrl+Alt+Del.
.It Va kern.vt.kbd_debug
Enable debug request key combination, usually Ctrl+Alt+Esc.

View File

@ -1354,7 +1354,10 @@ MLINKS+=pci.9 pci_alloc_msi.9 \
pci.9 pci_set_max_read_req.9 \
pci.9 pci_write_config.9 \
pci.9 pcie_adjust_config.9 \
pci.9 pcie_flr.9 \
pci.9 pcie_get_max_completion_timeout.9 \
pci.9 pcie_read_config.9 \
pci.9 pcie_wait_for_pending_transactions.9 \
pci.9 pcie_write_config.9
MLINKS+=pci_iov_schema.9 pci_iov_schema_alloc_node.9 \
pci_iov_schema.9 pci_iov_schema_add_bool.9 \

View File

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd September 1, 2016
.Dd September 6, 2016
.Dt PCI 9
.Os
.Sh NAME
@ -66,7 +66,10 @@
.Nm pci_set_powerstate ,
.Nm pci_write_config ,
.Nm pcie_adjust_config ,
.Nm pcie_flr ,
.Nm pcie_get_max_completion_timeout ,
.Nm pcie_read_config ,
.Nm pcie_wait_for_pending_transactions ,
.Nm pcie_write_config
.Nd PCI bus interface
.Sh SYNOPSIS
@ -145,10 +148,20 @@
.Fa "uint32_t val"
.Fa "int width"
.Fc
.Ft bool
.Fn pcie_flr "device_t dev" "u_int max_delay" "bool force"
.Ft int
.Fn pcie_get_max_completion_timeout "device_t dev"
.Ft uint32_t
.Fn pcie_read_config "device_t dev" "int reg" "int width"
.Ft bool
.Fn pcie_wait_for_pending_transactions "device_t dev" "u_int max_delay"
.Ft void
.Fn pcie_write_config "device_t dev" "int reg" "uint32_t val" "int width"
.Ft void
.Fn pci_event_fn "void *arg" "device_t dev"
.Fn EVENTHANDLER_REGISTER "pci_add_device" "pci_event_fn"
.Fn EVENTHANDLER_DEREGISTER "pci_delete_device" "pci_event_fn"
.In dev/pci/pci_iov.h
.Ft int
.Fn pci_iov_attach "device_t dev" "nvlist_t *pf_schema" "nvlist_t *vf_schema"
@ -427,6 +440,51 @@ keyword,
then
.Fn pci_get_vpd_readonly
returns an error.
.Pp
The
.Fn pcie_get_max_completion_timeout
function returns the maximum completion timeout configured for the device
.Fa dev
in microseconds.
If the
.Fa dev
device is not a PCI-express device,
.Fn pcie_get_max_completion_timeout
returns zero.
When completion timeouts are disabled for
.Fa dev ,
this function returns the maximum timeout that would be used if timeouts
were enabled.
.Pp
The
.Fn pcie_wait_for_pending_transactions
function waits for any pending transactions initiated by the
.Fa dev
device to complete.
The function checks for pending transactions by polling the transactions
pending flag in the PCI-express device status register.
It returns
.Dv true
once the transaction pending flag is clear.
If transactions are still pending after
.Fa max_delay
milliseconds,
.Fn pcie_wait_for_pending_transactions
returns
.Dv false .
If
.Fa max_delay
is set to zero,
.Fn pcie_wait_for_pending_transactions
performs a single check;
otherwise,
this function may sleep while polling the transactions pending flag.
.Fn pcie_wait_for_pending_transactions
returns
.Dv true
if
.Fa dev
is not a PCI-express device.
.Ss Device Configuration
The
.Fn pci_enable_busmaster
@ -658,6 +716,51 @@ is invoked,
then the device will be transitioned to
.Dv PCI_POWERSTATE_D0
before any config registers are restored.
.Pp
The
.Fn pcie_flr
function requests a Function Level Reset
.Pq FLR
of
.Fa dev .
If
.Fa dev
is not a PCI-express device or does not support Function Level Resets via
the PCI-express device control register,
.Dv false
is returned.
Pending transactions are drained by disabling busmastering and calling
.Fn pcie_wait_for_pending_transactions
before resetting the device.
The
.Fa max_delay
argument specifies the maximum timeout to wait for pending transactions as
described for
.Fn pcie_wait_for_pending_transactions .
If
.Fn pcie_wait_for_pending_transactions
fails with a timeout and
.Fa force
is
.Dv false ,
busmastering is re-enabled and
.Dv false
is returned.
If
.Fn pcie_wait_for_pending_transactions
fails with a timeout and
.Fa force
is
.Dv true ,
the device is reset despite the timeout.
After the reset has been requested,
.Fn pcie_flr
sleeps for at least 100 milliseconds before returning
.Dv true .
Note that
.Fn pcie_flr
does not save and restore any state around the reset.
The caller should save and restore state as needed.
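A minimal sketch of how a driver might combine these functions, modeled on the bhyve ppt(4) change later in this commit; the helper name is hypothetical:
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

/* Hypothetical helper: FLR a device while preserving its config state. */
static void
reset_device_with_flr(device_t dev)
{
    u_int max_delay;

    /* Scale the wait to the device's completion timeout, 10 ms minimum. */
    max_delay = max(pcie_get_max_completion_timeout(dev) / 1000, 10);

    pci_save_state(dev);        /* pcie_flr() does not save/restore state. */
    if (!pcie_flr(dev, max_delay, true))
        device_printf(dev, "FLR not supported\n");
    pci_restore_state(dev);
}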
.Ss Message Signaled Interrupts
Message Signaled Interrupts
.Pq MSI
@ -910,6 +1013,24 @@ with one in the new distribution.
The
.Fn pci_remap_msix
function will fail if this condition is not met.
.Ss Device Events
The
.Va pci_add_device
event handler is invoked every time a new PCI device is added to the system.
This includes the creation of Virtual Functions via SR-IOV.
.Pp
The
.Va pci_delete_device
event handler is invoked every time a PCI device is removed from the system.
.Pp
Both event handlers pass the
.Vt device_t
object of the relevant PCI device as
.Fa dev
to each callback function.
Both event handlers are invoked while
.Fa dev
is unattached but with valid instance variables.
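As a sketch of these event handlers, patterned after the bhyve iommu change elsewhere in this commit; the callback and tag names below are placeholders:
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <dev/pci/pcivar.h>

static eventhandler_tag add_tag, delete_tag;

static void
example_pci_add(void *arg, device_t dev)
{
    /* dev is unattached here, but its instance variables are valid. */
    printf("PCI device added, rid 0x%x\n", pci_get_rid(dev));
}

static void
example_pci_delete(void *arg, device_t dev)
{
    printf("PCI device removed, rid 0x%x\n", pci_get_rid(dev));
}

static void
example_register(void)
{
    add_tag = EVENTHANDLER_REGISTER(pci_add_device, example_pci_add,
        NULL, 0);
    delete_tag = EVENTHANDLER_REGISTER(pci_delete_device,
        example_pci_delete, NULL, 0);
}

static void
example_deregister(void)
{
    EVENTHANDLER_DEREGISTER(pci_add_device, add_tag);
    EVENTHANDLER_DEREGISTER(pci_delete_device, delete_tag);
}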
.Sh SEE ALSO
.Xr pci 4 ,
.Xr pciconf 8 ,
@ -921,6 +1042,7 @@ function will fail if this condition is not met.
.Xr devclass 9 ,
.Xr device 9 ,
.Xr driver 9 ,
.Xr eventhandler 9 ,
.Xr rman 9
.Rs
.%B FreeBSD Developers' Handbook

View File

@ -4,18 +4,18 @@
# -----------------------------------------------------------------------------
#
# Short month names
1
2
3
4
5
6
7
8
9
10
11
12
1
2
3
4
5
6
7
8
9
10
11
12
#
# Long month names (as in a date)
1월
@ -53,7 +53,7 @@
%H시 %M분 %S초
#
# x_fmt
%Y년 %b월 %e일
%Y년 %_m월 %e일
#
# c_fmt
%x %A %X

View File

@ -4,18 +4,18 @@
# -----------------------------------------------------------------------------
#
# Short month names
1
2
3
4
5
6
7
8
9
10
11
12
1
2
3
4
5
6
7
8
9
10
11
12
#
# Long month names (as in a date)
1월
@ -53,7 +53,7 @@
%H시 %M분 %S초
#
# x_fmt
%Y년 %b월 %e일
%Y년 %_m월 %e일
#
# c_fmt
%x %A %X

View File

@ -108,6 +108,20 @@ init_amd(void)
wrmsr(0xc001102a, msr);
}
}
/*
* Work around Erratum 793: Specific Combination of Writes to Write
* Combined Memory Types and Locked Instructions May Cause Core Hang.
* See Revision Guide for AMD Family 16h Models 00h-0Fh Processors,
* revision 3.04 or later, publication 51810.
*/
if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) {
if ((cpu_feature2 & CPUID2_HV) == 0) {
msr = rdmsr(0xc0011020);
msr |= (uint64_t)1 << 15;
wrmsr(0xc0011020, msr);
}
}
}
/*

View File

@ -50,7 +50,6 @@ __FBSDID("$FreeBSD$");
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_sched.h"
@ -125,9 +124,6 @@ __FBSDID("$FreeBSD$");
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
@ -274,9 +270,6 @@ cpu_startup(dummy)
startrtclock();
printcpuinfo();
panicifcpuunsupported();
#ifdef PERFMON
perfmon_init();
#endif
/*
* Display physical memory if SMBIOS reports reasonable amount.

View File

@ -28,10 +28,6 @@
__FBSDID("$FreeBSD$");
#ifdef GUPROF
#if 0
#include "opt_i586_guprof.h"
#include "opt_perfmon.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
@ -44,25 +40,16 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <machine/clock.h>
#if 0
#include <machine/perfmon.h>
#endif
#include <machine/timerreg.h>
#define CPUTIME_CLOCK_UNINITIALIZED 0
#define CPUTIME_CLOCK_I8254 1
#define CPUTIME_CLOCK_TSC 2
#define CPUTIME_CLOCK_I586_PMC 3
#define CPUTIME_CLOCK_I8254_SHIFT 7
int cputime_bias = 1; /* initialize for locality of reference */
static int cputime_clock = CPUTIME_CLOCK_UNINITIALIZED;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
static u_int cputime_clock_pmc_conf = I586_PMC_GUPROF;
static int cputime_clock_pmc_init;
static struct gmonparam saved_gmp;
#endif
static int cputime_prof_active;
#endif /* GUPROF */
@ -198,9 +185,6 @@ cputime()
{
u_int count;
int delta;
#if defined(PERFMON) && defined(I586_PMC_GUPROF) && !defined(SMP)
u_quad_t event_count;
#endif
u_char high, low;
static u_int prev_count;
@ -217,21 +201,6 @@ cputime()
prev_count = count;
return (delta);
}
#if defined(PERFMON) && defined(I586_PMC_GUPROF) && !defined(SMP)
if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
/*
* XXX permon_read() should be inlined so that the
* perfmon module doesn't need to be compiled with
* profiling disabled and so that it is fast.
*/
perfmon_read(0, &event_count);
count = (u_int)event_count;
delta = (int)(count - prev_count);
prev_count = count;
return (delta);
}
#endif /* PERFMON && I586_PMC_GUPROF && !SMP */
/*
* Read the current value of the 8254 timer counter 0.
@ -262,39 +231,13 @@ sysctl_machdep_cputime_clock(SYSCTL_HANDLER_ARGS)
{
int clock;
int error;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
int event;
struct pmc pmc;
#endif
clock = cputime_clock;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
if (clock == CPUTIME_CLOCK_I586_PMC) {
pmc.pmc_val = cputime_clock_pmc_conf;
clock += pmc.pmc_event;
}
#endif
error = sysctl_handle_opaque(oidp, &clock, sizeof clock, req);
if (error == 0 && req->newptr != NULL) {
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
if (clock >= CPUTIME_CLOCK_I586_PMC) {
event = clock - CPUTIME_CLOCK_I586_PMC;
if (event >= 256)
return (EINVAL);
pmc.pmc_num = 0;
pmc.pmc_event = event;
pmc.pmc_unit = 0;
pmc.pmc_flags = PMCF_E | PMCF_OS | PMCF_USR;
pmc.pmc_mask = 0;
cputime_clock_pmc_conf = pmc.pmc_val;
cputime_clock = CPUTIME_CLOCK_I586_PMC;
} else
#endif
{
if (clock < 0 || clock >= CPUTIME_CLOCK_I586_PMC)
return (EINVAL);
cputime_clock = clock;
}
if (clock < 0 || clock > CPUTIME_CLOCK_TSC)
return (EINVAL);
cputime_clock = clock;
}
return (error);
}
@ -325,32 +268,6 @@ startguprof(gp)
cputime_prof_active = 1;
} else
gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
if (perfmon_avail() &&
perfmon_setup(0, cputime_clock_pmc_conf) == 0) {
if (perfmon_start(0) != 0)
perfmon_fini(0);
else {
/* XXX 1 event == 1 us. */
gp->profrate = 1000000;
saved_gmp = *gp;
/* Zap overheads. They are invalid. */
gp->cputime_overhead = 0;
gp->mcount_overhead = 0;
gp->mcount_post_overhead = 0;
gp->mcount_pre_overhead = 0;
gp->mexitcount_overhead = 0;
gp->mexitcount_post_overhead = 0;
gp->mexitcount_pre_overhead = 0;
cputime_clock_pmc_init = TRUE;
}
}
}
#endif /* PERFMON && I586_PMC_GUPROF */
cputime_bias = 0;
cputime();
}
@ -359,13 +276,6 @@ void
stopguprof(gp)
struct gmonparam *gp;
{
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
if (cputime_clock_pmc_init) {
*gp = saved_gmp;
perfmon_fini(0);
cputime_clock_pmc_init = FALSE;
}
#endif
if (cputime_clock == CPUTIME_CLOCK_TSC)
cputime_prof_active = 0;
}

View File

@ -73,12 +73,6 @@ cpu HAMMER # aka K8, aka Opteron & Athlon64
# Options for CPU features.
#
#
# PERFMON causes the driver for Pentium/Pentium Pro performance counters
# to be compiled. See perfmon(4) for more information.
#
#XXX#options PERFMON
#####################################################################
# NETWORKING OPTIONS

View File

@ -58,6 +58,7 @@ SYSCTL_INT(_hw_vmm_iommu, OID_AUTO, enable, CTLFLAG_RDTUN, &iommu_enable, 0,
static struct iommu_ops *ops;
static void *host_domain;
static eventhandler_tag add_tag, delete_tag;
static __inline int
IOMMU_INIT(void)
@ -153,12 +154,26 @@ IOMMU_DISABLE(void)
(*ops->disable)();
}
static void
iommu_pci_add(void *arg, device_t dev)
{
/* Add new devices to the host domain. */
iommu_add_device(host_domain, pci_get_rid(dev));
}
static void
iommu_pci_delete(void *arg, device_t dev)
{
iommu_remove_device(host_domain, pci_get_rid(dev));
}
static void
iommu_init(void)
{
int error, bus, slot, func;
vm_paddr_t maxaddr;
const char *name;
device_t dev;
if (!iommu_enable)
@ -196,6 +211,9 @@ iommu_init(void)
*/
iommu_create_mapping(host_domain, 0, 0, maxaddr);
add_tag = EVENTHANDLER_REGISTER(pci_add_device, iommu_pci_add, NULL, 0);
delete_tag = EVENTHANDLER_REGISTER(pci_delete_device, iommu_pci_delete,
NULL, 0);
for (bus = 0; bus <= PCI_BUSMAX; bus++) {
for (slot = 0; slot <= PCI_SLOTMAX; slot++) {
for (func = 0; func <= PCI_FUNCMAX; func++) {
@ -203,12 +221,7 @@ iommu_init(void)
if (dev == NULL)
continue;
/* skip passthrough devices */
name = device_get_name(dev);
if (name != NULL && strcmp(name, "ppt") == 0)
continue;
/* everything else belongs to the host domain */
/* Everything belongs to the host domain. */
iommu_add_device(host_domain,
pci_get_rid(dev));
}
@ -221,6 +234,15 @@ iommu_init(void)
void
iommu_cleanup(void)
{
if (add_tag != NULL) {
EVENTHANDLER_DEREGISTER(pci_add_device, add_tag);
add_tag = NULL;
}
if (delete_tag != NULL) {
EVENTHANDLER_DEREGISTER(pci_delete_device, delete_tag);
delete_tag = NULL;
}
IOMMU_DISABLE();
IOMMU_DESTROY_DOMAIN(host_domain);
IOMMU_CLEANUP();

View File

@ -362,7 +362,13 @@ ppt_assign_device(struct vm *vm, int bus, int slot, int func)
if (ppt->vm != NULL && ppt->vm != vm)
return (EBUSY);
pci_save_state(ppt->dev);
pcie_flr(ppt->dev,
max(pcie_get_max_completion_timeout(ppt->dev) / 1000, 10),
true);
pci_restore_state(ppt->dev);
ppt->vm = vm;
iommu_remove_device(iommu_host_domain(), pci_get_rid(ppt->dev));
iommu_add_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev));
return (0);
}
@ -381,10 +387,17 @@ ppt_unassign_device(struct vm *vm, int bus, int slot, int func)
*/
if (ppt->vm != vm)
return (EBUSY);
pci_save_state(ppt->dev);
pcie_flr(ppt->dev,
max(pcie_get_max_completion_timeout(ppt->dev) / 1000, 10),
true);
pci_restore_state(ppt->dev);
ppt_unmap_mmio(vm, ppt);
ppt_teardown_msi(ppt);
ppt_teardown_msix(ppt);
iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev));
iommu_add_device(iommu_host_domain(), pci_get_rid(ppt->dev));
ppt->vm = NULL;
return (0);
}

View File

@ -777,9 +777,9 @@ extern devclass_t ofwgpiobus_devclass, gpioc_devclass;
extern driver_t ofw_gpiobus_driver, gpioc_driver;
EARLY_DRIVER_MODULE(axp81x, iicbus, axp81x_driver, axp81x_devclass, 0, 0,
BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST);
EARLY_DRIVER_MODULE(ofw_gpiobus, axp81x_pmu, ofw_gpiobus_driver,
ofwgpiobus_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
ofwgpiobus_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST);
DRIVER_MODULE(gpioc, axp81x_pmu, gpioc_driver, gpioc_devclass, 0, 0);
MODULE_VERSION(axp81x, 1);
MODULE_DEPEND(axp81x, iicbus, 1, 1, 1);

View File

@ -47,8 +47,42 @@ __FBSDID("$FreeBSD$");
#include <dev/extres/clk/clk_mux.h>
#define CPU_CLK_SRC_SEL_WIDTH 2
#define CPU_CLK_SRC_SEL_SHIFT 16
#define A10_CPU_CLK_SRC_SEL_WIDTH 2
#define A10_CPU_CLK_SRC_SEL_SHIFT 16
#define A83T_Cx_CLK_SRC_SEL_WIDTH 1
#define A83T_C0_CLK_SRC_SEL_SHIFT 12
#define A83T_C1_CLK_SRC_SEL_SHIFT 28
struct aw_cpuclk_config {
u_int width;
u_int shift;
};
static struct aw_cpuclk_config a10_config = {
.width = A10_CPU_CLK_SRC_SEL_WIDTH,
.shift = A10_CPU_CLK_SRC_SEL_SHIFT,
};
static struct aw_cpuclk_config a83t_c0_config = {
.width = A83T_Cx_CLK_SRC_SEL_WIDTH,
.shift = A83T_C0_CLK_SRC_SEL_SHIFT,
};
static struct aw_cpuclk_config a83t_c1_config = {
.width = A83T_Cx_CLK_SRC_SEL_WIDTH,
.shift = A83T_C1_CLK_SRC_SEL_SHIFT,
};
static struct ofw_compat_data compat_data[] = {
{ "allwinner,sun4i-a10-cpu-clk", (uintptr_t)&a10_config },
{ "allwinner,sun8i-a83t-c0cpu-clk", (uintptr_t)&a83t_c0_config },
{ "allwinner,sun8i-a83t-c1cpu-clk", (uintptr_t)&a83t_c1_config },
{ NULL, (uintptr_t)NULL }
};
#define CPUCLK_CONF(d) \
(void *)ofw_bus_search_compatible((d), compat_data)->ocd_data
static int
aw_cpuclk_probe(device_t dev)
@ -56,7 +90,7 @@ aw_cpuclk_probe(device_t dev)
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-cpu-clk"))
if (CPUCLK_CONF(dev) == NULL)
return (ENXIO);
device_set_desc(dev, "Allwinner CPU Clock");
@ -68,6 +102,7 @@ aw_cpuclk_attach(device_t dev)
{
struct clk_mux_def def;
struct clkdom *clkdom;
struct aw_cpuclk_config *conf;
bus_addr_t paddr;
bus_size_t psize;
phandle_t node;
@ -75,6 +110,7 @@ aw_cpuclk_attach(device_t dev)
clk_t clk;
node = ofw_bus_get_node(dev);
conf = CPUCLK_CONF(dev);
if (ofw_reg_to_paddr(node, 0, &paddr, &psize, NULL) != 0) {
device_printf(dev, "cannot parse 'reg' property\n");
@ -105,8 +141,8 @@ aw_cpuclk_attach(device_t dev)
}
def.clkdef.parent_cnt = ncells;
def.offset = paddr;
def.shift = CPU_CLK_SRC_SEL_SHIFT;
def.width = CPU_CLK_SRC_SEL_WIDTH;
def.shift = conf->shift;
def.width = conf->width;
error = clk_parse_ofw_clk_name(dev, node, &def.clkdef.name);
if (error != 0) {

View File

@ -157,6 +157,17 @@ __FBSDID("$FreeBSD$");
#define A80_PLL4_FACTOR_N (0xff << 8)
#define A80_PLL4_FACTOR_N_SHIFT 8
#define A83T_PLLCPUX_LOCK_TIME (0x7 << 24)
#define A83T_PLLCPUX_LOCK_TIME_SHIFT 24
#define A83T_PLLCPUX_CLOCK_OUTPUT_DIS (1 << 20)
#define A83T_PLLCPUX_OUT_EXT_DIVP (1 << 16)
#define A83T_PLLCPUX_FACTOR_N (0xff << 8)
#define A83T_PLLCPUX_FACTOR_N_SHIFT 8
#define A83T_PLLCPUX_FACTOR_N_MIN 12
#define A83T_PLLCPUX_FACTOR_N_MAX 125
#define A83T_PLLCPUX_POSTDIV_M (0x3 << 0)
#define A83T_PLLCPUX_POSTDIV_M_SHIFT 0
#define CLKID_A10_PLL3_1X 0
#define CLKID_A10_PLL3_2X 1
@ -202,6 +213,7 @@ enum aw_pll_type {
AWPLL_A31_PLL6,
AWPLL_A64_PLLHSIC,
AWPLL_A80_PLL4,
AWPLL_A83T_PLLCPUX,
AWPLL_H3_PLL1,
};
@ -824,6 +836,46 @@ a64_pllhsic_init(device_t dev, bus_addr_t reg, struct clknode_init_def *def)
return (0);
}
static int
a83t_pllcpux_recalc(struct aw_pll_sc *sc, uint64_t *freq)
{
uint32_t val, n, p;
DEVICE_LOCK(sc);
PLL_READ(sc, &val);
DEVICE_UNLOCK(sc);
n = (val & A83T_PLLCPUX_FACTOR_N) >> A83T_PLLCPUX_FACTOR_N_SHIFT;
p = (val & A83T_PLLCPUX_OUT_EXT_DIVP) ? 4 : 1;
*freq = (*freq * n) / p;
return (0);
}
static int
a83t_pllcpux_set_freq(struct aw_pll_sc *sc, uint64_t fin, uint64_t *fout,
int flags)
{
uint32_t val;
u_int n;
n = *fout / fin;
if (n < A83T_PLLCPUX_FACTOR_N_MIN || n > A83T_PLLCPUX_FACTOR_N_MAX)
return (EINVAL);
DEVICE_LOCK(sc);
PLL_READ(sc, &val);
val &= ~A83T_PLLCPUX_FACTOR_N;
val |= (n << A83T_PLLCPUX_FACTOR_N_SHIFT);
val &= ~A83T_PLLCPUX_CLOCK_OUTPUT_DIS;
PLL_WRITE(sc, val);
DEVICE_UNLOCK(sc);
return (0);
}
#define PLL(_type, _recalc, _set_freq, _init) \
[(_type)] = { \
.recalc = (_recalc), \
@ -842,6 +894,7 @@ static struct aw_pll_funcs aw_pll_func[] = {
PLL(AWPLL_A31_PLL1, a31_pll1_recalc, NULL, NULL),
PLL(AWPLL_A31_PLL6, a31_pll6_recalc, NULL, a31_pll6_init),
PLL(AWPLL_A80_PLL4, a80_pll4_recalc, NULL, NULL),
PLL(AWPLL_A83T_PLLCPUX, a83t_pllcpux_recalc, a83t_pllcpux_set_freq, NULL),
PLL(AWPLL_A64_PLLHSIC, a64_pllhsic_recalc, NULL, a64_pllhsic_init),
PLL(AWPLL_H3_PLL1, a23_pll1_recalc, h3_pll1_set_freq, NULL),
};
@ -856,6 +909,7 @@ static struct ofw_compat_data compat_data[] = {
{ "allwinner,sun6i-a31-pll1-clk", AWPLL_A31_PLL1 },
{ "allwinner,sun6i-a31-pll6-clk", AWPLL_A31_PLL6 },
{ "allwinner,sun8i-a23-pll1-clk", AWPLL_A23_PLL1 },
{ "allwinner,sun8i-a83t-pllcpux-clk", AWPLL_A83T_PLLCPUX },
{ "allwinner,sun8i-h3-pll1-clk", AWPLL_H3_PLL1 },
{ "allwinner,sun9i-a80-pll4-clk", AWPLL_A80_PLL4 },
{ "allwinner,sun50i-a64-pllhsic-clk", AWPLL_A64_PLLHSIC },

View File

@ -0,0 +1,131 @@
/*-
* Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
* All rights reserved.
*
* Developed by Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/resource.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#define AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET 0x4000
#define AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET 0x5000
#define AL_CCU_SPECULATION_CONTROL_OFFSET 0x4
static struct resource_spec al_ccu_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ -1, 0 }
};
struct al_ccu_softc {
struct resource *res;
};
static int al_ccu_probe(device_t dev);
static int al_ccu_attach(device_t dev);
static int al_ccu_detach(device_t dev);
static device_method_t al_ccu_methods[] = {
DEVMETHOD(device_probe, al_ccu_probe),
DEVMETHOD(device_attach, al_ccu_attach),
DEVMETHOD(device_detach, al_ccu_detach),
{ 0, 0 }
};
static driver_t al_ccu_driver = {
"ccu",
al_ccu_methods,
sizeof(struct al_ccu_softc)
};
static devclass_t al_ccu_devclass;
EARLY_DRIVER_MODULE(al_ccu, simplebus, al_ccu_driver,
al_ccu_devclass, 0, 0, BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(al_ccu, ofwbus, al_ccu_driver,
al_ccu_devclass, 0, 0, BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);
static int
al_ccu_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "annapurna-labs,al-ccu"))
return (ENXIO);
device_set_desc(dev, "Alpine CCU");
return (BUS_PROBE_DEFAULT);
}
static int
al_ccu_attach(device_t dev)
{
struct al_ccu_softc *sc;
int err;
sc = device_get_softc(dev);
err = bus_alloc_resources(dev, al_ccu_spec, &sc->res);
if (err != 0) {
device_printf(dev, "could not allocate resources\n");
return (err);
}
/* Enable cache snoop */
bus_write_4(sc->res, AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET, 1);
bus_write_4(sc->res, AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET, 1);
/* Disable speculative fetches from masters */
bus_write_4(sc->res, AL_CCU_SPECULATION_CONTROL_OFFSET, 7);
return (0);
}
static int
al_ccu_detach(device_t dev)
{
struct al_ccu_softc *sc;
sc = device_get_softc(dev);
bus_release_resources(dev, al_ccu_spec, &sc->res);
return (0);
}

View File

@ -51,21 +51,10 @@ __FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_platform.h"
struct mtx al_dbg_lock;
#define DEVMAP_MAX_VA_ADDRESS 0xF0000000
bus_addr_t al_devmap_pa;
bus_addr_t al_devmap_size;
#define AL_NB_SERVICE_OFFSET 0x70000
#define AL_NB_CCU_OFFSET 0x90000
#define AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET 0x4000
#define AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET 0x5000
#define AL_CCU_SPECULATION_CONTROL_OFFSET 0x4
#define AL_NB_ACF_MISC_OFFSET 0xD0
#define AL_NB_ACF_MISC_READ_BYPASS (1 << 30)
int alpine_get_devmap_base(bus_addr_t *pa, bus_addr_t *size);
vm_offset_t
@ -90,35 +79,7 @@ platform_gpio_init(void)
void
platform_late_init(void)
{
bus_addr_t reg_baddr;
uint32_t val;
if (!mtx_initialized(&al_dbg_lock))
mtx_init(&al_dbg_lock, "ALDBG", "ALDBG", MTX_SPIN);
/* configure system fabric */
if (bus_space_map(fdtbus_bs_tag, al_devmap_pa, al_devmap_size, 0,
&reg_baddr))
panic("Couldn't map Register Space area");
/* do not allow reads to bypass writes to different addresses */
val = bus_space_read_4(fdtbus_bs_tag, reg_baddr,
AL_NB_SERVICE_OFFSET + AL_NB_ACF_MISC_OFFSET);
val &= ~AL_NB_ACF_MISC_READ_BYPASS;
bus_space_write_4(fdtbus_bs_tag, reg_baddr,
AL_NB_SERVICE_OFFSET + AL_NB_ACF_MISC_OFFSET, val);
/* enable cache snoop */
bus_space_write_4(fdtbus_bs_tag, reg_baddr,
AL_NB_CCU_OFFSET + AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET, 1);
bus_space_write_4(fdtbus_bs_tag, reg_baddr,
AL_NB_CCU_OFFSET + AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET, 1);
/* disable speculative fetches from masters */
bus_space_write_4(fdtbus_bs_tag, reg_baddr,
AL_NB_CCU_OFFSET + AL_CCU_SPECULATION_CONTROL_OFFSET, 7);
bus_space_unmap(fdtbus_bs_tag, reg_baddr, al_devmap_size);
}
/*

View File

@ -68,22 +68,14 @@ __FBSDID("$FreeBSD$");
#define AL_NB_INIT_CONTROL (0x8)
#define AL_NB_CONFIG_STATUS_PWR_CTRL(cpu) (0x2020 + (cpu)*0x100)
#define SERDES_NUM_GROUPS 4
#define SERDES_GROUP_SIZE 0x400
extern bus_addr_t al_devmap_pa;
extern bus_addr_t al_devmap_size;
extern void mpentry(void);
int alpine_serdes_resource_get(uint32_t group, bus_space_tag_t *tag,
bus_addr_t *baddr);
static int platform_mp_get_core_cnt(void);
static int alpine_get_cpu_resume_base(u_long *pbase, u_long *psize);
static int alpine_get_nb_base(u_long *pbase, u_long *psize);
static int alpine_get_serdes_base(u_long *pbase, u_long *psize);
int alpine_serdes_resource_get(uint32_t group, bus_space_tag_t *tag,
bus_addr_t *baddr);
static boolean_t alpine_validate_cpu(u_int, phandle_t, u_int, pcell_t *);
static boolean_t
@ -254,60 +246,3 @@ platform_mp_start_ap(void)
bus_space_unmap(fdtbus_bs_tag, nb_baddr, nb_size);
bus_space_unmap(fdtbus_bs_tag, cpu_resume_baddr, cpu_resume_size);
}
static int
alpine_get_serdes_base(u_long *pbase, u_long *psize)
{
phandle_t node;
u_long base = 0;
u_long size = 0;
if (pbase == NULL || psize == NULL)
return (EINVAL);
if ((node = OF_finddevice("/")) == -1)
return (EFAULT);
if ((node =
ofw_bus_find_compatible(node, "annapurna-labs,al-serdes")) == 0)
return (EFAULT);
if (fdt_regsize(node, &base, &size))
return (EFAULT);
*pbase = base;
*psize = size;
return (0);
}
int
alpine_serdes_resource_get(uint32_t group, bus_space_tag_t *tag, bus_addr_t *baddr)
{
u_long serdes_base, serdes_size;
int ret;
static bus_addr_t baddr_mapped[SERDES_NUM_GROUPS];
if (group >= SERDES_NUM_GROUPS)
return (EINVAL);
if (baddr_mapped[group]) {
*tag = fdtbus_bs_tag;
*baddr = baddr_mapped[group];
return (0);
}
ret = alpine_get_serdes_base(&serdes_base, &serdes_size);
if (ret)
return (ret);
ret = bus_space_map(fdtbus_bs_tag,
al_devmap_pa + serdes_base + group * SERDES_GROUP_SIZE,
(SERDES_NUM_GROUPS - group) * SERDES_GROUP_SIZE, 0, baddr);
if (ret)
return (ret);
baddr_mapped[group] = *baddr;
return (0);
}

View File

@ -0,0 +1,129 @@
/*-
* Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
* All rights reserved.
*
* Developed by Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/resource.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#define AL_NB_ACF_MISC_OFFSET 0xD0
#define AL_NB_ACF_MISC_READ_BYPASS (1 << 30)
static struct resource_spec nb_service_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ -1, 0 }
};
struct nb_service_softc {
struct resource *res;
};
static int nb_service_probe(device_t dev);
static int nb_service_attach(device_t dev);
static int nb_service_detach(device_t dev);
static device_method_t nb_service_methods[] = {
DEVMETHOD(device_probe, nb_service_probe),
DEVMETHOD(device_attach, nb_service_attach),
DEVMETHOD(device_detach, nb_service_detach),
{ 0, 0 }
};
static driver_t nb_service_driver = {
"nb_service",
nb_service_methods,
sizeof(struct nb_service_softc)
};
static devclass_t nb_service_devclass;
EARLY_DRIVER_MODULE(nb_service, simplebus, nb_service_driver,
nb_service_devclass, 0, 0, BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(nb_service, ofwbus, nb_service_driver,
nb_service_devclass, 0, 0, BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);
static int
nb_service_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "annapurna-labs,al-nb-service"))
return (ENXIO);
device_set_desc(dev, "Alpine North Bridge Service");
return (BUS_PROBE_DEFAULT);
}
static int
nb_service_attach(device_t dev)
{
struct nb_service_softc *sc;
uint32_t val;
int err;
sc = device_get_softc(dev);
err = bus_alloc_resources(dev, nb_service_spec, &sc->res);
if (err != 0) {
device_printf(dev, "could not allocate resources\n");
return (err);
}
/* Do not allow reads to bypass writes to different addresses */
val = bus_read_4(sc->res, AL_NB_ACF_MISC_OFFSET);
val &= ~AL_NB_ACF_MISC_READ_BYPASS;
bus_write_4(sc->res, AL_NB_ACF_MISC_OFFSET, val);
return (0);
}
static int
nb_service_detach(device_t dev)
{
struct nb_service_softc *sc;
sc = device_get_softc(dev);
bus_release_resources(dev, nb_service_spec, &sc->res);
return (0);
}

View File

@ -52,6 +52,9 @@ device phy
device hwreset
device regulator
# CPU frequency control
device cpufreq
# Interrupt controller
device gic

View File

@ -32,6 +32,10 @@ options SMP # Enable multiple cores
device gic
options INTRNG
# Annapurna Alpine drivers
device al_ccu # Alpine Cache Coherency Unit
device al_nb_service # Alpine North Bridge Service
# Pseudo devices
device loop
device random

View File

@ -80,6 +80,7 @@ nodevice star_saver
nodevice warp_saver
nodevice cxgbe
nodevice cxgbev
nodevice snd_cmi
#

View File

@ -42,6 +42,8 @@
#define SCM_USB_STS0 0x624
#define SCM_USB_CTRL1 0x628
#define SCM_USB_STS1 0x62C
#define SCM_MAC_ID0_LO 0x630
#define SCM_MAC_ID0_HI 0x634
#define SCM_PWMSS_CTRL 0x664
#endif /* __AM335X_SCM_H__ */

View File

@ -78,6 +78,9 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <machine/resource.h>
#include <arm/ti/ti_scm.h>
#include <arm/ti/am335x/am335x_scm.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
@ -87,8 +90,6 @@ __FBSDID("$FreeBSD$");
#include "if_cpswreg.h"
#include "if_cpswvar.h"
#include <arm/ti/ti_scm.h>
#include "miibus_if.h"
@ -1019,14 +1020,14 @@ cpswp_attach(device_t dev)
IFQ_SET_READY(&ifp->if_snd);
/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
ti_scm_reg_read_4(CPSW_MAC_ID0_HI + sc->unit * 8, &reg);
ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, &reg);
mac_addr[0] = reg & 0xFF;
mac_addr[1] = (reg >> 8) & 0xFF;
mac_addr[2] = (reg >> 16) & 0xFF;
mac_addr[3] = (reg >> 24) & 0xFF;
/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
ti_scm_reg_read_4(CPSW_MAC_ID0_LO + sc->unit * 8, &reg);
ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, &reg);
mac_addr[4] = reg & 0xFF;
mac_addr[5] = (reg >> 8) & 0xFF;

View File

@ -46,9 +46,6 @@
#define CPSW_PORT_P_SA_LO(p) (CPSW_PORT_OFFSET + 0x120 + ((p-1) * 0x100))
#define CPSW_PORT_P_SA_HI(p) (CPSW_PORT_OFFSET + 0x124 + ((p-1) * 0x100))
#define CPSW_MAC_ID0_LO 0x0630
#define CPSW_MAC_ID0_HI 0x0634
#define CPSW_CPDMA_OFFSET 0x0800
#define CPSW_CPDMA_TX_CONTROL (CPSW_CPDMA_OFFSET + 0x04)
#define CPSW_CPDMA_TX_TEARDOWN (CPSW_CPDMA_OFFSET + 0x08)

View File

@ -310,9 +310,7 @@ ti_pruss_kqfilter(struct cdev *cdev, struct knote *kn)
case EVFILT_READ:
kn->kn_hook = sc;
kn->kn_fop = &ti_pruss_kq_read;
mtx_lock(&sc->sc_mtx);
knlist_add(&sc->sc_selinfo.si_note, kn, 1);
mtx_unlock(&sc->sc_mtx);
knlist_add(&sc->sc_selinfo.si_note, kn, 0);
break;
default:
return (EINVAL);

View File

@ -151,5 +151,5 @@ END(arm64_idcache_wbinv_range)
* void arm64_icache_sync_range(vm_offset_t, vm_size_t)
*/
ENTRY(arm64_icache_sync_range)
cache_handle_range dcop = cvac, ic = 1, icop = ivau
cache_handle_range dcop = cvau, ic = 1, icop = ivau
END(arm64_icache_sync_range)

View File

@ -2939,8 +2939,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_invalidate_page(pmap, va);
if (pmap != pmap_kernel()) {
if (pmap == &curproc->p_vmspace->vm_pmap)
cpu_icache_sync_range(va, PAGE_SIZE);
if (pmap == &curproc->p_vmspace->vm_pmap &&
(prot & VM_PROT_EXECUTE) != 0)
cpu_icache_sync_range(va, PAGE_SIZE);
if ((mpte == NULL || mpte->wire_count == NL3PG) &&
pmap_superpages_enabled() &&

View File

@ -391,6 +391,10 @@ do_el0_sync(struct trapframe *frame)
call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr);
userret(td, frame);
break;
case EXCP_MSR:
call_trapsignal(td, SIGILL, ILL_PRVOPC, (void *)frame->tf_elr);
userret(td, frame);
break;
case EXCP_SOFTSTP_EL0:
td->td_frame->tf_spsr &= ~PSR_SS;
td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
@ -401,9 +405,9 @@ do_el0_sync(struct trapframe *frame)
userret(td, frame);
break;
default:
print_registers(frame);
panic("Unknown userland exception %x esr_el1 %lx\n", exception,
esr);
call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr);
userret(td, frame);
break;
}
}

View File

@ -90,12 +90,19 @@ options SOC_ALLWINNER_A64
options SOC_CAVM_THUNDERX
options SOC_HISI_HI6220
# Annapurna Alpine drivers
device al_ccu # Alpine Cache Coherency Unit
device al_nb_service # Alpine North Bridge Service
# VirtIO support
device virtio
device virtio_mmio
device virtio_blk
device vtnet
# CPU frequency control
device cpufreq
# Bus drivers
device pci
options PCI_HP # PCI-Express native HotPlug

View File

@ -40,7 +40,7 @@ __FBSDID("$FreeBSD$");
#include "loader_efi.h"
#ifndef EFI_STAGING_SIZE
#define EFI_STAGING_SIZE 48
#define EFI_STAGING_SIZE 64
#endif
#define STAGE_PAGES EFI_SIZE_TO_PAGES((EFI_STAGING_SIZE) * 1024 * 1024)

View File

@ -27,6 +27,18 @@
*/
/ {
cpus {
cpu@0 {
clocks = <&c0_cpux_clk>;
clock-latency = <2000000>;
};
cpu@100 {
clocks = <&c1_cpux_clk>;
clock-latency = <2000000>;
};
};
pmu {
compatible = "arm,cortex-a7-pmu", "arm,cortex-a15-pmu";
@ -38,6 +50,38 @@
};
clocks {
pll_c0cpux: clk@01c20000 {
#clock-cells = <0>;
compatible = "allwinner,sun8i-a83t-pllcpux-clk";
reg = <0x01c20000 0x4>;
clocks = <&osc24M>;
clock-output-names = "pll_c0cpux";
};
pll_c1cpux: clk@01c20004 {
#clock-cells = <0>;
compatible = "allwinner,sun8i-a83t-pllcpux-clk";
reg = <0x01c20004 0x4>;
clocks = <&osc24M>;
clock-output-names = "pll_c1cpux";
};
c0_cpux_clk: c0clk@01c20050 {
#clock-cells = <0>;
compatible = "allwinner,sun8i-a83t-c0cpu-clk";
reg = <0x01c20050 0x4>;
clocks = <&osc24M>, <&pll_c0cpux>;
clock-output-names = "c0_cpux";
};
c1_cpux_clk: c1clk@01c20050 {
#clock-cells = <0>;
compatible = "allwinner,sun8i-a83t-c1cpu-clk";
reg = <0x01c20050 0x4>;
clocks = <&osc24M>, <&pll_c1cpux>;
clock-output-names = "c1_cpux";
};
/* cpus_clk compatible in gnu dt is incorrect */
cpus_clk: clk@01f01400 {
compatible = "allwinner,sun8i-a83t-cpus-clk";

View File

@ -137,6 +137,12 @@
reg = <0x00ff5ec0 0x30>;
};
ccu {
compatible = "annapurna-labs,al-ccu";
reg = <0x00090000 0x10000>;
io_coherency = <1>;
};
nb_service {
compatible = "annapurna-labs,al-nb-service";
reg = <0x00070000 0x10000>;

View File

@ -29,6 +29,32 @@
#include "sun8i-a83t-sinovoip-bpi-m3.dts"
#include "a83t.dtsi"
/ {
cpus {
cpu@0 {
cpu-supply = <&reg_dcdc2>;
operating-points = <
/* kHz uV */
1200000 840000
1008000 840000
648000 840000
408000 840000
>;
};
cpu@100 {
cpu-supply = <&reg_dcdc3>;
operating-points = <
/* kHz uV */
1200000 840000
1008000 840000
648000 840000
408000 840000
>;
};
};
};
&ehci0 {
status = "okay";
};
@ -115,6 +141,16 @@
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
gpio-controller;
#gpio-cells = <1>;
regulators {
reg_dcdc2: dcdc2 {
regulator-name = "dcdc2";
};
reg_dcdc3: dcdc3 {
regulator-name = "dcdc3";
};
};
};
};

View File

@ -37,24 +37,39 @@
/dts-v1/;
/ {
model = "QEMU RV64I";
compatible = "riscv,rv64i";
model = "QEMU RV64";
compatible = "riscv,rv64";
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <1>;
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
compatible = "riscv,rv64";
reg = <0x0>;
};
};
aliases {
console0 = &console0;
};
memory {
/*
* This is not used currently.
* We take information from sbi_query_memory.
*/
device_type = "memory";
reg = <0x0 0x40000000>; /* 1GB at 0x0 */
reg = <0x80000000 0x40000000>; /* 1GB at 0x80000000 */
};
soc {
#address-cells = <2>;
#size-cells = <2>;
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <1>;
compatible = "simple-bus";
@ -67,7 +82,9 @@
timer0: timer@0 {
compatible = "riscv,timer";
interrupts = < 1 >;
reg = < 0x40000000 0x0008 >, /* rtc */
< 0x40000008 0x1000 >; /* timecmp */
interrupts = < 5 >;
interrupt-parent = < &pic0 >;
clock-frequency = < 400000000 >;
};

View File

@ -85,6 +85,14 @@ expects to fetch
.Pa /boot/loader.rc
from the specified server before loading any other files.
.Pp
.Nm
defaults to a conservative 1024 byte NFS data packet size.
This may be changed by setting the
.Va nfs.read_size
variable in
.Pa /boot/loader.conf .
Valid values range from 1024 to 4096 bytes.
.Pp
In all other respects,
.Nm
acts just like
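As a point of reference for the tunable documented above, a minimal /boot/loader.conf sketch would be a single line (4096 is only an example value, picked from the documented 1024-4096 range):
nfs.read_size="4096"
With that set, pxeboot requests 4096-byte NFS reads instead of the conservative 1024-byte default.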

View File

@ -1772,7 +1772,7 @@ cam_periph_error(union ccb *ccb, cam_flags camflags,
xpt_print(ccb->ccb_h.path, "Retrying command\n");
}
if (devctl_err)
if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
cam_periph_devctl_notify(orig_ccb);
if ((action & SSQ_LOST) != 0) {

View File

@ -1056,7 +1056,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x00, 0x1C, SS_RDEF, /* XXX TBD */
"Verify operation in progress") },
/* DT B */
{ SST(0x00, 0x1D, SS_RDEF, /* XXX TBD */
{ SST(0x00, 0x1D, SS_NOP,
"ATA pass through information available") },
/* DT R MAEBKV */
{ SST(0x00, 0x1E, SS_RDEF, /* XXX TBD */
@ -1065,7 +1065,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x00, 0x1F, SS_RDEF, /* XXX TBD */
"Logical unit transitioning to another power condition") },
/* DT P B */
{ SST(0x00, 0x20, SS_RDEF, /* XXX TBD */
{ SST(0x00, 0x20, SS_NOP,
"Extended copy information available") },
/* D */
{ SST(0x00, 0x21, SS_RDEF, /* XXX TBD */
@ -4652,6 +4652,53 @@ scsi_sense_progress_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
scsi_progress_sbuf(sb, progress_val);
}
void
scsi_sense_ata_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
u_int sense_len, uint8_t *cdb, int cdb_len,
struct scsi_inquiry_data *inq_data,
struct scsi_sense_desc_header *header)
{
struct scsi_sense_ata_ret_desc *res;
res = (struct scsi_sense_ata_ret_desc *)header;
sbuf_printf(sb, "ATA status: %02x (%s%s%s%s%s%s%s%s), ",
res->status,
(res->status & 0x80) ? "BSY " : "",
(res->status & 0x40) ? "DRDY " : "",
(res->status & 0x20) ? "DF " : "",
(res->status & 0x10) ? "SERV " : "",
(res->status & 0x08) ? "DRQ " : "",
(res->status & 0x04) ? "CORR " : "",
(res->status & 0x02) ? "IDX " : "",
(res->status & 0x01) ? "ERR" : "");
if (res->status & 1) {
sbuf_printf(sb, "error: %02x (%s%s%s%s%s%s%s%s), ",
res->error,
(res->error & 0x80) ? "ICRC " : "",
(res->error & 0x40) ? "UNC " : "",
(res->error & 0x20) ? "MC " : "",
(res->error & 0x10) ? "IDNF " : "",
(res->error & 0x08) ? "MCR " : "",
(res->error & 0x04) ? "ABRT " : "",
(res->error & 0x02) ? "NM " : "",
(res->error & 0x01) ? "ILI" : "");
}
if (res->flags & SSD_DESC_ATA_FLAG_EXTEND) {
sbuf_printf(sb, "count: %02x%02x, ",
res->count_15_8, res->count_7_0);
sbuf_printf(sb, "LBA: %02x%02x%02x%02x%02x%02x, ",
res->lba_47_40, res->lba_39_32, res->lba_31_24,
res->lba_23_16, res->lba_15_8, res->lba_7_0);
} else {
sbuf_printf(sb, "count: %02x, ", res->count_7_0);
sbuf_printf(sb, "LBA: %02x%02x%02x, ",
res->lba_23_16, res->lba_15_8, res->lba_7_0);
}
sbuf_printf(sb, "device: %02x, ", res->device);
}
/*
* Generic sense descriptor printing routine. This is used when we have
* not yet implemented a specific printing routine for this descriptor.
@ -4698,6 +4745,7 @@ struct scsi_sense_desc_printer {
{SSD_DESC_FRU, scsi_sense_fru_sbuf},
{SSD_DESC_STREAM, scsi_sense_stream_sbuf},
{SSD_DESC_BLOCK, scsi_sense_block_sbuf},
{SSD_DESC_ATA, scsi_sense_ata_sbuf},
{SSD_DESC_PROGRESS, scsi_sense_progress_sbuf}
};

View File

@ -3682,6 +3682,10 @@ void scsi_sense_progress_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
u_int sense_len, uint8_t *cdb, int cdb_len,
struct scsi_inquiry_data *inq_data,
struct scsi_sense_desc_header *header);
void scsi_sense_ata_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
u_int sense_len, uint8_t *cdb, int cdb_len,
struct scsi_inquiry_data *inq_data,
struct scsi_sense_desc_header *header);
void scsi_sense_generic_sbuf(struct sbuf *sb, struct scsi_sense_data *sense,
u_int sense_len, uint8_t *cdb, int cdb_len,
struct scsi_inquiry_data *inq_data,

View File

@ -6062,8 +6062,10 @@ arc_init(void)
* Allow the tunables to override our calculations if they are
* reasonable.
*/
if (zfs_arc_max > arc_abs_min && zfs_arc_max < kmem_size())
if (zfs_arc_max > arc_abs_min && zfs_arc_max < kmem_size()) {
arc_c_max = zfs_arc_max;
arc_c_min = MIN(arc_c_min, arc_c_max);
}
if (zfs_arc_min > arc_abs_min && zfs_arc_min <= arc_c_max)
arc_c_min = zfs_arc_min;
#endif
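A worked illustration of the clamp added above, using hypothetical sizes: if arc_init() computed arc_c_min as 4 GB but vfs.zfs.arc_max is tuned down to 1 GB, then arc_c_max becomes 1 GB and arc_c_min = MIN(4 GB, 1 GB) = 1 GB, preserving the arc_c_min <= arc_c_max invariant; previously a small zfs_arc_max could leave arc_c_min above arc_c_max.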

View File

@ -85,13 +85,6 @@ struct dsl_pool;
*/
#define DS_FIELD_BOOKMARK_NAMES "com.delphix:bookmarks"
/*
* This field is present (with value=0) if this dataset may contain large
* blocks (>128KB). If it is present, then this dataset
* is counted in the refcount of the SPA_FEATURE_LARGE_BLOCKS feature.
*/
#define DS_FIELD_LARGE_BLOCKS "org.open-zfs:large_blocks"
/*
* These fields are set on datasets that are in the middle of a resumable
* receive, and allow the sender to resume the send if it is interrupted.

View File

@ -777,7 +777,8 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
if (vd->vdev_spa->spa_splitting_newspa ||
(vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
vd->vdev_spa->spa_load_state == SPA_LOAD_NONE)) {
vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
vd->vdev_spa->spa_load_state == SPA_LOAD_CREATE)) {
/*
* We are dealing with a vdev that hasn't been previously
* opened (since boot), and we are not loading an

View File

@ -58,7 +58,6 @@ __FBSDID("$FreeBSD$");
/* Cannot get exact size in 64-bit due to alignment issue of entire struct. */
CTASSERT((sizeof(struct md_ioctl32)+4) == 436);
CTASSERT(sizeof(struct ioc_read_toc_entry32) == 8);
CTASSERT(sizeof(struct ioc_toc_header32) == 4);
CTASSERT(sizeof(struct mem_range_op32) == 12);
CTASSERT(sizeof(struct pci_conf_io32) == 36);
CTASSERT(sizeof(struct pci_match_conf32) == 44);
@ -138,25 +137,6 @@ freebsd32_ioctl_md(struct thread *td, struct freebsd32_ioctl_args *uap,
}
static int
freebsd32_ioctl_ioc_toc_header(struct thread *td,
struct freebsd32_ioctl_args *uap, struct file *fp)
{
struct ioc_toc_header toch;
struct ioc_toc_header32 toch32;
int error;
if ((error = copyin(uap->data, &toch32, sizeof(toch32))))
return (error);
CP(toch32, toch, len);
CP(toch32, toch, starting_track);
CP(toch32, toch, ending_track);
error = fo_ioctl(fp, CDIOREADTOCHEADER, (caddr_t)&toch,
td->td_ucred, td);
return (error);
}
static int
freebsd32_ioctl_ioc_read_toc(struct thread *td,
struct freebsd32_ioctl_args *uap, struct file *fp)
@ -441,10 +421,6 @@ freebsd32_ioctl(struct thread *td, struct freebsd32_ioctl_args *uap)
error = freebsd32_ioctl_ioc_read_toc(td, uap, fp);
break;
case CDIOREADTOCHEADER_32:
error = freebsd32_ioctl_ioc_toc_header(td, uap, fp);
break;
case FIODGNAME_32:
error = freebsd32_ioctl_fiodgname(td, uap, fp);
break;

View File

@ -36,12 +36,6 @@
typedef __uint32_t caddr_t32;
struct ioc_toc_header32 {
u_short len;
u_char starting_track;
u_char ending_track;
};
struct ioc_read_toc_entry32 {
u_char address_format;
u_char starting_track;
@ -115,7 +109,6 @@ struct pci_conf_io32 {
};
#define CDIOREADTOCENTRYS_32 _IOWR('c', 5, struct ioc_read_toc_entry32)
#define CDIOREADTOCHEADER_32 _IOR('c', 4, struct ioc_toc_header32)
#define MDIOCATTACH_32 _IOC(IOC_INOUT, 'm', 0, sizeof(struct md_ioctl32) + 4)
#define MDIOCDETACH_32 _IOC(IOC_INOUT, 'm', 1, sizeof(struct md_ioctl32) + 4)
#define MDIOCQUERY_32 _IOC(IOC_INOUT, 'm', 2, sizeof(struct md_ioctl32) + 4)

View File

@ -1949,6 +1949,7 @@ device xmphy # XaQti XMAC II
# (and SMC COM90c66 in '56 compatibility mode) adapters.
# cxgb: Chelsio T3 based 1GbE/10GbE PCIe Ethernet adapters.
# cxgbe:Chelsio T4 and T5 based 1GbE/10GbE/40GbE PCIe Ethernet adapters.
# cxgbev: Chelsio T4 and T5 based PCIe Virtual Functions.
# dc: Support for PCI fast ethernet adapters based on the DEC/Intel 21143
# and various workalikes including:
# the ADMtek AL981 Comet and AN985 Centaur, the ASIX Electronics
@ -2132,6 +2133,7 @@ device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'')
device cxgb # Chelsio T3 10 Gigabit Ethernet
device cxgb_t3fw # Chelsio T3 10 Gigabit Ethernet firmware
device cxgbe # Chelsio T4 and T5 1GbE/10GbE/40GbE
device cxgbev # Chelsio T4 and T5 1GbE/10GbE/40GbE VF
device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel Pro/1000 Gigabit Ethernet
device igb # Intel Pro/1000 PCIE Gigabit Ethernet

View File

@ -1236,7 +1236,7 @@ dev/ciss/ciss.c optional ciss
dev/cm/smc90cx6.c optional cm
dev/cmx/cmx.c optional cmx
dev/cmx/cmx_pccard.c optional cmx pccard
dev/cpufreq/ichss.c optional cpufreq
dev/cpufreq/ichss.c optional cpufreq pci
dev/cs/if_cs.c optional cs
dev/cs/if_cs_isa.c optional cs isa
dev/cs/if_cs_pccard.c optional cs pccard
@ -1281,8 +1281,12 @@ dev/cxgbe/t4_l2t.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_tracer.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_vf.c optional cxgbev pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/common/t4_hw.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/common/t4vf_hw.c optional cxgbev pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
t4fw_cfg.c optional cxgbe \
compile-with "${AWK} -f $S/tools/fw_stub.awk t4fw_cfg.fw:t4fw_cfg t4fw_cfg_uwire.fw:t4fw_cfg_uwire t4fw.fw:t4fw -mt4fw_cfg -c${.TARGET}" \
no-implicit-rule before-depend local \

View File

@ -1,4 +1,6 @@
# $FreeBSD$
arm/annapurna/alpine/alpine_ccu.c optional al_ccu fdt
arm/annapurna/alpine/alpine_nb_service.c optional al_nb_service fdt
arm/arm/autoconf.c standard
arm/arm/bcopy_page.S standard
arm/arm/bcopyinout.S standard
@ -94,6 +96,7 @@ cddl/dev/dtrace/arm/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}"
cddl/dev/fbt/arm/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}"
crypto/blowfish/bf_enc.c optional crypto | ipsec
crypto/des/des_enc.c optional crypto | ipsec | netsmb
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
dev/dwc/if_dwc.c optional dwc
dev/dwc/if_dwc_if.m optional dwc
dev/fb/fb.c optional sc

View File

@ -38,6 +38,8 @@ arm/allwinner/clk/aw_pll.c optional aw_ccu \
arm/allwinner/clk/aw_thsclk.c optional aw_ccu
arm/allwinner/clk/aw_usbclk.c optional aw_ccu
arm/allwinner/if_awg.c optional awg
arm/annapurna/alpine/alpine_ccu.c optional al_ccu fdt
arm/annapurna/alpine/alpine_nb_service.c optional al_nb_service fdt
arm/arm/generic_timer.c standard
arm/arm/gic.c standard
arm/arm/gic_fdt.c optional fdt
@ -97,6 +99,7 @@ crypto/blowfish/bf_enc.c optional crypto | ipsec
crypto/des/des_enc.c optional crypto | ipsec | netsmb
dev/acpica/acpi_if.m optional acpi
dev/ahci/ahci_generic.c optional ahci fdt
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
dev/hwpmc/hwpmc_arm64.c optional hwpmc
dev/hwpmc/hwpmc_arm64_md.c optional hwpmc
dev/mmc/host/dwmmc.c optional dwmmc fdt

View File

@ -6,7 +6,6 @@ AUTO_EOI_2 opt_auto_eoi.h
COUNT_XINVLTLB_HITS opt_smp.h
COUNT_IPIS opt_smp.h
MAXMEM
PERFMON
MPTABLE_FORCE_HTT
MP_WATCHDOG
NKPT opt_pmap.h

View File

@ -129,10 +129,10 @@ int al_iofic_msix_moder_interval_config(void __iomem *regs_base, int group,
}
/*
* configure the vmid attributes for a given msix vector.
* configure the target-id attributes for a given msix vector.
*/
int al_iofic_msix_vmid_attributes_config(void __iomem *regs_base, int group,
uint8_t vector, uint32_t vmid, uint8_t vmid_en)
int al_iofic_msix_tgtid_attributes_config(void __iomem *regs_base, int group,
uint8_t vector, uint32_t tgtid, uint8_t tgtid_en)
{
struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
uint32_t reg = 0;
@ -141,14 +141,14 @@ int al_iofic_msix_vmid_attributes_config(void __iomem *regs_base, int group,
al_assert(group < AL_IOFIC_MAX_GROUPS);
AL_REG_FIELD_SET(reg,
INT_MSIX_VMID_MASK,
INT_MSIX_VMID_SHIFT,
vmid);
INT_MSIX_TGTID_MASK,
INT_MSIX_TGTID_SHIFT,
tgtid);
AL_REG_BIT_VAL_SET(reg,
INT_MSIX_VMID_EN_SHIFT,
vmid_en);
INT_MSIX_TGTID_EN_SHIFT,
tgtid_en);
al_reg_write32(&regs->grp_int_mod[group][vector].grp_int_vmid_reg, reg);
al_reg_write32(&regs->grp_int_mod[group][vector].grp_int_tgtid_reg, reg);
return 0;
}

View File

@ -117,17 +117,17 @@ int al_iofic_msix_moder_interval_config(void __iomem *regs_base, int group,
uint8_t vector, uint8_t interval);
/**
* configure the vmid attributes for a given msix vector.
* configure the tgtid attributes for a given msix vector.
*
* @param group the interrupt group
* @param vector index
* @param vmid the vmid value
* @param vmid_en take vmid from the intc
* @param tgtid the target-id value
* @param tgtid_en take target-id from the intc
*
* @return 0 on success. -EINVAL otherwise.
*/
int al_iofic_msix_vmid_attributes_config(void __iomem *regs_base, int group,
uint8_t vector, uint32_t vmid, uint8_t vmid_en);
int al_iofic_msix_tgtid_attributes_config(void __iomem *regs_base, int group,
uint8_t vector, uint32_t tgtid, uint8_t tgtid_en);
/**
* return the offset of the unmask register for a given group.

View File

@ -66,7 +66,7 @@ struct al_iofic_grp_ctrl {
struct al_iofic_grp_mod {
uint32_t grp_int_mod_reg; /* Interrupt moderation registerDedicated moderation in ... */
uint32_t grp_int_vmid_reg;
uint32_t grp_int_tgtid_reg;
};
struct al_iofic_regs {
@ -109,12 +109,12 @@ struct al_iofic_regs {
#define INT_MOD_INTV_MASK 0x000000FF
#define INT_MOD_INTV_SHIFT 0
/**** grp_int_vmid_reg register ****/
/* Interrupt vmid value registerDedicated reg ... */
#define INT_MSIX_VMID_MASK 0x0000FFFF
#define INT_MSIX_VMID_SHIFT 0
/* Interrupt vmid_en value registerDedicated reg ... */
#define INT_MSIX_VMID_EN_SHIFT 31
/**** grp_int_tgtid_reg register ****/
/* Interrupt tgtid value registerDedicated reg ... */
#define INT_MSIX_TGTID_MASK 0x0000FFFF
#define INT_MSIX_TGTID_SHIFT 0
/* Interrupt tgtid_en value registerDedicated reg ... */
#define INT_MSIX_TGTID_EN_SHIFT 31
#ifdef __cplusplus
}

View File

@ -355,7 +355,7 @@ struct al_nb_nb_version {
};
struct al_nb_sriov {
/* [0x0] */
uint32_t cpu_vmid[4];
uint32_t cpu_tgtid[4];
uint32_t rsrvd[4];
};
struct al_nb_dram_channels {
@ -403,7 +403,7 @@ struct al_nb_push_packet {
uint32_t pp_config;
uint32_t rsrvd_0[3];
/* [0x10] */
uint32_t pp_ext_awuser;
uint32_t pp_ext_attr;
uint32_t rsrvd_1[3];
/* [0x20] */
uint32_t pp_base_low;
@ -411,7 +411,7 @@ struct al_nb_push_packet {
uint32_t pp_base_high;
uint32_t rsrvd_2[2];
/* [0x30] */
uint32_t pp_sel_awuser;
uint32_t pp_sel_attr;
uint32_t rsrvd[51];
};
@ -853,8 +853,8 @@ Enables 4k hazard of post-barrier vs pre-barrier transactions. Otherwise, 64B ha
This value is sampled into the CP15 Configuration Base Address Register (CBAR) at reset. */
#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_39_32_MASK 0x000000FF
#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_39_32_SHIFT 0
#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_43_32_MASK_PKR 0x00000FFF
#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_43_32_SHIFT_PKR 0
#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_43_32_MASK_ALPINE_V2 0x00000FFF
#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_43_32_SHIFT_ALPINE_V2 0
/* GIC registers base [31:15].
This value is sampled into the CP15 Configuration Base Address Register (CBAR) at reset */
#define NB_GLOBAL_LGIC_BASE_LOW_BASED_31_15_MASK 0xFFFF8000
@ -1055,9 +1055,9 @@ Other access types are hazard check against the pre-barrier requests. */
/* Disable counter (wait 1000 NB cycles) before applying PoS enable/disable configuration */
#define NB_GLOBAL_ACF_MISC_POS_CONFIG_CNT_DIS (1 << 14)
/* Disable wr spliter A0 bug fixes */
#define NB_GLOBAL_ACF_MISC_WRSPLT_ALPINE_M0_MODE (1 << 16)
/* Disable wr spliter PKR bug fixes */
#define NB_GLOBAL_ACF_MISC_WRSPLT_ALPINE_A0_MODE (1 << 17)
#define NB_GLOBAL_ACF_MISC_WRSPLT_ALPINE_V1_M0_MODE (1 << 16)
/* Disable wr spliter ALPINE_V2 bug fixes */
#define NB_GLOBAL_ACF_MISC_WRSPLT_ALPINE_V1_A0_MODE (1 << 17)
/* Override the address parity calucation for write transactions going to IO-fabric */
#define NB_GLOBAL_ACF_MISC_NB_NIC_AWADDR_PAR_OVRD (1 << 18)
/* Override the data parity calucation for write transactions going to IO-fabric */
@ -1074,7 +1074,7 @@ Other access types are hazard check against the pre-barrier requests. */
#define NB_GLOBAL_ACF_MISC_CPU_DSB_FLUSH_DIS (1 << 26)
/* Enable DMB flush request to NB to SB PoS when barrier is terminted inside the processor cluster */
#define NB_GLOBAL_ACF_MISC_CPU_DMB_FLUSH_DIS (1 << 27)
/* Peakrock only: remap CPU address above 40 bits to Slave Error
/* Alpine V2 only: remap CPU address above 40 bits to Slave Error
INTERNAL */
#define NB_GLOBAL_ACF_MISC_ADDR43_40_REMAP_DIS (1 << 28)
/* Enable CPU WriteUnique to WriteNoSnoop trasform */
@ -1586,7 +1586,7 @@ enable - 0x1: Enable interrupt on overflow. */
/* Number of monitored events supported by the PMU. */
#define NB_MC_PMU_PMU_CONTROL_NUM_OF_EVENTS_MASK 0x00FC0000
#define NB_MC_PMU_PMU_CONTROL_NUM_OF_EVENTS_SHIFT 18
#define NB_MC_PMU_PMU_CONTROL_NUM_OF_EVENTS_SHIFT_ALPINE 19
#define NB_MC_PMU_PMU_CONTROL_NUM_OF_EVENTS_SHIFT_ALPINE_V1 19
/* Number of counters implemented by PMU. */
#define NB_MC_PMU_PMU_CONTROL_NUM_OF_CNTS_MASK 0x0F000000
#define NB_MC_PMU_PMU_CONTROL_NUM_OF_CNTS_SHIFT 24
@ -1659,6 +1659,9 @@ Note: This field must be changed for larger counters. */
/* Revision number (Major) */
#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_VAL_ALPINE_V1 2
#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_VAL_ALPINE_V2 3
#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_VAL_ALPINE_V3 4
/* Date of release */
#define NB_NB_VERSION_VERSION_DATE_DAY_MASK 0x001F0000
#define NB_NB_VERSION_VERSION_DATE_DAY_SHIFT 16
@ -1672,10 +1675,10 @@ Note: This field must be changed for larger counters. */
#define NB_NB_VERSION_VERSION_RESERVED_MASK 0xC0000000
#define NB_NB_VERSION_VERSION_RESERVED_SHIFT 30
/**** cpu_vmid register ****/
/* Target VMID */
#define NB_SRIOV_CPU_VMID_VAL_MASK 0x000000FF
#define NB_SRIOV_CPU_VMID_VAL_SHIFT 0
/**** cpu_tgtid register ****/
/* Target-ID */
#define NB_SRIOV_CPU_TGTID_VAL_MASK 0x000000FF
#define NB_SRIOV_CPU_TGTID_VAL_SHIFT 0
/**** DRAM_0_Control register ****/
/* Controller Idle
@ -1807,7 +1810,7 @@ Parity bits are still generated per transaction */
#define NB_PUSH_PACKET_PP_EXT_AWUSER_AWUSER_SHIFT 0
/**** pp_sel_awuser register ****/
/* Select whether to use addr[63:48] or PP awmisc as vmid.
/* Select whether to use addr[63:48] or PP awmisc as tgtid.
Each bit if set to 1 selects the corresponding address bit. Otherwise, selects the corersponding awmis bit. */
#define NB_PUSH_PACKET_PP_SEL_AWUSER_SEL_MASK 0x0000FFFF
#define NB_PUSH_PACKET_PP_SEL_AWUSER_SEL_SHIFT 0

View File

@ -447,11 +447,12 @@ struct al_pbs_target_id_enforcement {
};
struct al_pbs_regs {
struct al_pbs_unit unit; /* [0x0] */
struct al_pbs_low_latency_sram_remap low_latency_sram_remap;
/* [0x250] */
uint32_t rsrvd_0[88];
struct al_pbs_target_id_enforcement target_id_enforcement; /* [0x400] */
struct al_pbs_unit unit; /* [0x0] */
struct al_pbs_low_latency_sram_remap low_latency_sram_remap; /* [0x250] */
uint32_t rsrvd_0[24];
uint32_t iofic_base; /* [0x300] */
uint32_t rsrvd_1[63];
struct al_pbs_target_id_enforcement target_id_enforcement; /* [0x400] */
};
@ -849,50 +850,50 @@ struct al_pbs_low_latency_sram_remap low_latency_sram_remap;
* 2'b01 - select pcie_b[0]
* 2'b10 - select pcie_a[2]
*/
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_2_MASK 0x00000003
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_2_SHIFT 0
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_2_MASK 0x00000003
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_2_SHIFT 0
/*
* 2'b01 - select pcie_b[1]
* 2'b10 - select pcie_a[3]
*/
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_3_MASK 0x00000030
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_3_SHIFT 4
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_3_MASK 0x00000030
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_3_SHIFT 4
/*
* 2'b01 - select pcie_b[0]
* 2'b10 - select pcie_a[4]
*/
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_4_MASK 0x00000300
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_4_SHIFT 8
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_4_MASK 0x00000300
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_4_SHIFT 8
/*
* 2'b01 - select pcie_b[1]
* 2'b10 - select pcie_a[5]
*/
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_5_MASK 0x00003000
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_5_SHIFT 12
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_5_MASK 0x00003000
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_5_SHIFT 12
/*
* 2'b01 - select pcie_b[2]
* 2'b10 - select pcie_a[6]
*/
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_6_MASK 0x00030000
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_6_SHIFT 16
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_6_MASK 0x00030000
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_6_SHIFT 16
/*
* 2'b01 - select pcie_b[3]
* 2'b10 - select pcie_a[7]
*/
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_7_MASK 0x00300000
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_7_SHIFT 20
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_7_MASK 0x00300000
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_7_SHIFT 20
/*
* 2'b01 - select pcie_d[0]
* 2'b10 - select pcie_c[2]
*/
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_10_MASK 0x03000000
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_10_SHIFT 24
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_10_MASK 0x03000000
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_10_SHIFT 24
/*
* 2'b01 - select pcie_d[1]
* 2'b10 - select pcie_c[3]
*/
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_11_MASK 0x30000000
#define PBS_UNIT_SERDES_MUX_PIPE_PKR_SELECT_OH_SERDES_11_SHIFT 28
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_11_MASK 0x30000000
#define PBS_UNIT_SERDES_MUX_PIPE_ALPINE_V2_SELECT_OH_SERDES_11_SHIFT 28
/**** dma_io_master_map register ****/
/*
@ -978,6 +979,14 @@ struct al_pbs_low_latency_sram_remap low_latency_sram_remap;
#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWQOS_MASK 0x3C000000
#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWQOS_SHIFT 26
/**** cfg_axi_conf_3 register ****/
#define PBS_UNIT_CFG_AXI_CONF_3_TIMEOUT_LOW_MASK 0xFFFF
#define PBS_UNIT_CFG_AXI_CONF_3_TIMEOUT_LOW_SHIFT 0
#define PBS_UNIT_CFG_AXI_CONF_3_TIMEOUT_HI_MASK 0xFF0000
#define PBS_UNIT_CFG_AXI_CONF_3_TIMEOUT_HI_SHIFT 16
#define PBS_UNIT_CFG_AXI_CONF_3_TIMEOUT_SPI_HI_MASK 0xFF000000
#define PBS_UNIT_CFG_AXI_CONF_3_TIMEOUT_SPI_HI_SHIFT 24
/**** spi_mst_conf_0 register ****/
/*
* Sets the SPI master Configuration. For details see the SPI section in the
@ -1137,9 +1146,9 @@ struct al_pbs_low_latency_sram_remap low_latency_sram_remap;
#define PBS_UNIT_CHIP_ID_DEV_ID_MASK 0xFFFF0000
#define PBS_UNIT_CHIP_ID_DEV_ID_SHIFT 16
#define PBS_UNIT_CHIP_ID_DEV_ID_ALPINE 0
#define PBS_UNIT_CHIP_ID_DEV_ID_PEAKROCK 1
#define PBS_UNIT_CHIP_ID_DEV_ID_COYOTE 2
#define PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V1 0
#define PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V2 1
#define PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V3 2
/**** uart0_conf_status register ****/
/*
@ -1420,56 +1429,56 @@ struct al_pbs_low_latency_sram_remap low_latency_sram_remap;
* 2'b01 - select sata_b[0]
* 2'b10 - select eth_a[0]
*/
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_8_MASK 0x00000003
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_8_SHIFT 0
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_8_MASK 0x00000003
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_8_SHIFT 0
/*
* 3'b001 - select sata_b[1]
* 3'b010 - select eth_b[0]
* 3'b100 - select eth_a[1]
*/
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_9_MASK 0x00000070
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_9_SHIFT 4
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_9_MASK 0x00000070
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_9_SHIFT 4
/*
* 3'b001 - select sata_b[2]
* 3'b010 - select eth_c[0]
* 3'b100 - select eth_a[2]
*/
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_10_MASK 0x00000700
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_10_SHIFT 8
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_10_MASK 0x00000700
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_10_SHIFT 8
/*
* 3'b001 - select sata_b[3]
* 3'b010 - select eth_d[0]
* 3'b100 - select eth_a[3]
*/
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_11_MASK 0x00007000
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_11_SHIFT 12
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_11_MASK 0x00007000
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_11_SHIFT 12
/*
* 2'b01 - select eth_a[0]
* 2'b10 - select sata_a[0]
*/
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_12_MASK 0x00030000
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_12_SHIFT 16
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_12_MASK 0x00030000
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_12_SHIFT 16
/*
* 3'b001 - select eth_b[0]
* 3'b010 - select eth_c[1]
* 3'b100 - select sata_a[1]
*/
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_13_MASK 0x00700000
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_13_SHIFT 20
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_13_MASK 0x00700000
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_13_SHIFT 20
/*
* 3'b001 - select eth_a[0]
* 3'b010 - select eth_c[2]
* 3'b100 - select sata_a[2]
*/
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_14_MASK 0x07000000
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_14_SHIFT 24
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_14_MASK 0x07000000
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_14_SHIFT 24
/*
* 3'b001 - select eth_d[0]
* 3'b010 - select eth_c[3]
* 3'b100 - select sata_a[3]
*/
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_15_MASK 0x70000000
#define PBS_UNIT_SERDES_MUX_MULTI_0_PKR_SELECT_OH_SERDES_15_SHIFT 28
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_15_MASK 0x70000000
#define PBS_UNIT_SERDES_MUX_MULTI_0_ALPINE_V2_SELECT_OH_SERDES_15_SHIFT 28
/**** serdes_mux_multi_1 register ****/
/* SerDes one hot mux control. For details see datasheet. */
@ -1632,62 +1641,62 @@ struct al_pbs_low_latency_sram_remap low_latency_sram_remap;
* 2'b01 - eth_a[0] from serdes_8
* 2'b10 - eth_a[0] from serdes_14
*/
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_A_0_MASK 0x00000003
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_A_0_SHIFT 0
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_A_0_MASK 0x00000003
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_A_0_SHIFT 0
/*
* 2'b01 - eth_b[0] from serdes_9
* 2'b10 - eth_b[0] from serdes_13
*/
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_B_0_MASK 0x00000030
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_B_0_SHIFT 4
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_B_0_MASK 0x00000030
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_B_0_SHIFT 4
/*
* 2'b01 - eth_c[0] from serdes_10
* 2'b10 - eth_c[0] from serdes_12
*/
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_C_0_MASK 0x00000300
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_C_0_SHIFT 8
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_C_0_MASK 0x00000300
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_C_0_SHIFT 8
/*
* 2'b01 - eth_d[0] from serdes_11
* 2'b10 - eth_d[0] from serdes_15
*/
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_D_0_MASK 0x00003000
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_D_0_SHIFT 12
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_D_0_MASK 0x00003000
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_D_0_SHIFT 12
/* which lane's is master clk */
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_A_ICK_MASTER_MASK 0x00030000
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_A_ICK_MASTER_SHIFT 16
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_A_ICK_MASTER_MASK 0x00030000
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_A_ICK_MASTER_SHIFT 16
/* which lane's is master clk */
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_C_ICK_MASTER_MASK 0x00300000
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_C_ICK_MASTER_SHIFT 20
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_C_ICK_MASTER_MASK 0x00300000
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_C_ICK_MASTER_SHIFT 20
/* enable xlaui on eth a */
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_A_XLAUI_ENABLE (1 << 24)
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_A_XLAUI_ENABLE (1 << 24)
/* enable xlaui on eth c */
#define PBS_UNIT_SERDES_MUX_ETH_PKR_SELECT_OH_ETH_C_XLAUI_ENABLE (1 << 28)
#define PBS_UNIT_SERDES_MUX_ETH_ALPINE_V2_SELECT_OH_ETH_C_XLAUI_ENABLE (1 << 28)
/**** serdes_mux_pcie register ****/
/*
* 2'b01 - select pcie_b[0] from serdes 2
* 2'b10 - select pcie_b[0] from serdes 4
*/
#define PBS_UNIT_SERDES_MUX_PCIE_PKR_SELECT_OH_PCIE_B_0_MASK 0x00000003
#define PBS_UNIT_SERDES_MUX_PCIE_PKR_SELECT_OH_PCIE_B_0_SHIFT 0
#define PBS_UNIT_SERDES_MUX_PCIE_ALPINE_V2_SELECT_OH_PCIE_B_0_MASK 0x00000003
#define PBS_UNIT_SERDES_MUX_PCIE_ALPINE_V2_SELECT_OH_PCIE_B_0_SHIFT 0
/*
* 2'b01 - select pcie_b[1] from serdes 3
* 2'b10 - select pcie_b[1] from serdes 5
*/
#define PBS_UNIT_SERDES_MUX_PCIE_PKR_SELECT_OH_PCIE_B_1_MASK 0x00000030
#define PBS_UNIT_SERDES_MUX_PCIE_PKR_SELECT_OH_PCIE_B_1_SHIFT 4
#define PBS_UNIT_SERDES_MUX_PCIE_ALPINE_V2_SELECT_OH_PCIE_B_1_MASK 0x00000030
#define PBS_UNIT_SERDES_MUX_PCIE_ALPINE_V2_SELECT_OH_PCIE_B_1_SHIFT 4
/*
* 2'b01 - select pcie_d[0] from serdes 10
* 2'b10 - select pcie_d[0] from serdes 12
*/
#define PBS_UNIT_SERDES_MUX_PCIE_PKR_SELECT_OH_PCIE_D_0_MASK 0x00000300
#define PBS_UNIT_SERDES_MUX_PCIE_PKR_SELECT_OH_PCIE_D_0_SHIFT 8
#define PBS_UNIT_SERDES_MUX_PCIE_ALPINE_V2_SELECT_OH_PCIE_D_0_MASK 0x00000300
#define PBS_UNIT_SERDES_MUX_PCIE_ALPINE_V2_SELECT_OH_PCIE_D_0_SHIFT 8
/*
* 2'b01 - select pcie_d[1] from serdes 11
* 2'b10 - select pcie_d[1] from serdes 13
*/
#define PBS_UNIT_SERDES_MUX_PCIE_PKR_SELECT_OH_PCIE_D_1_MASK 0x00003000
#define PBS_UNIT_SERDES_MUX_PCIE_PKR_SELECT_OH_PCIE_D_1_SHIFT 12
#define PBS_UNIT_SERDES_MUX_PCIE_ALPINE_V2_SELECT_OH_PCIE_D_1_MASK 0x00003000
#define PBS_UNIT_SERDES_MUX_PCIE_ALPINE_V2_SELECT_OH_PCIE_D_1_SHIFT 12
/**** serdes_mux_sata register ****/
/*

View File

@ -96,6 +96,8 @@ __FBSDID("$FreeBSD$");
#define AL_PCIE_PARSE_LANES(v) (((1 << v) - 1) << \
PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT)
#define AL_PCIE_FLR_DONE_INTERVAL 10
/**
* Static functions
*/
@ -183,10 +185,6 @@ al_pcie_port_link_config(
return -EINVAL;
}
al_dbg("PCIe %d: link config: max speed gen %d, max lanes %d, reversal %s\n",
pcie_port->port_id, link_params->max_speed,
pcie_port->max_lanes, link_params->enable_reversal? "enable" : "disable");
al_pcie_port_link_speed_ctrl_set(pcie_port, link_params->max_speed);
/* Change Max Payload Size, if needed.
@ -220,12 +218,6 @@ al_pcie_port_link_config(
(max_lanes + (max_lanes-1))
<< PCIE_PORT_LINK_CTRL_LINK_CAPABLE_SHIFT);
/* TODO: add support for reversal mode */
if (link_params->enable_reversal) {
al_err("PCIe %d: enabling reversal mode not implemented\n",
pcie_port->port_id);
return -ENOSYS;
}
return 0;
}
@ -364,12 +356,9 @@ al_pcie_rev_id_get(
PBS_UNIT_CHIP_ID_DEV_ID_MASK,
PBS_UNIT_CHIP_ID_DEV_ID_SHIFT);
if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE) {
rev_id = AL_REG_FIELD_GET(
chip_id,
PBS_UNIT_CHIP_ID_DEV_REV_ID_MASK,
PBS_UNIT_CHIP_ID_DEV_REV_ID_SHIFT);
} else if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_PEAKROCK) {
if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V1) {
rev_id = AL_PCIE_REV_ID_1;
} else if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V2) {
struct al_pcie_revx_regs __iomem *regs =
(struct al_pcie_revx_regs __iomem *)pcie_reg_base;
uint32_t dev_id;
@ -469,20 +458,6 @@ al_pcie_ib_hcrd_os_ob_reads_config_default(
al_pcie_port_ib_hcrd_os_ob_reads_config(pcie_port, &ib_hcrd_os_ob_reads_config);
};
/** return AL_TRUE is link started (LTSSM enabled) and AL_FALSE otherwise */
static al_bool
al_pcie_is_link_started(struct al_pcie_port *pcie_port)
{
struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
uint32_t port_init = al_reg_read32(regs->app.global_ctrl.port_init);
uint8_t ltssm_en = AL_REG_FIELD_GET(port_init,
PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_SHIFT);
return ltssm_en;
}
/** return AL_TRUE if link is up, AL_FALSE otherwise */
static al_bool
al_pcie_check_link(
@ -650,18 +625,6 @@ al_pcie_port_gen3_params_config(struct al_pcie_port *pcie_port,
return 0;
}
static int
al_pcie_port_tl_credits_config(
struct al_pcie_port *pcie_port,
const struct al_pcie_tl_credits_params *tl_credits __attribute__((__unused__)))
{
al_err("PCIe %d: transport layer credits config not implemented\n",
pcie_port->port_id);
return -ENOSYS;
}
static int
al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
const struct al_pcie_pf_config_params *pf_params)
@ -680,22 +643,21 @@ al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
regs->core_space[pf_num].pcie_pm_cap_base,
AL_FIELD_MASK(26, 25) | AL_FIELD_MASK(31, 28), 0);
/* Disable FLR capability */
/* Set/Clear FLR bit */
if (pf_params->cap_flr_dis)
al_reg_write32_masked(
regs->core_space[pf_num].pcie_dev_cap_base,
AL_BIT(28), 0);
AL_PCI_EXP_DEVCAP_FLR, 0);
else
al_reg_write32_masked(
regs->core_space[pcie_pf->pf_num].pcie_dev_cap_base,
AL_PCI_EXP_DEVCAP_FLR, AL_PCI_EXP_DEVCAP_FLR);
/* Disable ASPM capability */
if (pf_params->cap_aspm_dis) {
al_reg_write32_masked(
regs->core_space[pf_num].pcie_cap_base + (AL_PCI_EXP_LNKCAP >> 2),
AL_PCI_EXP_LNKCAP_ASPMS, 0);
} else if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
al_warn("%s: ASPM support is enabled, please disable it\n",
__func__);
ret = -EINVAL;
goto done;
}
if (!pf_params->bar_params_valid) {
@ -743,8 +705,9 @@ al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
if (params->memory_space) {
if (size < AL_PCIE_MIN_MEMORY_BAR_SIZE) {
al_err("PCIe %d: memory BAR %d: size (0x%llx) less that minimal allowed value\n",
pcie_port->port_id, bar_idx, size);
al_err("PCIe %d: memory BAR %d: size (0x%jx) less that minimal allowed value\n",
pcie_port->port_id, bar_idx,
(uintmax_t)size);
ret = -EINVAL;
goto done;
}
@ -756,8 +719,9 @@ al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
}
if (size < AL_PCIE_MIN_IO_BAR_SIZE) {
al_err("PCIe %d: IO BAR %d: size (0x%llx) less that minimal allowed value\n",
pcie_port->port_id, bar_idx, size);
al_err("PCIe %d: IO BAR %d: size (0x%jx) less that minimal allowed value\n",
pcie_port->port_id, bar_idx,
(uintmax_t)size);
ret = -EINVAL;
goto done;
}
@ -765,9 +729,9 @@ al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
/* size must be power of 2 */
if (size & (size - 1)) {
al_err("PCIe %d: BAR %d:size (0x%llx) must be "
al_err("PCIe %d: BAR %d:size (0x%jx) must be "
"power of 2\n",
pcie_port->port_id, bar_idx, size);
pcie_port->port_id, bar_idx, (uintmax_t)size);
ret = -EINVAL;
goto done;
}
@ -826,8 +790,7 @@ al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
}
/* Open CPU generated msi and legacy interrupts in pcie wrapper logic */
if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
(pcie_port->rev_id == AL_PCIE_REV_ID_1)) {
if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_0, (1 << 21));
} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
(pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
@ -853,13 +816,7 @@ al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
* Restore the original value after the write to app.soc.mask_msi_leg_0
* register.
*/
if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
uint32_t backup;
backup = al_reg_read32(&regs->app.int_grp_a->mask);
al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, (1 << 22));
al_reg_write32(&regs->app.int_grp_a->mask, backup);
} else if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, (1 << 22));
} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
(pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
@ -878,22 +835,6 @@ al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
return ret;
}
static void
al_pcie_port_features_config(
struct al_pcie_port *pcie_port,
const struct al_pcie_features *features)
{
struct al_pcie_regs *regs = pcie_port->regs;
al_assert(pcie_port->rev_id > AL_PCIE_REV_ID_0);
al_reg_write32_masked(
&regs->app.ctrl_gen->features,
PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX,
features->sata_ep_msi_fix ?
PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX : 0);
}
static int
al_pcie_port_sris_config(
struct al_pcie_port *pcie_port,
@ -916,6 +857,9 @@ al_pcie_port_sris_config(
switch (pcie_port->rev_id) {
case AL_PCIE_REV_ID_3:
al_reg_write32_masked(&regs->app.cfg_func_ext->cfg,
PCIE_W_CFG_FUNC_EXT_CFG_APP_SRIS_MODE,
PCIE_W_CFG_FUNC_EXT_CFG_APP_SRIS_MODE);
case AL_PCIE_REV_ID_2:
al_reg_write32_masked(regs->app.global_ctrl.sris_kp_counter,
PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_MASK |
@ -989,6 +933,34 @@ al_pcie_port_max_num_of_pfs_get(struct al_pcie_port *pcie_port)
return 1;
}
/** Enable ecrc generation in outbound atu (Addressing RMN: 5119) */
static void al_pcie_ecrc_gen_ob_atu_enable(struct al_pcie_port *pcie_port, unsigned int pf_num)
{
struct al_pcie_regs *regs = pcie_port->regs;
int max_ob_atu = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
AL_PCIE_REV_3_ATU_NUM_OUTBOUND_REGIONS : AL_PCIE_REV_1_2_ATU_NUM_OUTBOUND_REGIONS;
int i;
for (i = 0; i < max_ob_atu; i++) {
al_bool enable = 0;
uint32_t reg = 0;
unsigned int func_num;
AL_REG_FIELD_SET(reg, 0xF, 0, i);
AL_REG_BIT_VAL_SET(reg, 31, AL_PCIE_ATU_DIR_OUTBOUND);
al_reg_write32(&regs->port_regs->iatu.index, reg);
reg = al_reg_read32(&regs->port_regs->iatu.cr2);
enable = AL_REG_BIT_GET(reg, 31) ? AL_TRUE : AL_FALSE;
reg = al_reg_read32(&regs->port_regs->iatu.cr1);
func_num = AL_REG_FIELD_GET(reg,
PCIE_IATU_CR1_FUNC_NUM_MASK,
PCIE_IATU_CR1_FUNC_NUM_SHIFT);
if ((enable == AL_TRUE) && (pf_num == func_num)) {
/* Set TD bit */
AL_REG_BIT_SET(reg, 8);
al_reg_write32(&regs->port_regs->iatu.cr1, reg);
}
}
}
/******************************************************************************/
/***************************** API Implementation *****************************/
/******************************************************************************/
@ -1025,12 +997,13 @@ al_pcie_port_handle_init(
/* Zero all regs */
al_memset(pcie_port->regs, 0, sizeof(struct al_pcie_regs));
if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
(pcie_port->rev_id == AL_PCIE_REV_ID_1)) {
if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
struct al_pcie_rev1_regs __iomem *regs =
(struct al_pcie_rev1_regs __iomem *)pcie_reg_base;
pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
pcie_port->regs->axi.ctrl.master_rctl = &regs->axi.ctrl.master_rctl;
pcie_port->regs->axi.ctrl.master_ctl = &regs->axi.ctrl.master_ctl;
pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
@ -1059,20 +1032,21 @@ al_pcie_port_handle_init(
pcie_port->regs->app.global_ctrl.pm_control = &regs->app.global_ctrl.pm_control;
pcie_port->regs->app.global_ctrl.events_gen[0] = &regs->app.global_ctrl.events_gen;
pcie_port->regs->app.debug = &regs->app.debug;
pcie_port->regs->app.soc_int[0].status_0 = &regs->app.soc_int.status_0;
pcie_port->regs->app.soc_int[0].status_1 = &regs->app.soc_int.status_1;
pcie_port->regs->app.soc_int[0].status_2 = &regs->app.soc_int.status_2;
pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = &regs->app.soc_int.mask_inta_leg_0;
pcie_port->regs->app.soc_int[0].mask_inta_leg_1 = &regs->app.soc_int.mask_inta_leg_1;
pcie_port->regs->app.soc_int[0].mask_inta_leg_2 = &regs->app.soc_int.mask_inta_leg_2;
pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = &regs->app.soc_int.mask_msi_leg_0;
pcie_port->regs->app.soc_int[0].mask_msi_leg_1 = &regs->app.soc_int.mask_msi_leg_1;
pcie_port->regs->app.soc_int[0].mask_msi_leg_2 = &regs->app.soc_int.mask_msi_leg_2;
pcie_port->regs->app.ctrl_gen = &regs->app.ctrl_gen;
pcie_port->regs->app.parity = &regs->app.parity;
pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a_m0;
pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b_m0;
} else {
pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a;
pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b;
}
pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a;
pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b;
pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
pcie_port->regs->core_space[0].pcie_pm_cap_base = &regs->core_space.pcie_pm_cap_base;
@ -1091,6 +1065,8 @@ al_pcie_port_handle_init(
(struct al_pcie_rev2_regs __iomem *)pcie_reg_base;
pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
pcie_port->regs->axi.ctrl.master_rctl = &regs->axi.ctrl.master_rctl;
pcie_port->regs->axi.ctrl.master_ctl = &regs->axi.ctrl.master_ctl;
pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
@ -1100,6 +1076,10 @@ al_pcie_port_handle_init(
pcie_port->regs->axi.ob_ctrl.io_start_h = &regs->axi.ob_ctrl.io_start_h;
pcie_port->regs->axi.ob_ctrl.io_limit_l = &regs->axi.ob_ctrl.io_limit_l;
pcie_port->regs->axi.ob_ctrl.io_limit_h = &regs->axi.ob_ctrl.io_limit_h;
pcie_port->regs->axi.ob_ctrl.tgtid_reg_ovrd = &regs->axi.ob_ctrl.tgtid_reg_ovrd;
pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_sel = &regs->axi.ob_ctrl.addr_high_reg_ovrd_sel;
pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_value = &regs->axi.ob_ctrl.addr_high_reg_ovrd_value;
pcie_port->regs->axi.ob_ctrl.addr_size_replace = &regs->axi.ob_ctrl.addr_size_replace;
pcie_port->regs->axi.pcie_global.conf = &regs->axi.pcie_global.conf;
pcie_port->regs->axi.conf.zero_lane0 = &regs->axi.conf.zero_lane0;
pcie_port->regs->axi.conf.zero_lane1 = &regs->axi.conf.zero_lane1;
@ -1120,11 +1100,20 @@ al_pcie_port_handle_init(
pcie_port->regs->app.global_ctrl.events_gen[0] = &regs->app.global_ctrl.events_gen;
pcie_port->regs->app.global_ctrl.corr_err_sts_int = &regs->app.global_ctrl.pended_corr_err_sts_int;
pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = &regs->app.global_ctrl.pended_uncorr_err_sts_int;
pcie_port->regs->app.global_ctrl.sris_kp_counter = &regs->app.global_ctrl.sris_kp_counter_value;
pcie_port->regs->app.debug = &regs->app.debug;
pcie_port->regs->app.ap_user_send_msg = &regs->app.ap_user_send_msg;
pcie_port->regs->app.soc_int[0].status_0 = &regs->app.soc_int.status_0;
pcie_port->regs->app.soc_int[0].status_1 = &regs->app.soc_int.status_1;
pcie_port->regs->app.soc_int[0].status_2 = &regs->app.soc_int.status_2;
pcie_port->regs->app.soc_int[0].status_3 = &regs->app.soc_int.status_3;
pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = &regs->app.soc_int.mask_inta_leg_0;
pcie_port->regs->app.soc_int[0].mask_inta_leg_1 = &regs->app.soc_int.mask_inta_leg_1;
pcie_port->regs->app.soc_int[0].mask_inta_leg_2 = &regs->app.soc_int.mask_inta_leg_2;
pcie_port->regs->app.soc_int[0].mask_inta_leg_3 = &regs->app.soc_int.mask_inta_leg_3;
pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = &regs->app.soc_int.mask_msi_leg_0;
pcie_port->regs->app.soc_int[0].mask_msi_leg_1 = &regs->app.soc_int.mask_msi_leg_1;
pcie_port->regs->app.soc_int[0].mask_msi_leg_2 = &regs->app.soc_int.mask_msi_leg_2;
pcie_port->regs->app.soc_int[0].mask_msi_leg_3 = &regs->app.soc_int.mask_msi_leg_3;
pcie_port->regs->app.ctrl_gen = &regs->app.ctrl_gen;
pcie_port->regs->app.parity = &regs->app.parity;
@ -1150,6 +1139,8 @@ al_pcie_port_handle_init(
struct al_pcie_rev3_regs __iomem *regs =
(struct al_pcie_rev3_regs __iomem *)pcie_reg_base;
pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
pcie_port->regs->axi.ctrl.master_rctl = &regs->axi.ctrl.master_rctl;
pcie_port->regs->axi.ctrl.master_ctl = &regs->axi.ctrl.master_ctl;
pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
@ -1159,6 +1150,13 @@ al_pcie_port_handle_init(
pcie_port->regs->axi.ob_ctrl.io_start_h = &regs->axi.ob_ctrl.io_start_h;
pcie_port->regs->axi.ob_ctrl.io_limit_l = &regs->axi.ob_ctrl.io_limit_l;
pcie_port->regs->axi.ob_ctrl.io_limit_h = &regs->axi.ob_ctrl.io_limit_h;
pcie_port->regs->axi.ob_ctrl.io_addr_mask_h = &regs->axi.ob_ctrl.io_addr_mask_h;
pcie_port->regs->axi.ob_ctrl.ar_msg_addr_mask_h = &regs->axi.ob_ctrl.ar_msg_addr_mask_h;
pcie_port->regs->axi.ob_ctrl.aw_msg_addr_mask_h = &regs->axi.ob_ctrl.aw_msg_addr_mask_h;
pcie_port->regs->axi.ob_ctrl.tgtid_reg_ovrd = &regs->axi.ob_ctrl.tgtid_reg_ovrd;
pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_sel = &regs->axi.ob_ctrl.addr_high_reg_ovrd_sel;
pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_value = &regs->axi.ob_ctrl.addr_high_reg_ovrd_value;
pcie_port->regs->axi.ob_ctrl.addr_size_replace = &regs->axi.ob_ctrl.addr_size_replace;
pcie_port->regs->axi.pcie_global.conf = &regs->axi.pcie_global.conf;
pcie_port->regs->axi.conf.zero_lane0 = &regs->axi.conf.zero_lane0;
pcie_port->regs->axi.conf.zero_lane1 = &regs->axi.conf.zero_lane1;
@ -1213,9 +1211,17 @@ al_pcie_port_handle_init(
pcie_port->regs->app.debug = &regs->app.debug;
for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
pcie_port->regs->app.soc_int[i].status_0 = &regs->app.soc_int_per_func[i].status_0;
pcie_port->regs->app.soc_int[i].status_1 = &regs->app.soc_int_per_func[i].status_1;
pcie_port->regs->app.soc_int[i].status_2 = &regs->app.soc_int_per_func[i].status_2;
pcie_port->regs->app.soc_int[i].status_3 = &regs->app.soc_int_per_func[i].status_3;
pcie_port->regs->app.soc_int[i].mask_inta_leg_0 = &regs->app.soc_int_per_func[i].mask_inta_leg_0;
pcie_port->regs->app.soc_int[i].mask_inta_leg_1 = &regs->app.soc_int_per_func[i].mask_inta_leg_1;
pcie_port->regs->app.soc_int[i].mask_inta_leg_2 = &regs->app.soc_int_per_func[i].mask_inta_leg_2;
pcie_port->regs->app.soc_int[i].mask_inta_leg_3 = &regs->app.soc_int_per_func[i].mask_inta_leg_3;
pcie_port->regs->app.soc_int[i].mask_msi_leg_0 = &regs->app.soc_int_per_func[i].mask_msi_leg_0;
pcie_port->regs->app.soc_int[i].mask_msi_leg_1 = &regs->app.soc_int_per_func[i].mask_msi_leg_1;
pcie_port->regs->app.soc_int[i].mask_msi_leg_2 = &regs->app.soc_int_per_func[i].mask_msi_leg_2;
pcie_port->regs->app.soc_int[i].mask_msi_leg_3 = &regs->app.soc_int_per_func[i].mask_msi_leg_3;
}
@ -1224,6 +1230,7 @@ al_pcie_port_handle_init(
pcie_port->regs->app.parity = &regs->app.parity;
pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
pcie_port->regs->app.cfg_func_ext = &regs->app.cfg_func_ext;
for (i = 0; i < AL_MAX_NUM_OF_PFS; i++)
pcie_port->regs->app.status_per_func[i] = &regs->app.status_per_func[i];
@ -1260,6 +1267,10 @@ al_pcie_port_handle_init(
/* set maximum number of physical functions */
pcie_port->max_num_of_pfs = al_pcie_port_max_num_of_pfs_get(pcie_port);
/* Clear 'nof_p_hdr' & 'nof_np_hdr' to later know if they where changed by the user */
pcie_port->ib_hcrd_config.nof_np_hdr = 0;
pcie_port->ib_hcrd_config.nof_p_hdr = 0;
al_dbg("pcie port handle initialized. port id: %d, rev_id %d, regs base %p\n",
port_id, pcie_port->rev_id, pcie_reg_base);
return 0;
@ -1294,6 +1305,12 @@ al_pcie_pf_handle_init(
return 0;
}
/** Get port revision ID */
int al_pcie_port_rev_id_get(struct al_pcie_port *pcie_port)
{
return pcie_port->rev_id;
}
/************************** Pre PCIe Port Enable API **************************/
/** configure pcie operating mode (root complex or endpoint) */
@ -1346,7 +1363,7 @@ al_pcie_port_operating_mode_config(
"EndPoint" : "Root Complex");
return 0;
}
al_info("PCIe %d: set operating mode to %s\n",
al_dbg("PCIe %d: set operating mode to %s\n",
pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
"EndPoint" : "Root Complex");
AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
@ -1362,6 +1379,7 @@ int
al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes)
{
struct al_pcie_regs *regs = pcie_port->regs;
uint32_t active_lanes_val;
if (al_pcie_port_is_enabled(pcie_port)) {
al_err("PCIe %d: already enabled, cannot set max lanes\n",
@ -1370,7 +1388,7 @@ al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes)
}
/* convert to bitmask format (4 ->'b1111, 2 ->'b11, 1 -> 'b1) */
uint32_t active_lanes_val = AL_PCIE_PARSE_LANES(lanes);
active_lanes_val = AL_PCIE_PARSE_LANES(lanes);
al_reg_write32_masked(regs->axi.pcie_global.conf,
(pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
@ -1387,11 +1405,7 @@ al_pcie_port_max_num_of_pfs_set(
struct al_pcie_port *pcie_port,
uint8_t max_num_of_pfs)
{
if (al_pcie_port_is_enabled(pcie_port)) {
al_err("PCIe %d: already enabled, cannot set max num of PFs\n",
pcie_port->port_id);
return -EINVAL;
}
struct al_pcie_regs *regs = pcie_port->regs;
if (pcie_port->rev_id == AL_PCIE_REV_ID_3)
al_assert(max_num_of_pfs <= REV3_MAX_NUM_OF_PFS);
@ -1400,6 +1414,33 @@ al_pcie_port_max_num_of_pfs_set(
pcie_port->max_num_of_pfs = max_num_of_pfs;
if (al_pcie_port_is_enabled(pcie_port) && (pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
al_bool is_multi_pf =
((op_mode == AL_PCIE_OPERATING_MODE_EP) && (pcie_port->max_num_of_pfs > 1));
/* Set maximum physical function numbers */
al_reg_write32_masked(
&regs->port_regs->timer_ctrl_max_func_num,
PCIE_PORT_GEN3_MAX_FUNC_NUM,
pcie_port->max_num_of_pfs - 1);
al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
/**
* in EP mode, when we have more than 1 PF we need to assert
* multi-pf support so the host scans all PFs
*/
al_reg_write32_masked((uint32_t __iomem *)
(&regs->core_space[0].config_header[0] +
(PCIE_BIST_HEADER_TYPE_BASE >> 2)),
PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK,
is_multi_pf ? PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK : 0);
al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
}
return 0;
}
@ -1503,6 +1544,28 @@ al_pcie_operating_mode_get(
return AL_PCIE_OPERATING_MODE_UNKNOWN;
}
/* PCIe AXI quality of service configuration */
void al_pcie_axi_qos_config(
struct al_pcie_port *pcie_port,
unsigned int arqos,
unsigned int awqos)
{
struct al_pcie_regs *regs = pcie_port->regs;
al_assert(pcie_port);
al_assert(arqos <= PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_VAL_MAX);
al_assert(awqos <= PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_VAL_MAX);
al_reg_write32_masked(
regs->axi.ctrl.master_arctl,
PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_MASK,
arqos << PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_SHIFT);
al_reg_write32_masked(
regs->axi.ctrl.master_awctl,
PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_MASK,
awqos << PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_SHIFT);
}
/**************************** PCIe Port Enable API ****************************/
/** Enable PCIe port (deassert reset) */
@ -1518,17 +1581,19 @@ al_pcie_port_enable(struct al_pcie_port *pcie_port)
/**
* Set inbound header credit and outstanding outbound reads defaults
* if the port initiator doesn't set it.
* Must be called before port enable (PCIE_EXIST)
*/
al_pcie_ib_hcrd_os_ob_reads_config_default(pcie_port);
if ((pcie_port->ib_hcrd_config.nof_np_hdr == 0) ||
(pcie_port->ib_hcrd_config.nof_p_hdr == 0))
al_pcie_ib_hcrd_os_ob_reads_config_default(pcie_port);
/*
* Disable ATS capability
* - must be done before core reset deasserted
* - rev_id 0 - no effect, but no harm
*/
if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
(pcie_port->rev_id == AL_PCIE_REV_ID_1) ||
if ((pcie_port->rev_id == AL_PCIE_REV_ID_1) ||
(pcie_port->rev_id == AL_PCIE_REV_ID_2)) {
al_reg_write32_masked(
regs->axi.ordering.pos_cntl,
@ -1679,26 +1744,8 @@ al_pcie_port_config(struct al_pcie_port *pcie_port,
}
if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
/* Set maximum physical function numbers */
al_reg_write32_masked(
&regs->port_regs->timer_ctrl_max_func_num,
PCIE_PORT_GEN3_MAX_FUNC_NUM,
pcie_port->max_num_of_pfs - 1);
al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
/**
* in EP mode, when we have more than 1 PF we need to assert
* multi-pf support so the host scan all PFs
*/
if ((op_mode == AL_PCIE_OPERATING_MODE_EP) && (pcie_port->max_num_of_pfs > 1)) {
al_reg_write32_masked((uint32_t __iomem *)
(&regs->core_space[0].config_header[0] +
(PCIE_BIST_HEADER_TYPE_BASE >> 2)),
PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK,
PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK);
}
/* Disable TPH next pointer */
for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
al_reg_write32_masked(regs->core_space[i].tph_cap_base,
@ -1713,6 +1760,8 @@ al_pcie_port_config(struct al_pcie_port *pcie_port,
if (status)
goto done;
al_pcie_port_max_num_of_pfs_set(pcie_port, pcie_port->max_num_of_pfs);
al_pcie_port_ram_parity_int_config(pcie_port, params->enable_ram_parity_int);
al_pcie_port_axi_parity_int_config(pcie_port, params->enable_axi_parity_int);
@ -1734,14 +1783,6 @@ al_pcie_port_config(struct al_pcie_port *pcie_port,
if (status)
goto done;
if (params->tl_credits)
status = al_pcie_port_tl_credits_config(pcie_port, params->tl_credits);
if (status)
goto done;
if (params->features)
al_pcie_port_features_config(pcie_port, params->features);
if (params->sris_params)
status = al_pcie_port_sris_config(pcie_port, params->sris_params,
params->link_params->max_speed);
@ -1904,6 +1945,19 @@ al_pcie_link_stop(struct al_pcie_port *pcie_port)
return 0;
}
/** return AL_TRUE is link started (LTSSM enabled) and AL_FALSE otherwise */
al_bool al_pcie_is_link_started(struct al_pcie_port *pcie_port)
{
struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
uint32_t port_init = al_reg_read32(regs->app.global_ctrl.port_init);
uint8_t ltssm_en = AL_REG_FIELD_GET(port_init,
PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_SHIFT);
return ltssm_en;
}
/* wait for link up indication */
int
al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
@ -1912,7 +1966,7 @@ al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
while (wait_count-- > 0) {
if (al_pcie_check_link(pcie_port, NULL)) {
al_info("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id);
al_dbg("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id);
return 0;
} else
al_dbg("PCIe_%d: No link up, %d attempts remaining\n",
@ -1920,7 +1974,7 @@ al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL);
}
al_info("PCIE_%d: link is not established in time\n",
al_dbg("PCIE_%d: link is not established in time\n",
pcie_port->port_id);
return ETIMEDOUT;
@ -1936,6 +1990,15 @@ al_pcie_link_status(struct al_pcie_port *pcie_port,
al_assert(status);
if (!al_pcie_port_is_enabled(pcie_port)) {
al_dbg("PCIe %d: port not enabled, no link.\n", pcie_port->port_id);
status->link_up = AL_FALSE;
status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
status->lanes = 0;
status->ltssm_state = 0;
return 0;
}
status->link_up = al_pcie_check_link(pcie_port, &status->ltssm_state);
if (!status->link_up) {
@ -1962,7 +2025,7 @@ al_pcie_link_status(struct al_pcie_port *pcie_port,
pcie_port->port_id, pcie_lnksta);
}
status->lanes = (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT;
al_info("PCIe %d: Link up. speed gen%d negotiated width %d\n",
al_dbg("PCIe %d: Link up. speed gen%d negotiated width %d\n",
pcie_port->port_id, status->speed, status->lanes);
return 0;
@ -2143,7 +2206,7 @@ al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, al_bool enable_axi_sno
struct al_pcie_regs *regs = pcie_port->regs;
/* Set snoop mode */
al_info("PCIE_%d: snoop mode %s\n",
al_dbg("PCIE_%d: snoop mode %s\n",
pcie_port->port_id, enable_axi_snoop ? "enable" : "disable");
if (enable_axi_snoop) {
@ -2311,6 +2374,19 @@ al_pcie_app_req_retry_set(
mask, (en == AL_TRUE) ? mask : 0);
}
/* Check if deferring incoming configuration requests is enabled or not */
al_bool al_pcie_app_req_retry_get_status(struct al_pcie_port *pcie_port)
{
struct al_pcie_regs *regs = pcie_port->regs;
uint32_t pm_control;
uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
PCIE_W_REV3_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN :
PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;
pm_control = al_reg_read32(regs->app.global_ctrl.pm_control);
return (pm_control & mask) ? AL_TRUE : AL_FALSE;
}
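A hedged sketch of how an EP driver might pair al_pcie_app_req_retry_set() with the new status query during initialization; the ordering is an assumption, not a documented requirement:
/* Hypothetical EP bring-up: defer incoming CFG requests until PF setup is done */
al_pcie_app_req_retry_set(pcie_port, AL_TRUE);
/* ... al_pcie_pf_config(), BAR programming, etc. ... */
if (al_pcie_app_req_retry_get_status(pcie_port))
	al_pcie_app_req_retry_set(pcie_port, AL_FALSE);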
/*************** Internal Address Translation Unit (ATU) API ******************/
/** program internal ATU region entry */
@ -2345,6 +2421,7 @@ al_pcie_atu_region_set(
if (!atu_region->enforce_ob_atu_region_set) {
al_err("PCIe %d: setting OB iATU after link is started is not allowed\n",
pcie_port->port_id);
al_assert(AL_FALSE);
return -EINVAL;
} else {
al_info("PCIe %d: setting OB iATU even after link is started\n",
@ -2369,7 +2446,63 @@ al_pcie_atu_region_set(
/* configure the limit, not needed when working in BAR match mode */
if (atu_region->match_mode == 0) {
uint32_t limit_reg_val;
if (pcie_port->rev_id > AL_PCIE_REV_ID_0) {
uint32_t *limit_ext_reg =
(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
&regs->app.atu.out_mask_pair[atu_region->index / 2] :
&regs->app.atu.in_mask_pair[atu_region->index / 2];
uint32_t limit_ext_reg_mask =
(atu_region->index % 2) ?
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
unsigned int limit_ext_reg_shift =
(atu_region->index % 2) ?
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
uint64_t limit_sz_msk =
atu_region->limit - atu_region->base_addr;
uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >>
32) & 0xFFFFFFFF);
if (limit_ext_reg_val) {
limit_reg_val = (uint32_t)((limit_sz_msk) & 0xFFFFFFFF);
al_assert(limit_reg_val == 0xFFFFFFFF);
} else {
limit_reg_val = (uint32_t)(atu_region->limit &
0xFFFFFFFF);
}
al_reg_write32_masked(
limit_ext_reg,
limit_ext_reg_mask,
limit_ext_reg_val << limit_ext_reg_shift);
al_reg_write32(&regs->port_regs->iatu.limit_addr,
limit_reg_val);
}
/**
* Addressing RMN: 3186
*
* RMN description:
* Bug in SNPS IP (versions 4.21, 4.10a-ea02)
* In a CFG request created via outbound ATU (shift mode), bits [27:12] go to
* [31:16]; the shifting is correct, however the ATU leaves bits [15:12]
* at their original values, and these are then transmitted in the TLP.
* Those bits are currently reserved, but might be non-reserved in future generations.
*
* Software flow:
* Enable HW fix
* rev=REV1,REV2 set bit 15 in corresponding app_reg.atu.out_mask
* rev>REV2 set corresponding bit in app_reg.atu.reg_out_mask
*/
if ((atu_region->cfg_shift_mode == AL_TRUE) &&
(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)) {
if (pcie_port->rev_id > AL_PCIE_REV_ID_2) {
al_reg_write32_masked(regs->app.atu.reg_out_mask,
1 << (atu_region->index) ,
1 << (atu_region->index));
} else {
uint32_t *limit_ext_reg =
(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
&regs->app.atu.out_mask_pair[atu_region->index / 2] :
@ -2382,29 +2515,12 @@ al_pcie_atu_region_set(
(atu_region->index % 2) ?
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
uint64_t limit_sz_msk =
atu_region->limit - atu_region->base_addr;
uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >>
32) & 0xFFFFFFFF);
if (limit_ext_reg_val) {
limit_reg_val = (uint32_t)((limit_sz_msk) & 0xFFFFFFFF);
al_assert(limit_reg_val == 0xFFFFFFFF);
} else {
limit_reg_val = (uint32_t)(atu_region->limit &
0xFFFFFFFF);
}
al_reg_write32_masked(
limit_ext_reg,
limit_ext_reg_mask,
limit_ext_reg_val << limit_ext_reg_shift);
} else {
limit_reg_val = (uint32_t)(atu_region->limit & 0xFFFFFFFF);
limit_ext_reg,
limit_ext_reg_mask,
(AL_BIT(15)) << limit_ext_reg_shift);
}
al_reg_write32(&regs->port_regs->iatu.limit_addr,
limit_reg_val);
}
reg = 0;
@ -2505,7 +2621,22 @@ al_pcie_axi_io_config(
PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN);
}
/************** Interrupt generation (Endpoint mode Only) API *****************/
/************** Interrupt and Event generation (Endpoint mode Only) API *****************/
int al_pcie_pf_flr_done_gen(struct al_pcie_pf *pcie_pf)
{
struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
unsigned int pf_num = pcie_pf->pf_num;
al_reg_write32_masked(regs->app.global_ctrl.events_gen[pf_num],
PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE,
PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE);
al_udelay(AL_PCIE_FLR_DONE_INTERVAL);
al_reg_write32_masked(regs->app.global_ctrl.events_gen[pf_num],
PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE, 0);
return 0;
}
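A sketch of the intended call site, assuming the driver learns about an FLR through its own notification path (not shown here):
/* Hypothetical FLR handler: quiesce the function, then acknowledge to the host */
static void example_handle_flr(struct al_pcie_pf *pcie_pf)
{
	/* driver-specific: stop DMA and reset per-function state here */
	al_pcie_pf_flr_done_gen(pcie_pf);	/* emits the FLR_PF_DONE message */
}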
/** generate INTx Assert/DeAssert Message */
int
@ -2607,15 +2738,16 @@ al_pcie_msix_masked(struct al_pcie_pf *pcie_pf)
}
/******************** Advanced Error Reporting (AER) API **********************/
/** configure AER capability */
int
al_pcie_aer_config(
struct al_pcie_pf *pcie_pf,
struct al_pcie_aer_params *params)
/************************* Auxiliary functions ********************************/
/* configure AER capability */
static int
al_pcie_aer_config_aux(
struct al_pcie_port *pcie_port,
unsigned int pf_num,
struct al_pcie_aer_params *params)
{
struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
struct al_pcie_regs *regs = pcie_port->regs;
struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
uint32_t reg_val;
reg_val = al_reg_read32(&aer_regs->header);
@ -2641,8 +2773,22 @@ al_pcie_aer_config(
(params->ecrc_gen_en ? PCIE_AER_CTRL_STAT_ECRC_GEN_EN : 0) |
(params->ecrc_chk_en ? PCIE_AER_CTRL_STAT_ECRC_CHK_EN : 0));
/**
* Addressing RMN: 5119
*
* RMN description:
* ECRC generation for outbound request translated by iATU is effected
* by iATU setting instead of ecrc_gen_bit in AER
*
* Software flow:
* When enabling ECRC generation, set the outbound iATU to generate ECRC
*/
if (params->ecrc_gen_en == AL_TRUE) {
al_pcie_ecrc_gen_ob_atu_enable(pcie_port, pf_num);
}
al_reg_write32_masked(
regs->core_space[pcie_pf->pf_num].pcie_dev_ctrl_status,
regs->core_space[pf_num].pcie_dev_ctrl_status,
PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN |
PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN |
PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN |
@ -2663,12 +2809,14 @@ al_pcie_aer_config(
return 0;
}
/** AER uncorretable errors get and clear */
unsigned int
al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf)
/** AER uncorrectable errors get and clear */
static unsigned int
al_pcie_aer_uncorr_get_and_clear_aux(
struct al_pcie_port *pcie_port,
unsigned int pf_num)
{
struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
struct al_pcie_regs *regs = pcie_port->regs;
struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
uint32_t reg_val;
reg_val = al_reg_read32(&aer_regs->uncorr_err_stat);
@ -2677,12 +2825,14 @@ al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf)
return reg_val;
}
/** AER corretable errors get and clear */
unsigned int
al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf)
/** AER correctable errors get and clear */
static unsigned int
al_pcie_aer_corr_get_and_clear_aux(
struct al_pcie_port *pcie_port,
unsigned int pf_num)
{
struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
struct al_pcie_regs *regs = pcie_port->regs;
struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
uint32_t reg_val;
reg_val = al_reg_read32(&aer_regs->corr_err_stat);
@ -2696,19 +2846,123 @@ al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf)
#endif
/** AER get the header for the TLP corresponding to a detected error */
void
al_pcie_aer_err_tlp_hdr_get(
struct al_pcie_pf *pcie_pf,
static void
al_pcie_aer_err_tlp_hdr_get_aux(
struct al_pcie_port *pcie_port,
unsigned int pf_num,
uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
{
struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
struct al_pcie_regs *regs = pcie_port->regs;
struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
int i;
for (i = 0; i < AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS; i++)
hdr[i] = al_reg_read32(&aer_regs->header_log[i]);
}
/******************** EP AER functions **********************/
/** configure EP physical function AER capability */
int al_pcie_aer_config(
struct al_pcie_pf *pcie_pf,
struct al_pcie_aer_params *params)
{
al_assert(pcie_pf);
al_assert(params);
return al_pcie_aer_config_aux(
pcie_pf->pcie_port, pcie_pf->pf_num, params);
}
/** EP physical function AER uncorrectable errors get and clear */
unsigned int al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf)
{
al_assert(pcie_pf);
return al_pcie_aer_uncorr_get_and_clear_aux(
pcie_pf->pcie_port, pcie_pf->pf_num);
}
/** EP physical function AER correctable errors get and clear */
unsigned int al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf)
{
al_assert(pcie_pf);
return al_pcie_aer_corr_get_and_clear_aux(
pcie_pf->pcie_port, pcie_pf->pf_num);
}
/**
* EP physical function AER get the header for
* the TLP corresponding to a detected error
* */
void al_pcie_aer_err_tlp_hdr_get(
struct al_pcie_pf *pcie_pf,
uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
{
al_assert(pcie_pf);
al_assert(hdr);
al_pcie_aer_err_tlp_hdr_get_aux(
pcie_pf->pcie_port, pcie_pf->pf_num, hdr);
}
/******************** RC AER functions **********************/
/** configure RC port AER capability */
int al_pcie_port_aer_config(
struct al_pcie_port *pcie_port,
struct al_pcie_aer_params *params)
{
al_assert(pcie_port);
al_assert(params);
/**
* For RC mode there are no PFs (nor PF handles),
* therefore PF#0 is used
* */
return al_pcie_aer_config_aux(pcie_port, 0, params);
}
/** RC port AER uncorrectable errors get and clear */
unsigned int al_pcie_port_aer_uncorr_get_and_clear(
struct al_pcie_port *pcie_port)
{
al_assert(pcie_port);
/**
* For RC mode there are no PFs (nor PF handles),
* therefore PF#0 is used
* */
return al_pcie_aer_uncorr_get_and_clear_aux(pcie_port, 0);
}
/** RC port AER correctable errors get and clear */
unsigned int al_pcie_port_aer_corr_get_and_clear(
struct al_pcie_port *pcie_port)
{
al_assert(pcie_port);
/**
* For RC mode there are no PFs (nor PF handles),
* therefore PF#0 is used
* */
return al_pcie_aer_corr_get_and_clear_aux(pcie_port, 0);
}
/** RC port AER get the header for the TLP corresponding to a detected error */
void al_pcie_port_aer_err_tlp_hdr_get(
struct al_pcie_port *pcie_port,
uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
{
al_assert(pcie_port);
al_assert(hdr);
/**
* For RC mode there are no PFs (nor PF handles),
* therefore PF#0 is used
* */
al_pcie_aer_err_tlp_hdr_get_aux(pcie_port, 0, hdr);
}
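Sketch of the new RC-port wrappers used together; the interrupt context and the 4-dword header print are illustrative assumptions only:
/* Hypothetical RC AER service routine built on the port-level wrappers */
static void example_rc_aer_service(struct al_pcie_port *pcie_port)
{
	uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS];
	unsigned int uncorr = al_pcie_port_aer_uncorr_get_and_clear(pcie_port);
	unsigned int corr = al_pcie_port_aer_corr_get_and_clear(pcie_port);

	if (uncorr) {
		al_pcie_port_aer_err_tlp_hdr_get(pcie_port, hdr);
		al_err("PCIe %d: AER uncorr %#x hdr %08x %08x %08x %08x\n",
			pcie_port->port_id, uncorr, hdr[0], hdr[1], hdr[2], hdr[3]);
	}
	if (corr)
		al_dbg("PCIe %d: AER corr %#x\n", pcie_port->port_id, corr);
}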
/********************** Loopback mode (RC and Endpoint modes) ************/
/** enter local pipe loopback mode */

View File

@ -85,7 +85,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* - Root Complex mode
* - Set the Max Link Speed to Gen2
* - Set the max lanes width to 2 (x2)
* - Disable reversal mode
* - Enable Snoops to support I/O Hardware cache coherency
* - Enable pcie core RAM parity
* - Enable pcie core AXI parity
@ -97,7 +96,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* @code
* - struct al_pcie_link_params link_params = {
* AL_PCIE_LINK_SPEED_GEN2,
* AL_FALSE, // disable reversal mode
* AL_PCIE_MPS_DEFAULT};
*
* - struct al_pcie_port_config_params config_params = {
@ -162,14 +160,29 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/********************************* Constants **********************************/
/******************************************************************************/
/** Inbound header credits sum - rev 0/1/2 */
#define AL_PCIE_REV_1_2_IB_HCRD_SUM 97
/** Inbound header credits sum - rev 3 */
#define AL_PCIE_REV3_IB_HCRD_SUM 259
/**
* PCIe Core revision IDs:
* ID_1: Alpine V1
* ID_2: Alpine V2 x4
* ID_3: Alpine V2 x8
*/
#define AL_PCIE_REV_ID_1 1
#define AL_PCIE_REV_ID_2 2
#define AL_PCIE_REV_ID_3 3
/** Number of extended registers */
#define AL_PCIE_EX_REGS_NUM 40
/*******************************************************************************
* The inbound flow control for headers is programmable per P, NP and CPL
* transactions types. The following parameters define the total number of
* available header flow controls for all types.
******************************************************************************/
/** Inbound header credits sum - rev1/2 */
#define AL_PCIE_REV_1_2_IB_HCRD_SUM 97
/** Inbound header credits sum - rev3 */
#define AL_PCIE_REV3_IB_HCRD_SUM 259
/*******************************************************************************
* PCIe AER uncorrectable error bits
* To be used with the following functions:
@ -232,9 +245,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* al_pcie_ib_hcrd_config: data structure internally used in order to config
* inbound posted/non-posted parameters.
* Note: it's required to have this structure in pcie_port handle since it has
* a state (required/not-required) which is determined by outbound
* outstanding configuration
* Note: this is a private member in pcie_port handle and MUST NOT be modified
* by the user.
*/
struct al_pcie_ib_hcrd_config {
/* Internally used - see 'al_pcie_ib_hcrd_os_ob_reads_config' */
@ -251,10 +263,6 @@ enum al_pcie_max_payload_size {
AL_PCIE_MPS_DEFAULT,
AL_PCIE_MPS_128 = 0,
AL_PCIE_MPS_256 = 1,
AL_PCIE_MPS_512 = 2,
AL_PCIE_MPS_1024 = 3,
AL_PCIE_MPS_2048 = 4,
AL_PCIE_MPS_4096 = 5,
};
/**
@ -271,10 +279,12 @@ struct al_pcie_port {
void *ex_regs;
void __iomem *pbs_regs;
/* Revision ID */
/* Rev ID */
uint8_t rev_id;
unsigned int port_id;
uint8_t max_lanes;
/* For EP mode only */
uint8_t max_num_of_pfs;
/* Internally used */
@ -284,6 +294,8 @@ struct al_pcie_port {
/**
* al_pcie_pf: the pf handle, a data structure used to handle PF specific
* functionality. Initialized using "al_pcie_pf_handle_init()"
*
* Note: This structure should be used for EP mode only
*/
struct al_pcie_pf {
unsigned int pf_num;
@ -318,15 +330,13 @@ struct al_pcie_max_capability {
al_bool root_complex_mode_supported;
enum al_pcie_link_speed max_speed;
uint8_t max_lanes;
al_bool reversal_supported;
uint8_t atu_regions_num;
uint32_t atu_min_size;
uint32_t atu_min_size; /* Size granularity: 4 Kbytes */
};
/** PCIe link related parameters */
struct al_pcie_link_params {
enum al_pcie_link_speed max_speed;
al_bool enable_reversal;
enum al_pcie_link_speed max_speed;
enum al_pcie_max_payload_size max_payload_size;
};
@ -362,22 +372,22 @@ struct al_pcie_gen3_params {
uint8_t local_fs; /* Low Frequency (LF) Value for Gen3 Transmit Equalization */
};
/** Transport Layer credits parameters */
struct al_pcie_tl_credits_params {
};
/** Various configuration features */
struct al_pcie_features {
/**
* Enable MSI fix from the SATA to the PCIe EP
* Only valid for port 0, when enabled as EP
*/
al_bool sata_ep_msi_fix;
};
/**
* Inbound posted/non-posted header credits and outstanding outbound reads
* completion header configuration
* completion header configuration.
*
* This structure controls the resource partitioning of an important resource in
* the PCIe port. This resource includes the PCIe TLP headers coming on the PCIe
* port, and is shared between three types:
* - Inbound Non-posted, which are PCIe Reads as well as PCIe Config Cycles
* - Inbound Posted, i.e. PCIe Writes
* - Inbound Read-completion, which are the completions matching the outbound
* reads issued previously by the same core.
* The programmer needs to take into consideration that a given outbound read
* request could be split on the return path into Ceiling[MPS_Size / 64] + 1
* Read Completions.
* Programmers are not expected to modify these settings except for rare cases,
* where a different ratio between Posted-Writes and Read-Completions is desired
*
* Constraints:
* - nof_cpl_hdr + nof_np_hdr + nof_p_hdr ==
@ -411,13 +421,26 @@ struct al_pcie_ib_hcrd_os_ob_reads_config {
unsigned int nof_p_hdr;
};
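Worked numbers, for illustration only: with an MPS of 256 bytes a single outbound read can return in up to Ceiling[256 / 64] + 1 = 5 Read Completions, so a rev3 port could, for example, be partitioned as nof_p_hdr = 100, nof_np_hdr = 59 and nof_cpl_hdr = 100, which sums to AL_PCIE_REV3_IB_HCRD_SUM (259) as the constraint above requires. These are not recommended settings, only an arithmetic check of the constraint.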
/** PCIe Ack/Nak Latency and Replay timers */
/**
* PCIe Ack/Nak Latency and Replay timers
*
* Note: Programmers are not expected to modify these values unless working with
* very slow external devices like low-end FPGA or hardware devices
* emulated in software
*/
struct al_pcie_latency_replay_timers {
uint16_t round_trip_lat_limit;
uint16_t replay_timer_limit;
};
/* SRIS KP counter values */
/**
* SRIS KP counter values
*
* Description: SRIS is a PCI-SIG ECN that enables the two peers on a given PCIe
* link to run with Separate Reference clocks with Independent Spread spectrum
* clocking, and requires inserting PCIe SKP symbols on the link at a higher
* frequency than the original PCIe spec
*/
struct al_pcie_sris_params {
/** set to AL_TRUE to use defaults and ignore the other parameters */
al_bool use_defaults;
@ -425,7 +448,23 @@ struct al_pcie_sris_params {
uint16_t kp_counter_gen21;
};
/** Relaxed ordering params */
/**
* Relaxed ordering params
* Enable ordering relaxations for applications that do not require
* enforcement of the 'completion must not bypass posted' ordering rule.
*
* Recommendation:
* - For downstream port, set enable_tx_relaxed_ordering
* - For upstream port
* - set enable_rx_relaxed_ordering
* - set enable_tx_relaxed_ordering for emulated EP.
*
* Defaults:
* - For Root-Complex:
* - tx_relaxed_ordering = AL_FALSE, rx_relaxed_ordering = AL_TRUE
* - For End-Point:
* - tx_relaxed_ordering = AL_TRUE, rx_relaxed_ordering = AL_FALSE
*/
struct al_pcie_relaxed_ordering_params {
al_bool enable_tx_relaxed_ordering;
al_bool enable_rx_relaxed_ordering;
@ -445,20 +484,26 @@ struct al_pcie_port_config_params {
struct al_pcie_latency_replay_timers *lat_rply_timers;
struct al_pcie_gen2_params *gen2_params;
struct al_pcie_gen3_params *gen3_params;
struct al_pcie_tl_credits_params *tl_credits;
struct al_pcie_features *features;
/* Sets all internal timers to Fast Mode for speeding up simulation.*/
/*
* Sets all internal timers to Fast Mode for speeding up simulation.
* this variable should always be set to AL_FALSE unless the user is running
* on a simulation setup
*/
al_bool fast_link_mode;
/*
* when true, the PCI unit will return Slave Error/Decoding Error to the master unit in case
* of error. when false, the value 0xFFFFFFFF will be returned without error indication.
* when true, the PCI unit will return Slave Error/Decoding Error to any
* I/O Fabric master or Internal Processors in case of error.
* when false, the value 0xFFFFFFFF will be returned without error indication.
*/
al_bool enable_axi_slave_err_resp;
struct al_pcie_sris_params *sris_params;
struct al_pcie_relaxed_ordering_params *relaxed_ordering_params;
};
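A hedged initialization sketch for the trimmed-down parameter set, in the spirit of the example at the top of this file; the field names are taken from the al_pcie_port_config() flow shown earlier in this diff and the values are placeholders:
/* Hypothetical configuration (designated initializers, unspecified fields zeroed) */
struct al_pcie_port_config_params config_params = {
	.enable_ram_parity_int = AL_TRUE,
	.enable_axi_parity_int = AL_TRUE,
	.fast_link_mode = AL_FALSE,		/* real hardware, not simulation */
	.enable_axi_slave_err_resp = AL_FALSE,	/* return 0xFFFFFFFF on error */
};
/* ... followed by al_pcie_port_enable() and al_pcie_port_config(pcie_port, &config_params) ... */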
/** BAR register configuration parameters (Endpoint Mode only) */
/**
* BAR register configuration parameters
* Note: This structure should be used for EP mode only
*/
struct al_pcie_ep_bar_params {
al_bool enable;
al_bool memory_space; /**< memory or io */
@ -467,12 +512,30 @@ struct al_pcie_ep_bar_params {
uint64_t size; /* the bar size in bytes */
};
/** PF config params (EP mode only) */
/**
* PF config params (EP mode only)
* Note: This structure should be used for EP mode only
*/
struct al_pcie_pf_config_params {
/**
* disable advertising D1 and D3hot state
* Recommended to be AL_TRUE
*/
al_bool cap_d1_d3hot_dis;
/**
* disable advertising support for Function-Level-Reset
* Recommended to be AL_FALSE
*/
al_bool cap_flr_dis;
/*
* disable advertising Advanced power management states
*/
al_bool cap_aspm_dis;
al_bool bar_params_valid;
/*
* Note: only bar_params[0], [2] and [4] can have memory_64_bit enabled
* and in such case, the next bar ([1], [3], or [5] respectively) is not used
*/
struct al_pcie_ep_bar_params bar_params[6];
struct al_pcie_ep_bar_params exp_bar_params;/* expansion ROM BAR*/
};
@ -481,7 +544,7 @@ struct al_pcie_pf_config_params {
struct al_pcie_link_status {
al_bool link_up;
enum al_pcie_link_speed speed;
uint8_t lanes;
uint8_t lanes; /* Number of lanes */
uint8_t ltssm_state;
};
@ -491,18 +554,26 @@ struct al_pcie_lane_status {
enum al_pcie_link_speed requested_speed;
};
/** PCIe MSIX capability configuration parameters */
/**
* PCIe MSIX capability configuration parameters
* Note: This structure should be used for EP mode only
*/
struct al_pcie_msix_params {
/* Number of entries - size can be up to: 2024 */
uint16_t table_size;
uint16_t table_offset;
uint8_t table_bar;
uint16_t pba_offset;
/* which bar to use when calculating the PBA table address and adding offset to */
uint16_t pba_bar;
};
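A filled-in example of the MSI-X parameters, assuming the table and PBA both live in BAR0 at arbitrary offsets; the numbers are placeholders only:
/* Hypothetical MSI-X layout: 64 vectors in BAR0, PBA further up in the same BAR */
struct al_pcie_msix_params msix_params = {
	.table_size = 64,
	.table_offset = 0x0000,
	.table_bar = 0,
	.pba_offset = 0x1000,
	.pba_bar = 0,
};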
/** PCIE AER capability parameters */
struct al_pcie_aer_params {
/** ECRC Generation Enable */
/** ECRC Generation Enable
* while this feature is powerful, no known chipsets or processors
* support it as of 2015
*/
al_bool ecrc_gen_en;
/** ECRC Check Enable */
al_bool ecrc_chk_en;
@ -562,6 +633,13 @@ int al_pcie_pf_handle_init(
struct al_pcie_port *pcie_port,
unsigned int pf_num);
/**
* Get port revision ID
* @param pcie_port pcie port handle
* @return Port rev_id
*/
int al_pcie_port_rev_id_get(struct al_pcie_port *pcie_port);
/************************** Pre PCIe Port Enable API **************************/
/**
@ -582,7 +660,8 @@ int al_pcie_port_operating_mode_config(struct al_pcie_port *pcie_port,
* This function can be called only before enabling the controller using al_pcie_port_enable().
*
* @param pcie_port pcie port handle
* @param lanes number of lanes
* @param lanes number of lanes (must be 1, 2, 4, 8 or 16; no other value is allowed)
*
* Note: this function must be called before any al_pcie_port_config() calls
*
* @return 0 if no error found.
@ -593,7 +672,12 @@ int al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes);
* Set maximum physical function numbers
* @param pcie_port pcie port handle
* @param max_num_of_pfs number of physical functions
* Note: this function must be called before any al_pcie_pf_config() calls
*
* Notes:
* - this function must be called before any al_pcie_pf_config() calls
* - max_num_of_pfs is the number of PFs exposed on a given PCIe Endpoint port
* - PCIe rev1/rev2 supports only single Endpoint
* - PCIe rev3 can support up to 4
*/
int al_pcie_port_max_num_of_pfs_set(
struct al_pcie_port *pcie_port,
@ -619,9 +703,27 @@ int al_pcie_port_ib_hcrd_os_ob_reads_config(
enum al_pcie_operating_mode al_pcie_operating_mode_get(
struct al_pcie_port *pcie_port);
/**
* PCIe AXI quality of service configuration
*
* @param pcie_port
* Initialized PCIe port handle
* @param arqos
* AXI read quality of service (0 - 15)
* @param awqos
* AXI write quality of service (0 - 15)
*/
void al_pcie_axi_qos_config(
struct al_pcie_port *pcie_port,
unsigned int arqos,
unsigned int awqos);
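Usage is a single call; the values below are arbitrary picks within the documented 0 - 15 range:
/* Example only: favor reads slightly over writes on the port's AXI master */
al_pcie_axi_qos_config(pcie_port, 8 /* arqos */, 4 /* awqos */);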
/**************************** PCIe Port Enable API ****************************/
/** Enable PCIe unit (deassert reset)
/**
* Enable PCIe unit (deassert reset)
* This function only enables the port, without any configuration/link
* functionality. Should be called before starting any configuration/link API
*
* @param pcie_port pcie port handle
*
@ -637,6 +739,8 @@ void al_pcie_port_disable(struct al_pcie_port *pcie_port);
/**
* Port memory shutdown/up
* Memory shutdown should be called for unused ports for power-saving
*
* Caution: This function can be called only when the controller is disabled
*
* @param pcie_port pcie port handle
@ -669,7 +773,7 @@ int al_pcie_port_config(struct al_pcie_port *pcie_port,
const struct al_pcie_port_config_params *params);
/**
* @brief Configure a specific PF (EP params, sriov params, ...)
* @brief Configure a specific PF
* this function must be called before any datapath transactions
*
* @param pcie_pf pcie pf handle
@ -685,7 +789,8 @@ int al_pcie_pf_config(
/**
* @brief start pcie link
*
* This function starts the link and should be called only after port is enabled
* and pre-port-enable configurations are done
* @param pcie_port pcie port handle
*
* @return 0 if no error found
@ -701,6 +806,14 @@ int al_pcie_link_start(struct al_pcie_port *pcie_port);
*/
int al_pcie_link_stop(struct al_pcie_port *pcie_port);
/**
* @brief check if pcie link is started
* Note that this function checks if link is started rather than link is up
* @param pcie_port pcie port handle
* @return AL_TRUE if link is started and AL_FALSE otherwise
*/
al_bool al_pcie_is_link_started(struct al_pcie_port *pcie_port);
/**
* @brief trigger link-disable
*
@ -753,6 +866,10 @@ void al_pcie_lane_status_get(
/**
* @brief trigger hot reset
* this function initiates In-Band reset while link is up.
* to initiate hot reset: call this function with AL_TRUE
* to exit from hot reset: call this function with AL_FALSE
* Note: This function should be called in RC mode only
*
* @param pcie_port pcie port handle
* @param enable AL_TRUE to enable hot-reset and AL_FALSE to disable it
@ -766,6 +883,7 @@ int al_pcie_link_hot_reset(struct al_pcie_port *pcie_port, al_bool enable);
* this function initiates Link retraining by directing the Physical Layer LTSSM
* to the Recovery state. If the LTSSM is already in Recovery or Configuration,
* re-entering Recovery is permitted but not required.
* Note: This function should be called in RC mode only
* @param pcie_port pcie port handle
*
@ -793,7 +911,9 @@ int al_pcie_link_change_width(struct al_pcie_port *pcie_port, uint8_t width);
/************************** Snoop Configuration API ***************************/
/**
* @brief configure pcie port axi snoop
* @brief configure pcie port axi snoop
* This enables the inbound PCIe posted write data or the Read completion data to
* snoop the internal processor caches for I/O cache coherency
*
* @param pcie_port pcie port handle
* @param enable_axi_snoop enable snoop.
@ -807,7 +927,10 @@ int al_pcie_port_snoop_config(struct al_pcie_port *pcie_port,
/************************** Configuration Space API ***************************/
/**
* Configuration Space Access Through PCI-E_ECAM_Ext PASW (RC mode only)
* Configuration Space Access Through PCI-E_ECAM_Ext PASW
* This feature enables the internal processors to generate configuration cycles
* on the PCIe ports by writing to part of the processor memory space marked by
* the PCI-E_ECAM_Ext address window
*/
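For orientation, a sketch of the standard PCIe ECAM offset encoding such a window implies; the macro name is hypothetical and the window base address is platform-specific, not defined by this header:
/* Assumes standard ECAM layout: bus[27:20], device[19:15], function[14:12], register[11:0] */
#define EXAMPLE_ECAM_OFFSET(bus, dev, fn, reg) \
	(((uint64_t)(bus) << 20) | ((dev) << 15) | ((fn) << 12) | ((reg) & 0xfff))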
/**
@ -852,6 +975,11 @@ void al_pcie_local_cfg_space_write(
/**
* @brief set target_bus and mask_target_bus
*
* Call this function with target_bus set to the required bus of the next
* outbound config access to be issued. No need to call that function if the
* next config access bus equals to the last one.
*
* @param pcie_port pcie port handle
* @param target_bus
* @param mask_target_bus
@ -875,6 +1003,8 @@ int al_pcie_target_bus_get(struct al_pcie_port *pcie_port,
/**
* Set secondary bus number
*
* Same as al_pcie_target_bus_set but with secondary bus
*
* @param pcie_port pcie port handle
* @param secbus pci secondary bus number
*
@ -885,6 +1015,8 @@ int al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus);
/**
* Set subordinary bus number
*
* Same as al_pcie_target_bus_set but with subordinary bus
*
* @param pcie_port pcie port handle
* @param subbus the highest bus number of all of the buses that can be reached
* downstream of the PCIE instance.
@ -897,13 +1029,22 @@ int al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port,uint8_t subbus);
* @brief Enable/disable deferring incoming configuration requests until
* initialization is complete. When enabled, the core completes incoming
* configuration requests with a Configuration Request Retry Status.
* Other incoming Requests complete with Unsupported Request status.
* Other incoming non-configuration Requests complete with Unsupported Request status.
*
* Note: This function should be used for EP mode only
*
* @param pcie_port pcie port handle
* @param en enable/disable
*/
void al_pcie_app_req_retry_set(struct al_pcie_port *pcie_port, al_bool en);
/**
* @brief Check if deferring incoming configuration requests is enabled or not
* @param pcie_port pcie port handle
* @return AL_TRUE if it's enabled and AL_FALSE otherwise
*/
al_bool al_pcie_app_req_retry_get_status(struct al_pcie_port *pcie_port);
/*************** Internal Address Translation Unit (ATU) API ******************/
enum al_pcie_atu_dir {
@ -911,6 +1052,7 @@ enum al_pcie_atu_dir {
AL_PCIE_ATU_DIR_INBOUND = 1,
};
/** decoding of the PCIe TLP Type as appears on the wire */
enum al_pcie_atu_tlp {
AL_PCIE_TLP_TYPE_MEM = 0,
AL_PCIE_TLP_TYPE_IO = 2,
@ -920,57 +1062,134 @@ enum al_pcie_atu_tlp {
AL_PCIE_TLP_TYPE_RESERVED = 0x1f
};
/** default response types */
enum al_pcie_atu_response {
AL_PCIE_RESPONSE_NORMAL = 0,
AL_PCIE_RESPONSE_UR = 1,
AL_PCIE_RESPONSE_CA = 2
AL_PCIE_RESPONSE_UR = 1, /* UR == Unsupported Request */
AL_PCIE_RESPONSE_CA = 2 /* CA == Completion Abort */
};
struct al_pcie_atu_region {
/**********************************************************************
* General Parameters *
**********************************************************************/
al_bool enable;
/* outbound or inbound */
enum al_pcie_atu_dir direction;
/* region index */
uint8_t index;
/* the 64-bit address that get matched with the 64-bit address incoming
* on the PCIe TLP
*/
uint64_t base_addr;
/** limit marks the region's end address. only bits [39:0] are valid
* given the Alpine PoC maximum physical address space
/**
* limit marks the region's end address.
* For Alpine V1 (PCIe rev1): only bits [39:0] are valid
* For Alpine V2 (PCIe rev2/rev3): only bits [47:0] are valid
* An access is a hit in the iATU if:
* - address >= base_addr
* - address <= base_addr + limit
*/
uint64_t limit;
/** the address that matches will be translated to this address + offset
/**
* the address that matches (hit) will be translated to:
* target_addr + offset
*
* Example: accessing (base_addr + 0x1000) will be translated to:
* (target_addr + 0x1000) in case limit >= 0x1000
*/
uint64_t target_addr;
/**
* When the Invert feature is activated, an address match occurs when
* the untranslated address is not in the region bounded by the base
* address and limit address
*/
al_bool invert_matching;
/* pcie tlp type*/
/**
* PCIe TLP type
* Can be: Mem, IO, CFG0, CFG1 or MSG
*/
enum al_pcie_atu_tlp tlp_type;
/* pcie frame header attr field*/
/**
* PCIe frame header attr field.
* When the address of a TLP is matched to this region, then the ATTR
* field of the TLP is changed to the value in this register.
*/
uint8_t attr;
/**********************************************************************
* Outbound specific Parameters *
**********************************************************************/
/**
* outbound specific params
* PCIe Message code
* MSG TLPs (Message Code). When the address of an outbound TLP is
* matched to this region, and the translated TLP TYPE field is Msg
* then the message field of the TLP is changed to the value in this
* register.
*/
/* pcie message code */
uint8_t msg_code;
al_bool cfg_shift_mode;
/**
* inbound specific params
* CFG Shift Mode. This is useful for CFG transactions where the PCIe
* configuration mechanism maps bits [27:12] of the address to the
* bus/device and function number. This allows a CFG configuration space
* to be located in any 256MB window of your application memory space
* using a 28-bit effective address.Shifts bits [27:12] of the
* untranslated address to form bits [31:16] of the translated address.
*/
al_bool cfg_shift_mode;
/**********************************************************************
* Inbound specific Parameters *
**********************************************************************/
uint8_t bar_number;
/* BAR match mode, used in EP for MEM and IO tlps*/
/**
* Match Mode. Determines Inbound matching mode for TLPs. The mode
* depends on the type of TLP that is received as follows:
* MEM-I/O: 0 = Address Match Mode
* 1 = BAR Match Mode
* CFG0 : 0 = Routing ID Match Mode
* 1 = Accept Mode
* MSG : 0 = Address Match Mode
* 1 = Vendor ID Match Mode
*/
uint8_t match_mode;
/**
* For outbound: enables taking the function number of the translated
* TLP from the PCIe core. For inbound: enables ATU function match mode
* For outbound:
* - AL_TRUE : enables taking the function number of the translated TLP
* from the PCIe core
* - AL_FALSE: no function number is taken from PCIe core
* For inbound:
* - AL_TRUE : enables ATU function match mode
* - AL_FALSE: no function match mode applied to transactions
*
* Note: this boolean is ignored in RC mode
*/
al_bool function_match_bypass_mode;
/**
* The function number to match/bypass (see previous parameter)
* Note: this parameter is ignored when previous param is FALSE
* Note: this parameter is ignored when previous parameter is AL_FALSE
*/
uint8_t function_match_bypass_mode_number;
/* response code */
/**
* setting up what is the default response for an inbound transaction
* that matches the iATU
*/
enum al_pcie_atu_response response;
/**
* Attr Match Enable. Ensures that a successful AT TLP field comparison
* match (see attr above) occurs for address translation to proceed
*/
al_bool enable_attr_match_mode;
/**
* Message Code Match Enable (Msg TLPs). Ensures that a successful
* Message Code TLP field comparison match (see msg_code above) occurs
* (in MSG transactions) for address translation to proceed.
*/
al_bool enable_msg_match_mode;
/**
* USE WITH CAUTION: setting this boolean to AL_TRUE allows setting the
@ -1008,7 +1227,11 @@ void al_pcie_atu_region_get_fields(
/**
* @brief Configure axi io bar.
* every hit to this bar will override size to 4 bytes.
*
* This is an EP feature, enabling PCIe IO transaction to be captured if it fits
* within start and end address, and then mapped to internal 4-byte
* memRead/memWrite. Every hit to this bar will override size to 4 bytes.
*
* @param pcie_port pcie port handle
* @param start the first address of the memory
* @param end the last address of the memory
@ -1028,6 +1251,13 @@ enum al_pcie_legacy_int_type{
AL_PCIE_LEGACY_INTD
};
/** @brief generate FLR_PF_DONE message
* @param pcie_pf pcie pf handle
* @return 0 if no error found
*/
int al_pcie_pf_flr_done_gen(struct al_pcie_pf *pcie_pf);
/**
* @brief generate INTx Assert/DeAssert Message
* @param pcie_pf pcie pf handle
@ -1075,7 +1305,7 @@ al_bool al_pcie_msix_masked(struct al_pcie_pf *pcie_pf);
/******************** Advanced Error Reporting (AER) API **********************/
/**
* @brief configure AER capability
* @brief configure EP physical function AER capability
* @param pcie_pf pcie pf handle
* @param params AER capability configuration parameters
* @return 0 if no error found
@ -1085,7 +1315,7 @@ int al_pcie_aer_config(
struct al_pcie_aer_params *params);
/**
* @brief AER uncorretable errors get and clear
* @brief EP physical function AER uncorrectable errors get and clear
* @param pcie_pf pcie pf handle
* @return bit mask of uncorrectable errors - see 'AL_PCIE_AER_UNCORR_*' for
* details
@ -1093,7 +1323,7 @@ int al_pcie_aer_config(
unsigned int al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf);
/**
* @brief AER corretable errors get and clear
* @brief EP physical function AER correctable errors get and clear
* @param pcie_pf pcie pf handle
* @return bit mask of correctable errors - see 'AL_PCIE_AER_CORR_*' for
* details
@ -1101,7 +1331,8 @@ unsigned int al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf);
unsigned int al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf);
/**
* @brief AER get the header for the TLP corresponding to a detected error
* @brief EP physical function AER get the header for
* the TLP corresponding to a detected error
* @param pcie_pf pcie pf handle
* @param hdr pointer to an array for getting the header
*/
@ -1109,6 +1340,44 @@ void al_pcie_aer_err_tlp_hdr_get(
struct al_pcie_pf *pcie_pf,
uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS]);
/**
* @brief configure RC port AER capability
* @param pcie_port pcie port handle
* @param params AER capability configuration parameters
* @return 0 if no error found
*/
int al_pcie_port_aer_config(
struct al_pcie_port *pcie_port,
struct al_pcie_aer_params *params);
/**
* @brief RC port AER uncorrectable errors get and clear
* @param pcie_port pcie port handle
* @return bit mask of uncorrectable errors - see 'AL_PCIE_AER_UNCORR_*' for
* details
*/
unsigned int al_pcie_port_aer_uncorr_get_and_clear(
struct al_pcie_port *pcie_port);
/**
* @brief RC port AER correctable errors get and clear
* @param pcie_port pcie port handle
* @return bit mask of correctable errors - see 'AL_PCIE_AER_CORR_*' for
* details
*/
unsigned int al_pcie_port_aer_corr_get_and_clear(
struct al_pcie_port *pcie_port);
/**
* @brief RC port AER get the header for
* the TLP corresponding to a detected error
* @param pcie_port pcie port handle
* @param hdr pointer to an array for getting the header
*/
void al_pcie_port_aer_err_tlp_hdr_get(
struct al_pcie_port *pcie_port,
uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS]);
/******************** Loop-Back mode (RC and Endpoint modes) ******************/
/**

View File

@ -72,7 +72,7 @@ struct al_pcie_rev1_2_axi_ctrl {
/* [0x28] */
uint32_t dbi_ctl;
/* [0x2c] */
uint32_t vmid_mask;
uint32_t tgtid_mask;
uint32_t rsrvd[4];
};
struct al_pcie_rev3_axi_ctrl {
@ -98,7 +98,7 @@ struct al_pcie_rev3_axi_ctrl {
/* [0x28] */
uint32_t dbi_ctl;
/* [0x2c] */
uint32_t vmid_mask;
uint32_t tgtid_mask;
};
struct al_pcie_rev1_axi_ob_ctrl {
/* [0x0] */
@ -145,10 +145,10 @@ struct al_pcie_rev2_axi_ob_ctrl {
/* [0x24] */
uint32_t msg_limit_h;
/*
* [0x28] this register override the VMID field in the AXUSER [19:4],
* [0x28] this register override the Target-ID field in the AXUSER [19:4],
* for the AXI master port.
*/
uint32_t vmid_reg_ovrd;
uint32_t tgtid_reg_ovrd;
/* [0x2c] this register override the ADDR[63:32] AXI master port. */
uint32_t addr_high_reg_ovrd_value;
/* [0x30] this register override the ADDR[63:32] AXI master port. */
@ -196,10 +196,10 @@ struct al_pcie_rev3_axi_ob_ctrl {
/* [0x40] */
uint32_t aw_msg_addr_mask_h;
/*
* [0x44] this register override the VMID field in the AXUSER [19:4],
* [0x44] this register override the Target-ID field in the AXUSER [19:4],
* for the AXI master port.
*/
uint32_t vmid_reg_ovrd;
uint32_t tgtid_reg_ovrd;
/* [0x48] this register override the ADDR[63:32] AXI master port. */
uint32_t addr_high_reg_ovrd_value;
/* [0x4c] this register override the ADDR[63:32] AXI master port. */
@ -783,9 +783,9 @@ struct al_pcie_rev3_axi_regs {
/* arprot value */
#define PCIE_AXI_CTRL_MASTER_ARCTL_ARPROT_VALUE_MASK 0x000001C0
#define PCIE_AXI_CTRL_MASTER_ARCTL_ARPROT_VALUE_SHIFT 6
/* vmid val */
#define PCIE_AXI_CTRL_MASTER_ARCTL_VMID_VAL_MASK 0x01FFFE00
#define PCIE_AXI_CTRL_MASTER_ARCTL_VMID_VAL_SHIFT 9
/* tgtid val */
#define PCIE_AXI_CTRL_MASTER_ARCTL_TGTID_VAL_MASK 0x01FFFE00
#define PCIE_AXI_CTRL_MASTER_ARCTL_TGTID_VAL_SHIFT 9
/* IPA value */
#define PCIE_AXI_CTRL_MASTER_ARCTL_IPA_VAL (1 << 25)
/* overide snoop inidcation, if not set take it from mstr_armisc ... */
@ -797,6 +797,7 @@ snoop indication value when override */
arqos value */
#define PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_MASK 0xF0000000
#define PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_SHIFT 28
#define PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_VAL_MAX 15
/**** Master_Awctl register ****/
/* override arcache */
@ -809,9 +810,9 @@ arqos value */
/* awprot value */
#define PCIE_AXI_CTRL_MASTER_AWCTL_AWPROT_VALUE_MASK 0x000001C0
#define PCIE_AXI_CTRL_MASTER_AWCTL_AWPROT_VALUE_SHIFT 6
/* vmid val */
#define PCIE_AXI_CTRL_MASTER_AWCTL_VMID_VAL_MASK 0x01FFFE00
#define PCIE_AXI_CTRL_MASTER_AWCTL_VMID_VAL_SHIFT 9
/* tgtid val */
#define PCIE_AXI_CTRL_MASTER_AWCTL_TGTID_VAL_MASK 0x01FFFE00
#define PCIE_AXI_CTRL_MASTER_AWCTL_TGTID_VAL_SHIFT 9
/* IPA value */
#define PCIE_AXI_CTRL_MASTER_AWCTL_IPA_VAL (1 << 25)
/* overide snoop inidcation, if not set take it from mstr_armisc ... */
@ -823,6 +824,7 @@ snoop indication value when override */
awqos value */
#define PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_MASK 0xF0000000
#define PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_SHIFT 28
#define PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_VAL_MAX 15
/**** slv_ctl register ****/
#define PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN (1 << 6)
@ -888,17 +890,17 @@ awqos value */
#define PCIE_AXI_MISC_OB_CTRL_MSG_LIMIT_H_ADDR_MASK 0x000003FF
#define PCIE_AXI_MISC_OB_CTRL_MSG_LIMIT_H_ADDR_SHIFT 0
/**** vmid_reg_ovrd register ****/
/**** tgtid_reg_ovrd register ****/
/*
* select if to take the value from register or from address[63:48]:
* 1'b1: register value.
* 1'b0: from address[63:48]
*/
#define PCIE_AXI_MISC_OB_CTRL_VMID_REG_OVRD_SEL_MASK 0x0000FFFF
#define PCIE_AXI_MISC_OB_CTRL_VMID_REG_OVRD_SEL_SHIFT 0
/* vmid override value. */
#define PCIE_AXI_MISC_OB_CTRL_VMID_REG_OVRD_VALUE_MASK 0xFFFF0000
#define PCIE_AXI_MISC_OB_CTRL_VMID_REG_OVRD_VALUE_SHIFT 16
#define PCIE_AXI_MISC_OB_CTRL_TGTID_REG_OVRD_SEL_MASK 0x0000FFFF
#define PCIE_AXI_MISC_OB_CTRL_TGTID_REG_OVRD_SEL_SHIFT 0
/* tgtid override value. */
#define PCIE_AXI_MISC_OB_CTRL_TGTID_REG_OVRD_VALUE_MASK 0xFFFF0000
#define PCIE_AXI_MISC_OB_CTRL_TGTID_REG_OVRD_VALUE_SHIFT 16
/**** addr_size_replace register ****/
/*
@ -1255,17 +1257,17 @@ awqos value */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_RSRVD_14_15_MASK 0x0000C000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_RSRVD_14_15_SHIFT 14
/* choose the field from the axuser */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_VMID89_VEC_OVRD_FROM_AXUSER_MASK 0x00030000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_VMID89_VEC_OVRD_FROM_AXUSER_SHIFT 16
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_TGTID89_VEC_OVRD_FROM_AXUSER_MASK 0x00030000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_TGTID89_VEC_OVRD_FROM_AXUSER_SHIFT 16
/* choose the field from register */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_VMID89_VEC_OVRD_FROM_REG_MASK 0x000C0000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_VMID89_VEC_OVRD_FROM_REG_SHIFT 18
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_TGTID89_VEC_OVRD_FROM_REG_MASK 0x000C0000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_TGTID89_VEC_OVRD_FROM_REG_SHIFT 18
/* in case the field take from the address, offset field for each bit. */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_VMID89_VEC_ADDR_OFFSET_MASK 0x0FF00000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_VMID89_VEC_ADDR_OFFSET_SHIFT 20
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_TGTID89_VEC_ADDR_OFFSET_MASK 0x0FF00000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_PF_VEC_TGTID89_VEC_ADDR_OFFSET_SHIFT 20
/* register value override */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_CFG_VMID89_VEC_OVRD_MASK 0x30000000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_CFG_VMID89_VEC_OVRD_SHIFT 28
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_CFG_TGTID89_VEC_OVRD_MASK 0x30000000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_CFG_TGTID89_VEC_OVRD_SHIFT 28
/* Rsrvd */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_RSRVD_MASK 0xC0000000
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_2_RSRVD_SHIFT 30
@ -1291,9 +1293,9 @@ awqos value */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_3_RSRVD_SHIFT 30
/**** func_ctrl_4 register ****/
/* When set take the corresponding bit address from vmid value. */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_4_PF_VEC_MEM_ADDR54_63_SEL_VMID_MASK 0x000003FF
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_4_PF_VEC_MEM_ADDR54_63_SEL_VMID_SHIFT 0
/* When set take the corresponding bit address from tgtid value. */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_4_PF_VEC_MEM_ADDR54_63_SEL_TGTID_MASK 0x000003FF
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_4_PF_VEC_MEM_ADDR54_63_SEL_TGTID_SHIFT 0
/* override value. */
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_4_PF_VEC_MEM_ADDR54_63_OVRD_MASK 0x000FFC00
#define PCIE_AXI_PF_AXI_ATTR_OVRD_FUNC_CTRL_4_PF_VEC_MEM_ADDR54_63_OVRD_SHIFT 10

View File

@ -81,7 +81,7 @@ enum al_pcie_app_int_grp_a {
/** [RC only] Deassert_INTB received */
AL_PCIE_APP_INT_DEASSERT_INTB = AL_BIT(2),
/**
* [RC only] Deassert_INTA received - there's a didcated GIC interrupt
* [RC only] Deassert_INTA received - there's a dedicated GIC interrupt
* line that reflects the status of ASSERT/DEASSERT of INTA
*/
AL_PCIE_APP_INT_DEASSERT_INTA = AL_BIT(3),
@ -92,7 +92,7 @@ enum al_pcie_app_int_grp_a {
/** [RC only] Assert_INTB received */
AL_PCIE_APP_INT_ASSERT_INTB = AL_BIT(6),
/**
* [RC only] Assert_INTA received - there's a didcated GIC interrupt
* [RC only] Assert_INTA received - there's a dedicated GIC interrupt
* line that reflects the status of ASSERT/DEASSERT of INTA
*/
AL_PCIE_APP_INT_ASSERT_INTA = AL_BIT(7),
@ -150,13 +150,13 @@ enum al_pcie_app_int_grp_b {
AL_PCIE_APP_INT_GRP_B_FTL_ERR_MSG_RCVD = AL_BIT(5),
/**
* [RC/EP] Vendor Defined Message received
* Asserted when a vevdor message is received (with no data), buffers 2
* Asserted when a vendor message is received (with no data), buffers 2
* messages only, and latch the headers in registers
*/
AL_PCIE_APP_INT_GRP_B_VNDR_MSG_A_RCVD = AL_BIT(6),
/**
* [RC/EP] Vendor Defined Message received
* Asserted when a vevdor message is received (with no data), buffers 2
* Asserted when a vendor message is received (with no data), buffers 2
* messages only, and latch the headers in registers
*/
AL_PCIE_APP_INT_GRP_B_VNDR_MSG_B_RCVD = AL_BIT(7),
@ -166,7 +166,7 @@ enum al_pcie_app_int_grp_b {
AL_PCIE_APP_INT_GRP_B_LNK_EQ_REQ = AL_BIT(13),
/** [RC/EP] OB Vendor message request is granted by the PCIe core */
AL_PCIE_APP_INT_GRP_B_OB_VNDR_MSG_REQ_GRNT = AL_BIT(14),
/** [RC only] CPL timeout from the PCIe core indiication */
/** [RC only] CPL timeout from the PCIe core indication */
AL_PCIE_APP_INT_GRP_B_CPL_TO = AL_BIT(15),
/** [RC/EP] Slave Response Composer Lookup Error */
AL_PCIE_APP_INT_GRP_B_SLV_RESP_COMP_LKUP_ERR = AL_BIT(16),

View File

@ -51,18 +51,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "al_hal_pcie_w_reg_ex.h"
#endif
/**
* Revision IDs:
* ID_0: SlickRock M0
* ID_1: SlickRock A0
* ID_2: PeakRock x4
* ID_3: PeakRock x8
*/
#define AL_PCIE_REV_ID_0 0
#define AL_PCIE_REV_ID_1 1
#define AL_PCIE_REV_ID_2 2
#define AL_PCIE_REV_ID_3 3
#define AL_PCIE_AXI_REGS_OFFSET 0x0
#define AL_PCIE_REV_1_2_APP_REGS_OFFSET 0x1000
#define AL_PCIE_REV_3_APP_REGS_OFFSET 0x2000
@ -74,6 +62,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define REV3_MAX_NUM_LANES 8
#define AL_MAX_NUM_OF_LANES 8 /* the maximum between all Revisions */
/** Number of outbound atu regions - rev 1/2 */
#define AL_PCIE_REV_1_2_ATU_NUM_OUTBOUND_REGIONS 12
/** Number of outbound atu regions - rev 3 */
#define AL_PCIE_REV_3_ATU_NUM_OUTBOUND_REGIONS 16
struct al_pcie_core_iatu_regs {
uint32_t index;
uint32_t cr1;
@ -253,8 +246,10 @@ struct al_pcie_rev3_regs {
struct al_pcie_axi_ctrl {
uint32_t *global;
uint32_t *master_rctl;
uint32_t *master_arctl;
uint32_t *master_awctl;
uint32_t *master_ctl;
uint32_t *slv_ctl;
};
@ -265,6 +260,13 @@ struct al_pcie_axi_ob_ctrl {
uint32_t *io_start_h;
uint32_t *io_limit_l;
uint32_t *io_limit_h;
uint32_t *io_addr_mask_h; /* Rev 3 only */
uint32_t *ar_msg_addr_mask_h; /* Rev 3 only */
uint32_t *aw_msg_addr_mask_h; /* Rev 3 only */
uint32_t *tgtid_reg_ovrd; /* Rev 2/3 only */
uint32_t *addr_high_reg_ovrd_value; /* Rev 2/3 only */
uint32_t *addr_high_reg_ovrd_sel; /* Rev 2/3 only */
uint32_t *addr_size_replace; /* Rev 2/3 only */
};
struct al_pcie_axi_pcie_global {
@ -352,14 +354,23 @@ struct al_pcie_w_global_ctrl {
};
struct al_pcie_w_soc_int {
uint32_t *status_0;
uint32_t *status_1;
uint32_t *status_2;
uint32_t *status_3; /* Rev 2/3 only */
uint32_t *mask_inta_leg_0;
uint32_t *mask_inta_leg_1;
uint32_t *mask_inta_leg_2;
uint32_t *mask_inta_leg_3; /* Rev 2/3 only */
uint32_t *mask_msi_leg_0;
uint32_t *mask_msi_leg_1;
uint32_t *mask_msi_leg_2;
uint32_t *mask_msi_leg_3; /* Rev 2/3 only */
};
struct al_pcie_w_atu {
uint32_t *in_mask_pair;
uint32_t *out_mask_pair;
uint32_t *reg_out_mask; /* Rev 3 only */
};
struct al_pcie_w_regs {
@ -375,6 +386,7 @@ struct al_pcie_w_regs {
struct al_pcie_revx_w_int_grp *int_grp_b;
struct al_pcie_revx_w_int_grp *int_grp_c;
struct al_pcie_revx_w_int_grp *int_grp_d;
struct al_pcie_rev3_w_cfg_func_ext *cfg_func_ext; /* Rev 3 only */
};
struct al_pcie_regs {

View File

@ -1498,7 +1498,7 @@ struct al_pcie_rev3_w_regs {
}
#endif
#endif /* __AL_HAL_PCIE_W_REG_H */
#endif /* __AL_HAL_pcie_w_REG_H */
/** @} end of ... group */

View File

@ -66,10 +66,29 @@ __FBSDID("$FreeBSD$");
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
/* Prototypes for all the bus_space structure functions */
bs_protos(generic);
bs_protos(generic_armv4);
uint8_t generic_bs_r_1(bus_space_tag_t t, bus_space_handle_t bsh,
bus_size_t offset);
uint16_t generic_bs_r_2(bus_space_tag_t t, bus_space_handle_t bsh,
bus_size_t offset);
uint32_t generic_bs_r_4(bus_space_tag_t t, bus_space_handle_t bsh,
bus_size_t offset);
void generic_bs_w_1(bus_space_tag_t t, bus_space_handle_t bsh,
bus_size_t offset, uint8_t value);
void generic_bs_w_2(bus_space_tag_t t, bus_space_handle_t bsh,
bus_size_t offset, uint16_t value);
void generic_bs_w_4(bus_space_tag_t t, bus_space_handle_t bsh,
bus_size_t offset, uint32_t value);
void generic_bs_w_8(bus_space_tag_t t, bus_space_handle_t bsh,
bus_size_t offset, uint64_t value);
#define __UNUSED __attribute__((unused))
@ -79,6 +98,52 @@ extern "C" {
#endif
/* *INDENT-ON* */
/**
* Make sure data will be visible by other masters (other CPUS and DMA).
* usually this is achieved by the ARM DMB instruction.
*/
static void al_data_memory_barrier(void);
static void al_smp_data_memory_barrier(void);
/**
* Make sure data will be visible by DMA masters, no restriction for other cpus
*/
static inline void
al_data_memory_barrier(void)
{
#ifndef __aarch64__
dsb();
#else
dsb(sy);
#endif
}
/**
* Make sure data will be visible in order by other cpus masters.
*/
static inline void
al_smp_data_memory_barrier(void)
{
#ifndef __aarch64__
dmb();
#else
dmb(ish);
#endif
}
/**
* Make sure write data will be visible in order by other cpus masters.
*/
static inline void
al_local_data_memory_barrier(void)
{
#ifndef __aarch64__
dsb();
#else
dsb(sy);
#endif
}
/*
* WMA: This is a hack which allows not modifying the __iomem accessing HAL code.
* On ARMv7, bus_handle holds the information about VA of accessed memory. It
@ -168,50 +233,66 @@ uint64_t al_reg_read64(uint64_t * offset);
* @param offset register offset
* @param val value to write to the register
*/
#define al_reg_write8(l,v) do { dsb(); generic_bs_w_1(NULL, (bus_space_handle_t)l, 0, v); dmb(); } while (0)
#define al_reg_write8(l, v) do { \
al_data_memory_barrier(); \
generic_bs_w_1(NULL, (bus_space_handle_t)l, 0, v); \
al_smp_data_memory_barrier(); \
} while (0)
/**
* Write to MMIO 16 bits register
* @param offset register offset
* @param val value to write to the register
*/
#define al_reg_write16(l,v) do { dsb(); generic_bs_w_2(NULL, (bus_space_handle_t)l, 0, v); dmb(); } while (0)
#define al_reg_write16(l, v) do { \
al_data_memory_barrier(); \
generic_bs_w_2(NULL, (bus_space_handle_t)l, 0, v); \
al_smp_data_memory_barrier(); \
} while (0)
/**
* Write to MMIO 32 bits register
* @param offset register offset
* @param val value to write to the register
*/
#define al_reg_write32(l,v) do { dsb(); generic_bs_w_4(NULL, (bus_space_handle_t)l, 0, v); dmb(); } while (0)
#define al_reg_write32(l, v) do { \
al_data_memory_barrier(); \
generic_bs_w_4(NULL, (bus_space_handle_t)l, 0, v); \
al_smp_data_memory_barrier(); \
} while (0)
/**
* Write to MMIO 64 bits register
* @param offset register offset
* @param val value to write to the register
*/
#define al_reg_write64(l,v) do { dsb(); generic_bs_w_8(NULL, (bus_space_handle_t)l, 0, v); dmb(); } while (0)
#define al_reg_write64(l, v) do { \
al_data_memory_barrier(); \
generic_bs_w_8(NULL, (bus_space_handle_t)l, 0, v); \
al_smp_data_memory_barrier(); \
} while (0)
static inline uint8_t
al_reg_read8(uint8_t *l)
{
dsb();
al_data_memory_barrier();
return (generic_bs_r_1(NULL, (bus_space_handle_t)l, 0));
}
static inline uint16_t
al_reg_read16(uint16_t *l)
{
dsb();
al_data_memory_barrier();
return (generic_bs_r_2(NULL, (bus_space_handle_t)l, 0));
}
static inline uint32_t
al_reg_read32(uint32_t *l)
{
dsb();
al_data_memory_barrier();
return (generic_bs_r_4(NULL, (bus_space_handle_t)l, 0));
}
@ -223,10 +304,8 @@ al_reg_read32(uint32_t *l)
#define AL_DBG_LEVEL AL_DBG_LEVEL_ERR
extern struct mtx al_dbg_lock;
#define AL_DBG_LOCK() mtx_lock_spin(&al_dbg_lock)
#define AL_DBG_UNLOCK() mtx_unlock_spin(&al_dbg_lock)
#define AL_DBG_LOCK()
#define AL_DBG_UNLOCK()
/**
* print message
@ -277,39 +356,6 @@ extern struct mtx al_dbg_lock;
__FILE__, __LINE__, __func__, #COND); \
} while(AL_FALSE)
/**
* Make sure data will be visible by other masters (other CPUS and DMA).
* usually this is achieved by the ARM DMB instruction.
*/
static void al_data_memory_barrier(void);
/**
* Make sure data will be visible by DMA masters, no restriction for other cpus
*/
static inline void
al_data_memory_barrier(void)
{
dsb();
}
/**
* Make sure data will be visible in order by other cpus masters.
*/
static inline void
al_smp_data_memory_barrier(void)
{
dsb();
}
/**
* Make sure write data will be visible in order by other cpus masters.
*/
static inline void
al_local_data_memory_barrier(void)
{
dsb();
}
/**
* al_udelay - micro sec delay
*/

View File

@ -60,24 +60,6 @@ typedef int al_bool; /** boolean */
#define AL_TRUE 1
#define AL_FALSE 0
/* define types */
#ifndef AL_HAVE_TYPES
typedef unsigned char uint8_t; /** unsigned 8 bits */
typedef unsigned short uint16_t; /** unsigned 16 bits */
typedef unsigned int uint32_t; /** unsigned 32 bits */
typedef unsigned long long uint64_t; /** unsigned 64 bits */
typedef signed char int8_t; /** signed 8 bits */
typedef short int int16_t; /** signed 16 bits */
typedef signed int int32_t; /** signed 32 bits */
/** An unsigned int that is guaranteed to be the same size as a pointer */
/** C99 standard */
typedef unsigned long uintptr_t;
#endif
/** in LPAE mode, the address is 40 bit, we extend it to 64 bit */
typedef uint64_t al_phys_addr_t;

View File

@ -57,6 +57,7 @@ extern "C" {
/* *INDENT-ON* */
#define AL_BIT(b) (1UL << (b))
#define AL_BIT_64(b) (1ULL << (b))
#define AL_ADDR_LOW(x) ((uint32_t)((al_phys_addr_t)(x)))
#define AL_ADDR_HIGH(x) ((uint32_t)((((al_phys_addr_t)(x)) >> 16) >> 16))
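A worked example of the split these macros perform on a 40-bit LPAE-style physical address:
/* 0x12_3456_7000 splits into high = 0x12 and low = 0x34567000 */
al_phys_addr_t addr = 0x1234567000ULL;
uint32_t lo = AL_ADDR_LOW(addr);	/* 0x34567000 */
uint32_t hi = AL_ADDR_HIGH(addr);	/* 0x00000012 */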

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,74 @@
/*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_serdes_api API
* SerDes HAL driver API
* @ingroup group_serdes SerDes
* @{
*
* @file al_hal_serdes_25g.h
*
* @brief Header file for the SerDes HAL driver
*
*/
#ifndef __AL_HAL_SERDES_25G_H__
#define __AL_HAL_SERDES_25G_H__
#include "al_hal_common.h"
#include "al_hal_serdes_interface.h"
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
int al_serdes_25g_handle_init(
void __iomem *serdes_regs_base,
struct al_serdes_grp_obj *obj);
/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /* __AL_HAL_SERDES_25G_H__ */
/** @} end of SERDES group */
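A brief sketch of how the init entry point above would typically be called. The register-base mapping and the probe wrapper are hypothetical; struct al_serdes_grp_obj comes from al_hal_serdes_interface.h (not shown here).

#include "al_hal_serdes_25g.h"

static struct al_serdes_grp_obj serdes_grp;	/* filled in by the init call */

static int serdes_25g_probe(void __iomem *regs_base)
{
	/* regs_base is the mapped SerDes register file; mapping it is platform specific. */
	int rc = al_serdes_25g_handle_init(regs_base, &serdes_grp);

	if (rc != 0)
		return rc;	/* non-zero means the handle could not be initialized */

	/* serdes_grp now carries the per-group callbacks provided by the HAL. */
	return 0;
}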

File diff suppressed because it is too large

View File

@ -0,0 +1,434 @@
/*******************************************************************************
Copyright (C) 2013 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 or V3 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @{
* @file al_hal_serdes_c_regs.h
*
* @brief ... registers
*
*/
#ifndef __AL_HAL_serdes_c_REGS_H__
#define __AL_HAL_serdes_c_REGS_H__
#include "al_hal_plat_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Unit Registers
*/
struct al_serdes_c_gen {
/* [0x0] SERDES registers Version */
uint32_t version;
uint32_t rsrvd_0[3];
/* [0x10] SERDES register file address */
uint32_t reg_addr;
/* [0x14] SERDES register file data */
uint32_t reg_data;
/* [0x18] SERDES control */
uint32_t ctrl;
/* [0x1c] SERDES cpu mem address */
uint32_t cpu_prog_addr;
/* [0x20] SERDES cpu mem data */
uint32_t cpu_prog_data;
/* [0x24] SERDES data mem address */
uint32_t cpu_data_mem_addr;
/* [0x28] SERDES data mem data */
uint32_t cpu_data_mem_data;
/* [0x2c] SERDES control */
uint32_t rst;
/* [0x30] SERDES control */
uint32_t status;
uint32_t rsrvd[51];
};
struct al_serdes_c_lane {
uint32_t rsrvd_0[4];
/* [0x10] Data configuration */
uint32_t cfg;
/* [0x14] Lane status */
uint32_t stat;
/* [0x18] SERDES control */
uint32_t reserved;
uint32_t rsrvd[25];
};
struct al_serdes_c_regs {
uint32_t rsrvd_0[64];
struct al_serdes_c_gen gen; /* [0x100] */
struct al_serdes_c_lane lane[2]; /* [0x200] */
};
/*
* Registers Fields
*/
/**** version register ****/
/* Revision number (Minor) */
#define SERDES_C_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
#define SERDES_C_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0
/* Revision number (Major) */
#define SERDES_C_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
#define SERDES_C_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
/* date of release */
#define SERDES_C_GEN_VERSION_DATE_DAY_MASK 0x001F0000
#define SERDES_C_GEN_VERSION_DATE_DAY_SHIFT 16
/* month of release */
#define SERDES_C_GEN_VERSION_DATA_MONTH_MASK 0x01E00000
#define SERDES_C_GEN_VERSION_DATA_MONTH_SHIFT 21
/* year of release (starting from 2000) */
#define SERDES_C_GEN_VERSION_DATE_YEAR_MASK 0x3E000000
#define SERDES_C_GEN_VERSION_DATE_YEAR_SHIFT 25
/* Reserved */
#define SERDES_C_GEN_VERSION_RESERVED_MASK 0xC0000000
#define SERDES_C_GEN_VERSION_RESERVED_SHIFT 30
/**** reg_addr register ****/
/* address value */
#define SERDES_C_GEN_REG_ADDR_VAL_MASK 0x00007FFF
#define SERDES_C_GEN_REG_ADDR_VAL_SHIFT 0
/**** reg_data register ****/
/* data value */
#define SERDES_C_GEN_REG_DATA_VAL_MASK 0x000000FF
#define SERDES_C_GEN_REG_DATA_VAL_SHIFT 0
/* Bit-wise write enable */
#define SERDES_C_GEN_REG_DATA_STRB_MASK 0x0000FF00
#define SERDES_C_GEN_REG_DATA_STRB_SHIFT 8
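The reg_addr/reg_data pair above implies an indirect interface into the SerDes internal register file: the 15-bit address is written first, then the byte value together with its per-bit write strobes. A minimal sketch under that assumption; plain volatile accesses stand in for the HAL's own MMIO accessors.

#include <stdint.h>

static void serdes_c_indirect_write8(volatile struct al_serdes_c_regs *regs,
				     uint16_t addr, uint8_t val)
{
	regs->gen.reg_addr = addr & SERDES_C_GEN_REG_ADDR_VAL_MASK;
	regs->gen.reg_data = (uint32_t)val |
			     SERDES_C_GEN_REG_DATA_STRB_MASK;	/* enable all eight bit strobes */
}

static uint8_t serdes_c_indirect_read8(volatile struct al_serdes_c_regs *regs,
				       uint16_t addr)
{
	regs->gen.reg_addr = addr & SERDES_C_GEN_REG_ADDR_VAL_MASK;
	return regs->gen.reg_data & SERDES_C_GEN_REG_DATA_VAL_MASK;
}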
/**** ctrl register ****/
/*
* 0x0 Select reference clock from Bump
* 0x1 Select inter-macro reference clock from the left side
* 0x2 Same as 0x0
* 0x3 Select inter-macro reference clock from the right side
*/
#define SERDES_C_GEN_CTRL_REFCLK_INPUT_SEL_MASK 0x00000003
#define SERDES_C_GEN_CTRL_REFCLK_INPUT_SEL_SHIFT 0
#define SERDES_C_GEN_CTRL_REFCLK_INPUT_SEL_REF \
(0 << (SERDES_C_GEN_CTRL_REFCLK_INPUT_SEL_SHIFT))
#define SERDES_C_GEN_CTRL_REFCLK_INPUT_SEL_L2R \
(1 << (SERDES_C_GEN_CTRL_REFCLK_INPUT_SEL_SHIFT))
#define SERDES_C_GEN_CTRL_REFCLK_INPUT_SEL_R2L \
(3 << (SERDES_C_GEN_CTRL_REFCLK_INPUT_SEL_SHIFT))
/*
* 0x0 Tied to 0 to save power
* 0x1 Select reference clock from Bump
* 0x2 Select inter-macro reference clock input from right side
* 0x3 Same as 0x2
*/
#define SERDES_C_GEN_CTRL_REFCLK_LEFT_SEL_MASK 0x00000030
#define SERDES_C_GEN_CTRL_REFCLK_LEFT_SEL_SHIFT 4
#define SERDES_C_GEN_CTRL_REFCLK_LEFT_SEL_0 \
(0 << (SERDES_C_GEN_CTRL_REFCLK_LEFT_SEL_SHIFT))
#define SERDES_C_GEN_CTRL_REFCLK_LEFT_SEL_REF \
(1 << (SERDES_C_GEN_CTRL_REFCLK_LEFT_SEL_SHIFT))
#define SERDES_C_GEN_CTRL_REFCLK_LEFT_SEL_R2L \
(2 << (SERDES_C_GEN_CTRL_REFCLK_LEFT_SEL_SHIFT))
/*
* 0x0 Tied to 0 to save power
* 0x1 Select reference clock from Bump
* 0x2 Select inter-macro reference clock input from left side
* 0x3 Same as 0x2
*/
#define SERDES_C_GEN_CTRL_REFCLK_RIGHT_SEL_MASK 0x000000C0
#define SERDES_C_GEN_CTRL_REFCLK_RIGHT_SEL_SHIFT 6
#define SERDES_C_GEN_CTRL_REFCLK_RIGHT_SEL_0 \
(0 << (SERDES_C_GEN_CTRL_REFCLK_RIGHT_SEL_SHIFT))
#define SERDES_C_GEN_CTRL_REFCLK_RIGHT_SEL_REF \
(1 << (SERDES_C_GEN_CTRL_REFCLK_RIGHT_SEL_SHIFT))
#define SERDES_C_GEN_CTRL_REFCLK_RIGHT_SEL_L2R \
(2 << (SERDES_C_GEN_CTRL_REFCLK_RIGHT_SEL_SHIFT))
/*
* Program memory acknowledge - driven to 0
* only when the access to the program
* memory is not ready for the
* microcontroller
*/
#define SERDES_C_GEN_CTRL_CPU_MEMPSACK (1 << 8)
/*
* Data memory acknowledge - driven to 0
* only when the access to the data
* memory is not ready for the
* microcontroller
*/
#define SERDES_C_GEN_CTRL_CPU_MEMACK (1 << 12)
/*
* 0 - keep cpu clk as sb clk
* 1 cpu_clk is sb_clk divided by 2
*/
#define SERDES_C_GEN_CTRL_CPU_CLK_DIV (1 << 16)
/*
* 0x0 OIF CEI-28G-SR
* 0x1 OIF CEI-25G-LR
* 0x8 XFI
* Others Reserved
*
* Note that phy_ctrl_cfg_i[3] is used to signify high-speed/low-speed
*/
#define SERDES_C_GEN_CTRL_PHY_CTRL_CFG_MASK 0x00F00000
#define SERDES_C_GEN_CTRL_PHY_CTRL_CFG_SHIFT 20
/*
* 0 - Internal 8051 microcontroller is allowed to access the internal APB
* CSR. Internal APB runs at cpu_clk_i, and accesses from the external APB
* in the apb_clk_i domain to the APB CSR are resynchronized to cpu_clk_i.
* 1 - Bypass CPU. Internal 8051 microcontroller is blocked from accessing
* the internal APB CSR. Internal APB runs at apb_clk_i.
*/
#define SERDES_C_GEN_CTRL_CPU_BYPASS (1 << 24)
/**** cpu_prog_addr register ****/
/*
* address value 32 bit,
* The firmware data will be 1 byte with 64K rows
*/
#define SERDES_C_GEN_CPU_PROG_ADDR_VAL_MASK 0x00007FFF
#define SERDES_C_GEN_CPU_PROG_ADDR_VAL_SHIFT 0
/**** cpu_data_mem_addr register ****/
/* address value 8K byte memory */
#define SERDES_C_GEN_CPU_DATA_MEM_ADDR_VAL_MASK 0x00001FFF
#define SERDES_C_GEN_CPU_DATA_MEM_ADDR_VAL_SHIFT 0
/**** cpu_data_mem_data register ****/
/* data value */
#define SERDES_C_GEN_CPU_DATA_MEM_DATA_VAL_MASK 0x000000FF
#define SERDES_C_GEN_CPU_DATA_MEM_DATA_VAL_SHIFT 0
/**** rst register ****/
/* Power on reset Signal active low */
#define SERDES_C_GEN_RST_POR_N (1 << 0)
/* CMU reset Active low */
#define SERDES_C_GEN_RST_CM0_RST_N (1 << 1)
/*
* 0x0 Normal / Active
* 0x1 Partial power down
* 0x2 Near complete power down (only
* refclk buffers and portions of analog bias
* active)
* 0x3 complete power down (IDDQ mode)
* Can be asserted when CMU is in normal
* mode. These modes provide an increased
* power savings compared to reset mode.
* Signal is overridden by por_n_i so has no
* effect in power on reset state.
*/
#define SERDES_C_GEN_RST_CM0_PD_MASK 0x00000030
#define SERDES_C_GEN_RST_CM0_PD_SHIFT 4
/* Lane0 reset signal active low */
#define SERDES_C_GEN_RST_LN0_RST_N (1 << 6)
/* Lane1 reset signal active low */
#define SERDES_C_GEN_RST_LN1_RST_N (1 << 7)
/*
* 0x0 Normal / Active
* 0x1 Partial power down
* 0x2 Most blocks powered down (only LOS
* active)
* 0x3 complete power down (IDDQ mode)
* Can be asserted when Lane is in normal
* mode. These modes provide an increased
* power savings compared to reset mode.
* Signal is overridden by por_n_i so has no
* effect in power on reset state
*/
#define SERDES_C_GEN_RST_LN0_PD_MASK 0x00000300
#define SERDES_C_GEN_RST_LN0_PD_SHIFT 8
/*
* 0x0 Normal / Active
* 0x1 Partial power down
* 0x2 Most blocks powered down (only LOS
* active)
* 0x3 complete power down (IDDQ mode)
* Can be asserted when Lane is in normal
* mode. These modes provide an increased
* power savings compared to reset mode.
* Signal is overridden by por_n_i so has no
* effect in power on reset state
*/
#define SERDES_C_GEN_RST_LN1_PD_MASK 0x00000C00
#define SERDES_C_GEN_RST_LN1_PD_SHIFT 10
#define SERDES_C_GEN_RST_CPU_MEM_RESET (1 << 12)
#define SERDES_C_GEN_RST_CPU_MEM_SHUTDOWN (1 << 13)
#define SERDES_C_GEN_RST_CAPRI_APB_RESET (1 << 14)
/**** status register ****/
/*
* 0x0 No error
* 0x1 PHY has an internal error
*/
#define SERDES_C_GEN_STATUS_ERR_O (1 << 0)
/*
* 0x0 PHY is not ready to respond to
* cm0_rst_n_i and cm0_pd_i[1:0]. The
* signals should not be changed.
* 0x1 - PHY is ready to respond to
* cm0_rst_n_i and cm0_pd_i[1:0]
*/
#define SERDES_C_GEN_STATUS_CM0_RST_PD_READY (1 << 1)
/*
* Indicates CMU PLL has locked to the
* reference clock and all output clocks are at
* the correct frequency
*/
#define SERDES_C_GEN_STATUS_CM0_OK_O (1 << 2)
/*
* 0x0 PHY is not ready to respond to
* ln0_rst_n and ln0_pd[1:0]. The signals
* should not be changed.
* 0x1 - PHY is ready to respond to lnX_rst_n_i
* and lnX_pd_i[1:0]
*/
#define SERDES_C_GEN_STATUS_LN0_RST_PD_READY (1 << 3)
/*
* 0x0 PHY is not ready to respond to
* ln1_rst_n_i and ln1_pd[1:0]. The signals
* should not be changed.
* 0x1 - PHY is ready to respond to lnX_rst_n_i
* and lnX_pd_i[1:0]
*/
#define SERDES_C_GEN_STATUS_LN1_RST_PD_READY (1 << 4)
/*
* Active low when the CPU performs a wait cycle (internally or externally
* generated)
*/
#define SERDES_C_GEN_STATUS_CPU_WAITSTATE (1 << 5)
#define SERDES_C_GEN_STATUS_TBUS_MASK 0x000FFF00
#define SERDES_C_GEN_STATUS_TBUS_SHIFT 8
/**** cfg register ****/
/* 1- Swap 32 bit data on RX side */
#define SERDES_C_LANE_CFG_RX_LANE_SWAP (1 << 0)
/* 1- Swap 32 bit data on TX side */
#define SERDES_C_LANE_CFG_TX_LANE_SWAP (1 << 1)
/* 1 invert rx data polarity */
#define SERDES_C_LANE_CFG_LN_CTRL_RXPOLARITY (1 << 2)
/* 1 invert tx data polarity */
#define SERDES_C_LANE_CFG_TX_LANE_POLARITY (1 << 3)
/*
* 0x0 Data on lnX_txdata_o will not be
* transmitted. Transmitter will be placed into
* electrical idle.
* 0x1 Data on the active bits of
* lnX_txdata_o will be transmitted
*/
#define SERDES_C_LANE_CFG_LN_CTRL_TX_EN (1 << 4)
/*
* Informs the PHY to bypass the output of the
* analog LOS detector and instead rely upon
* a protocol LOS mechanism in the SoC/ASIC
* 0x0 LOS operates as normal
* 0x1 Bypass analog LOS output and
* instead rely upon protocol-level LOS
* detection via input lnX_ctrl_los_eii_value
*/
#define SERDES_C_LANE_CFG_LN_CTRL_LOS_EII_EN (1 << 5)
/*
* If lnX_ctrl_los_eii_en_i = 1 then Informs
* the PHY that the received signal was lost
*/
#define SERDES_C_LANE_CFG_LN_CTRL_LOS_EII_VALUE (1 << 6)
/* One hot mux */
#define SERDES_C_LANE_CFG_TX_DATA_SRC_SELECT_MASK 0x00000F00
#define SERDES_C_LANE_CFG_TX_DATA_SRC_SELECT_SHIFT 8
/* 0x0 - 20-bit 0x1 40-bit */
#define SERDES_C_LANE_CFG_LN_CTRL_DATA_WIDTH (1 << 12)
/**** stat register ****/
/*
* 0x0 lane is not ready to send and receive data
* 0x1 lane is ready to send and receive data
*/
#define SERDES_C_LANE_STAT_LNX_STAT_OK (1 << 0)
/*
* 0x0 received data run length has not
* exceed the programmable run length
* detector threshold
* 0x1 received data run length has
* exceeded the programmable run length
* detector threshold
*/
#define SERDES_C_LANE_STAT_LN_STAT_RUNLEN_ERR (1 << 1)
/*
* 0x0 data on lnX_rxdata_o are invalid
* 0x1 data on the active bits of
* lnX_rxdata_o are valid
*/
#define SERDES_C_LANE_STAT_LN_STAT_RXVALID (1 << 2)
/*
* Loss of Signal (LOS) indicator that includes
* the combined functions of the digitally
* assisted analog LOS, digital LOS, and
* protocol LOS override features
* 0x0 Signal detected on lnX_rxp_i /
* lnX_rxm_i pins
* 0x1 No signal detected on lnX_rxp_i /
* lnX_rxm_i pins
*/
#define SERDES_C_LANE_STAT_LN_STAT_LOS (1 << 3)
#define SERDES_C_LANE_STAT_LN_STAT_LOS_DEGLITCH (1 << 4)
/**** reserved register ****/
#define SERDES_C_LANE_RESERVED_DEF_0_MASK 0x0000FFFF
#define SERDES_C_LANE_RESERVED_DEF_0_SHIFT 0
#define SERDES_C_LANE_RESERVED_DEF_1_MASK 0xFFFF0000
#define SERDES_C_LANE_RESERVED_DEF_1_SHIFT 16
#ifdef __cplusplus
}
#endif
#endif /* __AL_HAL_serdes_c_REGS_H__ */
/** @} end of ... group */

View File

@ -0,0 +1,87 @@
/*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_serdes_api API
* SerDes HAL driver API
* @ingroup group_serdes SerDes
* @{
*
* @file al_hal_serdes.h
*
* @brief Header file for the SerDes HAL driver
*
*/
#ifndef __AL_HAL_SERDES_H__
#define __AL_HAL_SERDES_H__
#include "al_hal_common.h"
#include "al_hal_serdes_interface.h"
#include "al_hal_serdes_hssp_regs.h"
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
/**
* Initializes a SERDES group object
*
* @param serdes_regs_base
* The SERDES register file base pointer
*
* @param obj
* An allocated, non initialized object context
*
* @return 0 if no error found.
*
*/
int al_serdes_hssp_handle_init(
void __iomem *serdes_regs_base,
struct al_serdes_grp_obj *obj);
/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /* __AL_HAL_SERDES_H__ */
/** @} end of SERDES group */

View File

@ -0,0 +1,749 @@
/*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#ifndef __AL_SERDES_INTERNAL_REGS_H__
#define __AL_SERDES_INTERNAL_REGS_H__
#ifdef __cplusplus
extern "C" {
#endif
/*******************************************************************************
* Per lane register fields
******************************************************************************/
/*
* RX and TX lane hard reset
* 0 - Hard reset is asserted
* 1 - Hard reset is de-asserted
*/
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM 2
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK 0x01
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT 0x00
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_DEASSERT 0x01
/*
* RX and TX lane hard reset control
* 0 - Hard reset is taken from the interface pins
* 1 - Hard reset is taken from registers
*/
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM 2
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK 0x02
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_IFACE 0x00
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS 0x02
/* RX lane power state control */
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM 3
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK 0x1f
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD 0x01
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P2 0x02
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P1 0x04
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0S 0x08
#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0 0x10
/* TX lane power state control */
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_REG_NUM 4
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_MASK 0x1f
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_PD 0x01
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P2 0x02
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P1 0x04
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0S 0x08
#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0 0x10
/* RX lane word width */
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM 5
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK 0x07
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_8 0x00
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_10 0x01
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_16 0x02
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20 0x03
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_32 0x04
#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_40 0x05
/* TX lane word width */
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_REG_NUM 5
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_MASK 0x70
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_8 0x00
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_10 0x10
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_16 0x20
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_20 0x30
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_32 0x40
#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_40 0x50
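These per-lane fields are one byte wide and addressed by their *_REG_NUM within the lane's internal register file. A hedged sketch of the usual read-modify-write pattern follows; lane_ireg_read()/lane_ireg_write() are hypothetical accessors standing in for the HAL's own indirect-access helpers.

#include <stdint.h>

/* Hypothetical indirect accessors for one lane's internal registers. */
uint8_t lane_ireg_read(int lane, uint16_t reg_num);
void lane_ireg_write(int lane, uint16_t reg_num, uint8_t val);

/* Switch the RX parallel word width to 20 bits on one lane. */
static void lane_set_rx_width_20(int lane)
{
	uint8_t v = lane_ireg_read(lane, SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM);

	v &= ~SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK;	/* clear the 3-bit width field */
	v |= SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20;	/* select the 20-bit width */
	lane_ireg_write(lane, SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM, v);
}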
/* RX lane rate select */
#define SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM 6
#define SERDES_IREG_FLD_PCSRX_DIVRATE_MASK 0x07
#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8 0x00
#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4 0x01
#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2 0x02
#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1 0x03
/* TX lane rate select */
#define SERDES_IREG_FLD_PCSTX_DIVRATE_REG_NUM 6
#define SERDES_IREG_FLD_PCSTX_DIVRATE_MASK 0x70
#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_8 0x00
#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_4 0x10
#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_2 0x20
#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1 0x30
/*
* PMA serial RX-to-TX loop-back enable (from AGC to IO Driver). Serial receive
* to transmit loopback: 0 - Disables loopback 1 - Transmits the untimed,
* partially equalized RX signal out the transmit IO pins
*/
#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN 0x10
/*
* PMA TX-to-RX buffered serial loop-back enable (bypasses IO Driver). Serial
* transmit to receive buffered loopback: 0 - Disables loopback 1 - Loops back
* the TX serializer output into the CDR
*/
#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN 0x20
/*
* PMA TX-to-RX I/O serial loop-back enable (loop back done directly from TX to
* RX pads). Serial IO loopback from the transmit lane IO pins to the receive
* lane IO pins: 0 - Disables loopback 1 - Loops back the driver IO signal to
* the RX IO pins
*/
#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN 0x40
/*
* PMA Parallel RX-to-TX loop-back enable. Parallel loopback from the PMA
* receive lane 20-bit data ports, to the transmit lane 20-bit data ports 0 -
* Disables loopback 1 - Loops back the 20-bit receive data port to the
* transmitter
*/
#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN 0x80
/*
* PMA CDR recovered-clock loopback enable; asserted when PARRX2TXTIMEDEN is 1.
* Transmit bit clock select: 0 - Selects synthesizer bit clock for transmit 1
* - Selects CDR clock for transmit
*/
#define SERDES_IREG_FLD_LB_CDRCLK2TXEN_REG_NUM 7
#define SERDES_IREG_FLD_LB_CDRCLK2TXEN 0x01
/* Receive lane BIST enable. Active High */
#define SERDES_IREG_FLD_PCSRXBIST_EN_REG_NUM 8
#define SERDES_IREG_FLD_PCSRXBIST_EN 0x01
/* TX lane BIST enable. Active High */
#define SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM 8
#define SERDES_IREG_FLD_PCSTXBIST_EN 0x02
/*
* RX BIST completion signal 0 - Indicates test is not completed 1 - Indicates
* the test has completed, and will remain high until a new test is initiated
*/
#define SERDES_IREG_FLD_RXBIST_DONE_REG_NUM 8
#define SERDES_IREG_FLD_RXBIST_DONE 0x04
/*
* RX BIST error count overflow indicator. Indicates an overflow in the number
* of byte errors identified during the course of the test. This word is stable
* to sample when *_DONE_* signal has asserted
*/
#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW_REG_NUM 8
#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW 0x08
/*
* RX BIST locked indicator 0 - Indicates BIST is not word locked and error
* comparisons have not begun yet 1 - Indicates BIST is word locked and error
* comparisons have begun
*/
#define SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM 8
#define SERDES_IREG_FLD_RXBIST_RXLOCKED 0x10
/*
* RX BIST error count word. Indicates the number of byte errors identified
* during the course of the test. This word is stable to sample when *_DONE_*
* signal has asserted
*/
#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM 9
#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM 10
/* Tx params */
#define SERDES_IREG_TX_DRV_1_REG_NUM 21
#define SERDES_IREG_TX_DRV_1_HLEV_MASK 0x7
#define SERDES_IREG_TX_DRV_1_HLEV_SHIFT 0
#define SERDES_IREG_TX_DRV_1_LEVN_MASK 0xf8
#define SERDES_IREG_TX_DRV_1_LEVN_SHIFT 3
#define SERDES_IREG_TX_DRV_2_REG_NUM 22
#define SERDES_IREG_TX_DRV_2_LEVNM1_MASK 0xf
#define SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT 0
#define SERDES_IREG_TX_DRV_2_LEVNM2_MASK 0x30
#define SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT 4
#define SERDES_IREG_TX_DRV_3_REG_NUM 23
#define SERDES_IREG_TX_DRV_3_LEVNP1_MASK 0x7
#define SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT 0
#define SERDES_IREG_TX_DRV_3_SLEW_MASK 0x18
#define SERDES_IREG_TX_DRV_3_SLEW_SHIFT 3
/* Rx params */
#define SERDES_IREG_RX_CALEQ_1_REG_NUM 24
#define SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK 0x7
#define SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT 0
/* DFE post-shaping tap 3dB frequency */
#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK 0x38
#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT 3
#define SERDES_IREG_RX_CALEQ_2_REG_NUM 25
/* DFE post-shaping tap gain */
#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK 0x7
#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT 0
/* DFE first tap gain control */
#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK 0x78
#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT 3
#define SERDES_IREG_RX_CALEQ_3_REG_NUM 26
#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK 0xf
#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT 0
#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK 0xf0
#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT 4
#define SERDES_IREG_RX_CALEQ_4_REG_NUM 27
#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK 0xf
#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT 0
#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK 0x70
#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT 4
#define SERDES_IREG_RX_CALEQ_5_REG_NUM 28
#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK 0x7
#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT 0
#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK 0xf8
#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT 3
/* RX lane best eye point measurement result */
#define SERDES_IREG_RXEQ_BEST_EYE_MSB_VAL_REG_NUM 29
#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_REG_NUM 30
#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_MASK 0x3F
/*
* Adaptive RX Equalization enable
* 0 - Disables adaptive RX equalization.
* 1 - Enables adaptive RX equalization.
*/
#define SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM 31
#define SERDES_IREG_FLD_PCSRXEQ_START (1 << 0)
/*
* Enables an eye diagram measurement
* within the PHY.
* 0 - Disables eye diagram measurement
* 1 - Enables eye diagram measurement
*/
#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM 31
#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START (1 << 1)
/*
* RX lane single roam eye point measurement start signal.
* If asserted, single measurement at fix XADJUST and YADJUST is started.
*/
#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM 31
#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START (1 << 2)
/*
* PHY Eye diagram measurement status
* signal
* 0 - Indicates eye diagram results are not
* valid for sampling
* 1 - Indicates eye diagram is complete and
* results are valid for sampling
*/
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM 32
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE (1 << 0)
/*
* Eye diagram error signal. Indicates if the
* measurement was invalid because the eye
* diagram was interrupted by the link entering
* electrical idle.
* 0 - Indicates eye diagram is valid
* 1- Indicates an error occurred, and the eye
* diagram measurement should be re-run
*/
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR_REG_NUM 32
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR (1 << 1)
/*
* PHY Adaptive Equalization status
* 0 - Indicates Adaptive Equalization results are not valid for sampling
* 1 - Indicates Adaptive Equalization is complete and results are valid for
* sampling
*/
#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM 32
#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE (1 << 2)
/*
*
* PHY Adaptive Equalization Status Signal
* 0 Indicates adaptive equalization results
* are not valid for sampling
* 1 Indicates adaptive equalization is
* complete and results are valid for sampling.
*/
#define SERDES_IREG_FLD_RXEQ_DONE_REG_NUM 32
#define SERDES_IREG_FLD_RXEQ_DONE (1 << 3)
/*
* 7-bit eye diagram time adjust control
* - 6-bits per UI
* - spans 2 UI
*/
#define SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM 33
/* 6-bit eye diagram voltage adjust control - spans +/-300mVdiff */
#define SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM 34
/*
* Eye diagram status signal. Safe for
* sampling when *DONE* signal has
* asserted
* 14'h0000 - Completely Closed Eye
* 14'h3FFF - Completely Open Eye
*/
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM 35
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_MAKE 0xFF
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_SHIFT 0
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM 36
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE 0x3F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_SHIFT 0
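The start/done/err bits above describe a simple handshake for the in-PHY eye diagram measurement. A hedged sketch of that flow, reusing the hypothetical lane_ireg_read()/lane_ireg_write() helpers from the earlier sketch; the 14-bit result is assumed to be composed of the 8-bit MSB register on top of the 6-bit LSB register, and a production driver would bound the polling loop with a timeout.

#include <stdint.h>

static int lane_measure_eye(int lane, uint16_t *eye_sum)
{
	uint8_t v;

	/* Start the eye diagram FSM. */
	v = lane_ireg_read(lane, SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM);
	lane_ireg_write(lane, SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
			v | SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START);

	/* Poll for completion; DONE and ERR live in the same register. */
	do {
		v = lane_ireg_read(lane, SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM);
	} while (!(v & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE));

	if (v & SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR)
		return -1;	/* link went idle, measurement must be re-run */

	/* Assumed composition: upper 8 bits from the MSB register, lower 6 from the LSB. */
	*eye_sum = ((uint16_t)lane_ireg_read(lane,
			SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM) << 6) |
		   (lane_ireg_read(lane,
			SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM) &
		    SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE);
	return 0;
}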
/*
* RX lane single roam eye point measurement result.
* If 0, eye is open at current XADJUST and YADJUST settings.
*/
#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM 37
#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM 38
/*
* Override enable for CDR lock to reference clock
* 0 - CDR is always locked to reference
* 1 - CDR operation mode (Lock2Reference or Lock2data are controlled internally
* depending on the incoming signal and ppm status)
*/
#define SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM 39
#define SERDES_IREG_FLD_RXLOCK2REF_OVREN (1 << 1)
/*
* Selects Eye to capture based on edge
* 0 - Capture 1st Eye in Eye Diagram
* 1 - Capture 2nd Eye in Eye Diagram measurement
*/
#define SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM 39
#define SERDES_IREG_FLD_RXROAM_XORBITSEL (1 << 2)
#define SERDES_IREG_FLD_RXROAM_XORBITSEL_1ST 0
#define SERDES_IREG_FLD_RXROAM_XORBITSEL_2ND (1 << 2)
/*
* RX Signal detect. 0 indicates no signal, 1 indicates signal detected.
*/
#define SERDES_IREG_FLD_RXRANDET_REG_NUM 41
#define SERDES_IREG_FLD_RXRANDET_STAT 0x20
/*
* RX data polarity inversion control:
* 1'b0: no inversion
* 1'b1: invert polarity
*/
#define SERDES_IREG_FLD_POLARITY_RX_REG_NUM 46
#define SERDES_IREG_FLD_POLARITY_RX_INV (1 << 0)
/*
* TX data polarity inversion control:
* 1'b0: no inversion
* 1'b1: invert polarity
*/
#define SERDES_IREG_FLD_POLARITY_TX_REG_NUM 46
#define SERDES_IREG_FLD_POLARITY_TX_INV (1 << 1)
/* LANEPCSPSTATE* override enable (Active low) */
#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN (1 << 0)
/* LB* override enable (Active low) */
#define SERDES_IREG_FLD_LB_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_LB_LOCWREN (1 << 1)
/* PCSRX* override enable (Active low) */
#define SERDES_IREG_FLD_PCSRX_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_PCSRX_LOCWREN (1 << 4)
/* PCSRXBIST* override enable (Active low) */
#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN (1 << 5)
/* PCSRXEQ* override enable (Active low) */
#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN (1 << 6)
/* PCSTX* override enable (Active low) */
#define SERDES_IREG_FLD_PCSTX_LOCWREN_REG_NUM 85
#define SERDES_IREG_FLD_PCSTX_LOCWREN (1 << 7)
/*
* group registers:
* SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN,
* SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN
* SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN
*/
#define SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM 86
/* PCSTXBIST* override enable (Active low) */
#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM 86
#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN (1 << 0)
/* Override RX_CALCEQ through the internal registers (Active low) */
#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM 86
#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN (1 << 3)
#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM 86
#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN (1 << 4)
/* RXCALROAMEYEMEASIN* override enable - Active Low */
#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM 86
#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN (1 << 6)
/* RXCALROAMXADJUST* override enable - Active Low */
#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM 86
#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN (1 << 7)
/* RXCALROAMYADJUST* override enable - Active Low */
#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN (1 << 0)
/* RXCDRCALFOSC* override enable. Active Low */
#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN (1 << 1)
/* Over-write enable for RXEYEDIAGFSM_INITXVAL */
#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN (1 << 2)
/* Over-write enable for CMNCLKGENMUXSEL_TXINTERNAL */
#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN (1 << 3)
/* TXCALTCLKDUTY* override enable. Active Low */
#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN_REG_NUM 87
#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN (1 << 4)
/* Override TX_DRV through the internal registers (Active low) */
#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM 87
#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN (1 << 5)
/*******************************************************************************
* Common lane register fields - PMA
******************************************************************************/
/*
* Common lane hard reset control
* 0 - Hard reset is taken from the interface pins
* 1 - Hard reset is taken from registers
*/
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM 2
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK 0x01
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_IFACE 0x00
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS 0x01
/*
* Common lane hard reset
* 0 - Hard reset is asserted
* 1 - Hard reset is de-asserted
*/
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM 2
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK 0x02
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT 0x00
#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_DEASSERT 0x02
/* Synth power state control */
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM 3
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK 0x1f
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD 0x01
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P2 0x02
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P1 0x04
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0S 0x08
#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0 0x10
/* Transmit datapath FIFO enable (Active High) */
#define SERDES_IREG_FLD_CMNPCS_TXENABLE_REG_NUM 8
#define SERDES_IREG_FLD_CMNPCS_TXENABLE (1 << 2)
/*
* RX loss of signal detector enable
* - 0 - disable
* - 1 - enable
*/
#define SERDES_IREG_FLD_RXLOSDET_ENABLE_REG_NUM 13
#define SERDES_IREG_FLD_RXLOSDET_ENABLE AL_BIT(4)
/* Signal Detect Threshold Level */
#define SERDES_IREG_FLD_RXELECIDLE_SIGDETTHRESH_REG_NUM 15
#define SERDES_IREG_FLD_RXELECIDLE_SIGDETTHRESH_MASK AL_FIELD_MASK(2, 0)
/* LOS Detect Threshold Level */
#define SERDES_IREG_FLD_RXLOSDET_THRESH_REG_NUM 15
#define SERDES_IREG_FLD_RXLOSDET_THRESH_MASK AL_FIELD_MASK(4, 3)
#define SERDES_IREG_FLD_RXLOSDET_THRESH_SHIFT 3
#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_REG_NUM 30
#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_MASK 0x7f
#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_REG_NUM 31
#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_MASK 0x7f
#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_REG_NUM 32
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_MASK 0xff
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_REG_NUM 33
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_MASK 0x1
#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_REG_NUM 33
#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_MASK 0x3e
#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_SHIFT 1
#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_REG_NUM 34
#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_MASK 0xff
#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_REG_NUM 35
#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_MASK 0x1
#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_FINE_STEP_REG_NUM 35
#define SERDES_IREG_FLD_RXEQ_FINE_STEP_MASK 0x3e
#define SERDES_IREG_FLD_RXEQ_FINE_STEP_SHIFT 1
#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_REG_NUM 36
#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_MASK 0xff
#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_REG_NUM 37
#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_MASK 0x7
#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_SHIFT 0
#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM 43
#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_MASK 0x7
#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_SHIFT 0
#define SERDES_IREG_FLD_TX_BIST_PAT_REG_NUM(byte_num) (56 + (byte_num))
#define SERDES_IREG_FLD_TX_BIST_PAT_NUM_BYTES 10
/*
* Selects the transmit BIST mode:
* 0 - Uses the 80-bit internal memory pattern (w/ OOB)
* 1 - Uses a 2^7 PRBS pattern
* 2 - Uses a 2^23 PRBS pattern
* 3 - Uses a 2^31 PRBS pattern
* 4 - Uses a 1010 clock pattern
* 5 and above - Reserved
*/
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM 80
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK 0x07
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_USER 0x00
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7 0x01
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS23 0x02
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS31 0x03
#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_CLK1010 0x04
/* Single-Bit error injection enable (on posedge) */
#define SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM 80
#define SERDES_IREG_FLD_TXBIST_BITERROR_EN 0x20
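A hedged sketch of starting a PRBS7 transmit BIST: the pattern is selected in the common-lane mode field above, then the per-lane TX BIST enable (register 8, defined earlier) is set. cmn_ireg_*() and lane_ireg_*() are hypothetical accessors for the common-lane and per-lane internal register files.

#include <stdint.h>

uint8_t cmn_ireg_read(uint16_t reg_num);			/* hypothetical */
void cmn_ireg_write(uint16_t reg_num, uint8_t val);		/* hypothetical */
uint8_t lane_ireg_read(int lane, uint16_t reg_num);		/* hypothetical */
void lane_ireg_write(int lane, uint16_t reg_num, uint8_t val);	/* hypothetical */

static void lane_start_tx_prbs7(int lane)
{
	uint8_t v;

	/* Select the PRBS7 pattern in the common-lane BIST mode field. */
	v = cmn_ireg_read(SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM);
	v &= ~SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK;
	v |= SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7;
	cmn_ireg_write(SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM, v);

	/* Enable the TX BIST generator on the lane. */
	v = lane_ireg_read(lane, SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM);
	lane_ireg_write(lane, SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM,
			v | SERDES_IREG_FLD_PCSTXBIST_EN);
}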
/* CMNPCIEGEN3* override enable (Active Low) */
#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM 95
#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN (1 << 2)
/* CMNPCS* override enable (Active Low) */
#define SERDES_IREG_FLD_CMNPCS_LOCWREN_REG_NUM 95
#define SERDES_IREG_FLD_CMNPCS_LOCWREN (1 << 3)
/* CMNPCSBIST* override enable (Active Low) */
#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN_REG_NUM 95
#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN (1 << 4)
/* CMNPCSPSTATE* override enable (Active Low) */
#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN_REG_NUM 95
#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN (1 << 5)
/* PCS_EN* override enable (Active Low) */
#define SERDES_IREG_FLD_PCS_LOCWREN_REG_NUM 96
#define SERDES_IREG_FLD_PCS_LOCWREN (1 << 3)
/* Eye diagram sample count */
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM 150
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_MASK 0xff
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_SHIFT 0
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM 151
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_MASK 0xff
#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_SHIFT 0
/* override control */
#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM 230
#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN (1 << 0)
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_REG_NUM 623
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_MASK 0xff
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_SHIFT 0
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_REG_NUM 624
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_MASK 0xff
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_SHIFT 0
/* X and Y coefficient return value */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM 626
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_MASK 0x0F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_SHIFT 0
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_MASK 0xF0
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_SHIFT 4
/* X coarse scan step */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM 627
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_MASK 0x7F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_SHIFT 0
/* X fine scan step */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM 628
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_MASK 0x7F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_SHIFT 0
/* Y coarse scan step */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM 629
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_MASK 0x0F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_SHIFT 0
/* Y fine scan step */
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM 630
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_MASK 0x0F
#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_SHIFT 0
#define SERDES_IREG_FLD_PPMDRIFTCOUNT1_REG_NUM 157
#define SERDES_IREG_FLD_PPMDRIFTCOUNT2_REG_NUM 158
#define SERDES_IREG_FLD_PPMDRIFTMAX1_REG_NUM 159
#define SERDES_IREG_FLD_PPMDRIFTMAX2_REG_NUM 160
#define SERDES_IREG_FLD_SYNTHPPMDRIFTMAX1_REG_NUM 163
#define SERDES_IREG_FLD_SYNTHPPMDRIFTMAX2_REG_NUM 164
/*******************************************************************************
* Common lane register fields - PCS
******************************************************************************/
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_REG_NUM 3
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_MASK AL_FIELD_MASK(5, 4)
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_SHIFT 4
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA_REG_NUM 6
#define SERDES_IREG_FLD_PCS_VPCSIF_OVR_RATE_ENA AL_BIT(2)
#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_NUM 18
#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_MASK 0x1F
#define SERDES_IREG_FLD_PCS_EBUF_FULL_D2R1_REG_SHIFT 0
#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_NUM 19
#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_MASK 0x7C
#define SERDES_IREG_FLD_PCS_EBUF_FULL_PCIE_G3_REG_SHIFT 2
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_NUM 20
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_MASK 0x1F
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_D2R1_REG_SHIFT 0
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_NUM 21
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_MASK 0x7C
#define SERDES_IREG_FLD_PCS_EBUF_RD_THRESHOLD_PCIE_G3_REG_SHIFT 2
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_REG_NUM 22
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_MASK 0x7f
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_ITER_NUM_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_REG_NUM 34
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_MASK 0x7f
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_ITER_NUM_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_REG_NUM 23
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_MASK 0xff
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN1_MASK_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_REG_NUM 22
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_MASK 0x80
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_RUN2_MASK_SHIFT 7
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_REG_NUM 24
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_MASK 0x3e
#define SERDES_IREG_FLD_PCS_RXEQ_COARSE_STEP_SHIFT 1
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_REG_NUM 35
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_MASK 0xff
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN1_MASK_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_REG_NUM 34
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_MASK 0x80
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_RUN2_MASK_SHIFT 7
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_REG_NUM 36
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_MASK 0x1f
#define SERDES_IREG_FLD_PCS_RXEQ_FINE_STEP_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_REG_NUM 37
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_MASK 0xff
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_CODE_EN_SHIFT 0
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_REG_NUM 36
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_MASK 0xe0
#define SERDES_IREG_FLD_PCS_RXEQ_LOOKUP_LASTCODE_SHIFT 5
#ifdef __cplusplus
}
#endif
#endif /* __AL_SERDES_INTERNAL_REGS_H__ */

View File

@ -0,0 +1,494 @@
/*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @{
* @file al_hal_serdes_regs.h
*
* @brief ... registers
*
*/
#ifndef __AL_HAL_SERDES_REGS_H__
#define __AL_HAL_SERDES_REGS_H__
#include "al_hal_plat_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Unit Registers
*/
struct serdes_gen {
/* [0x0] SerDes Registers Version */
uint32_t version;
uint32_t rsrvd_0[3];
/* [0x10] SerDes register file address */
uint32_t reg_addr;
/* [0x14] SerDes register file data */
uint32_t reg_data;
uint32_t rsrvd_1[2];
/* [0x20] SerDes control */
uint32_t ictl_multi_bist;
/* [0x24] SerDes control */
uint32_t ictl_pcs;
/* [0x28] SerDes control */
uint32_t ictl_pma;
uint32_t rsrvd_2;
/* [0x30] SerDes control */
uint32_t ipd_multi_synth;
/* [0x34] SerDes control */
uint32_t irst;
/* [0x38] SerDes control */
uint32_t octl_multi_synthready;
/* [0x3c] SerDes control */
uint32_t octl_multi_synthstatus;
/* [0x40] SerDes control */
uint32_t clk_out;
uint32_t rsrvd[47];
};
struct serdes_lane {
uint32_t rsrvd1[4];
/* [0x10] SerDes status */
uint32_t octl_pma;
/* [0x14] SerDes control */
uint32_t ictl_multi_andme;
/* [0x18] SerDes control */
uint32_t ictl_multi_lb;
/* [0x1c] SerDes control */
uint32_t ictl_multi_rxbist;
/* [0x20] SerDes control */
uint32_t ictl_multi_txbist;
/* [0x24] SerDes control */
uint32_t ictl_multi;
/* [0x28] SerDes control */
uint32_t ictl_multi_rxeq;
/* [0x2c] SerDes control */
uint32_t ictl_multi_rxeq_l_low;
/* [0x30] SerDes control */
uint32_t ictl_multi_rxeq_l_high;
/* [0x34] SerDes control */
uint32_t ictl_multi_rxeyediag;
/* [0x38] SerDes control */
uint32_t ictl_multi_txdeemph;
/* [0x3c] SerDes control */
uint32_t ictl_multi_txmargin;
/* [0x40] SerDes control */
uint32_t ictl_multi_txswing;
/* [0x44] SerDes control */
uint32_t idat_multi;
/* [0x48] SerDes control */
uint32_t ipd_multi;
/* [0x4c] SerDes control */
uint32_t octl_multi_rxbist;
/* [0x50] SerDes control */
uint32_t octl_multi;
/* [0x54] SerDes control */
uint32_t octl_multi_rxeyediag;
/* [0x58] SerDes control */
uint32_t odat_multi_rxbist;
/* [0x5c] SerDes control */
uint32_t odat_multi_rxeq;
/* [0x60] SerDes control */
uint32_t multi_rx_dvalid;
/* [0x64] SerDes control */
uint32_t reserved;
uint32_t rsrvd[6];
};
struct al_serdes_regs {
uint32_t rsrvd_0[64];
struct serdes_gen gen; /* [0x100] */
struct serdes_lane lane[4]; /* [0x200] */
};
/*
* Registers Fields
*/
/**** version register ****/
/* Revision number (Minor) */
#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0
/* Revision number (Major) */
#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
/* Date of release */
#define SERDES_GEN_VERSION_DATE_DAY_MASK 0x001F0000
#define SERDES_GEN_VERSION_DATE_DAY_SHIFT 16
/* Month of release */
#define SERDES_GEN_VERSION_DATA_MONTH_MASK 0x01E00000
#define SERDES_GEN_VERSION_DATA_MONTH_SHIFT 21
/* Year of release (starting from 2000) */
#define SERDES_GEN_VERSION_DATE_YEAR_MASK 0x3E000000
#define SERDES_GEN_VERSION_DATE_YEAR_SHIFT 25
/* Reserved */
#define SERDES_GEN_VERSION_RESERVED_MASK 0xC0000000
#define SERDES_GEN_VERSION_RESERVED_SHIFT 30
/**** reg_addr register ****/
/* Address value */
#define SERDES_GEN_REG_ADDR_VAL_MASK 0x0000FFFF
#define SERDES_GEN_REG_ADDR_VAL_SHIFT 0
/**** reg_data register ****/
/* Data value */
#define SERDES_GEN_REG_DATA_VAL_MASK 0x000000FF
#define SERDES_GEN_REG_DATA_VAL_SHIFT 0
/**** ICTL_MULTI_BIST register ****/
#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_MASK 0x00000007
#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_SHIFT 0
/**** ICTL_PCS register ****/
#define SERDES_GEN_ICTL_PCS_EN_NT (1 << 0)
/**** ICTL_PMA register ****/
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_MASK 0x00000007
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT 0
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_REF \
(0 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_R2L \
(3 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_L2R \
(4 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_MASK 0x00000070
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT 4
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_0 \
(0 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_REF \
(2 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_R2L \
(3 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_MASK 0x00000700
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT 8
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_0 \
(0 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_REF \
(2 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_L2R \
(3 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC (1 << 11)
#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC_THIS (0 << 11)
#define SERDES_GEN_ICTL_PMA_TXENABLE_A_SRC_MASTER (1 << 11)
#define SERDES_GEN_ICTL_PMA_TXENABLE_A (1 << 12)
#define SERDES_GEN_ICTL_PMA_SYNTHCKBYPASSEN_NT (1 << 13)
/**** IPD_MULTI_SYNTH register ****/
#define SERDES_GEN_IPD_MULTI_SYNTH_B (1 << 0)
/**** IRST register ****/
#define SERDES_GEN_IRST_PIPE_RST_L3_B_A (1 << 0)
#define SERDES_GEN_IRST_PIPE_RST_L2_B_A (1 << 1)
#define SERDES_GEN_IRST_PIPE_RST_L1_B_A (1 << 2)
#define SERDES_GEN_IRST_PIPE_RST_L0_B_A (1 << 3)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A (1 << 4)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A (1 << 5)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A (1 << 6)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A (1 << 7)
#define SERDES_GEN_IRST_MULTI_HARD_SYNTH_B_A (1 << 8)
#define SERDES_GEN_IRST_POR_B_A (1 << 12)
#define SERDES_GEN_IRST_PIPE_RST_L3_B_A_SEL (1 << 16)
#define SERDES_GEN_IRST_PIPE_RST_L2_B_A_SEL (1 << 17)
#define SERDES_GEN_IRST_PIPE_RST_L1_B_A_SEL (1 << 18)
#define SERDES_GEN_IRST_PIPE_RST_L0_B_A_SEL (1 << 19)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A_SEL (1 << 20)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A_SEL (1 << 21)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A_SEL (1 << 22)
#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A_SEL (1 << 23)
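A hedged sketch of how the IRST bits above compose when taking the macro out of reset. The _B_A suffix is read here as active-low, so setting a bit de-asserts the corresponding reset; the required ordering, delays and SYNTHREADY polling are not shown and would follow the HAL's own sequence. Plain volatile accesses stand in for the HAL's MMIO helpers.

#include <stdint.h>

static void serdes_release_reset(volatile struct al_serdes_regs *regs)
{
	uint32_t irst = regs->gen.irst;

	irst |= SERDES_GEN_IRST_POR_B_A;		/* de-assert power-on reset */
	irst |= SERDES_GEN_IRST_MULTI_HARD_SYNTH_B_A;	/* release the common synthesizer */
	irst |= SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A |
		SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A |
		SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A |
		SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A;	/* release per-lane TX/RX resets */
	regs->gen.irst = irst;
}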
/**** OCTL_MULTI_SYNTHREADY register ****/
#define SERDES_GEN_OCTL_MULTI_SYNTHREADY_A (1 << 0)
/**** OCTL_MULTI_SYNTHSTATUS register ****/
#define SERDES_GEN_OCTL_MULTI_SYNTHSTATUS_A (1 << 0)
/**** clk_out register ****/
#define SERDES_GEN_CLK_OUT_SEL_MASK 0x0000003F
#define SERDES_GEN_CLK_OUT_SEL_SHIFT 0
/**** OCTL_PMA register ****/
#define SERDES_LANE_OCTL_PMA_TXSTATUS_L_A (1 << 0)
/**** ICTL_MULTI_ANDME register ****/
#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A (1 << 0)
#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A_SEL (1 << 1)
/**** ICTL_MULTI_LB register ****/
#define SERDES_LANE_ICTL_MULTI_LB_TX2RXIOTIMEDEN_L_NT (1 << 0)
#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT (1 << 1)
#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT (1 << 2)
#define SERDES_LANE_ICTL_MULTI_LB_PARRX2TXTIMEDEN_L_NT (1 << 3)
#define SERDES_LANE_ICTL_MULTI_LB_CDRCLK2TXEN_L_NT (1 << 4)
#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT_SEL (1 << 8)
#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT_SEL (1 << 9)
/**** ICTL_MULTI_RXBIST register ****/
#define SERDES_LANE_ICTL_MULTI_RXBIST_EN_L_A (1 << 0)
/**** ICTL_MULTI_TXBIST register ****/
#define SERDES_LANE_ICTL_MULTI_TXBIST_EN_L_A (1 << 0)
/**** ICTL_MULTI register ****/
#define SERDES_LANE_ICTL_MULTI_PSTATE_L_MASK 0x00000003
#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SHIFT 0
#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SEL (1 << 2)
#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_MASK 0x00000070
#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_SHIFT 4
#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATAEN_L_A (1 << 8)
#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATA_L_A (1 << 9)
#define SERDES_LANE_ICTL_MULTI_TXBEACON_L_A (1 << 12)
#define SERDES_LANE_ICTL_MULTI_TXDETECTRXREQ_L_A (1 << 13)
#define SERDES_LANE_ICTL_MULTI_RXRATE_L_MASK 0x00070000
#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SHIFT 16
#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SEL (1 << 19)
#define SERDES_LANE_ICTL_MULTI_TXRATE_L_MASK 0x00700000
#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SHIFT 20
#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SEL (1 << 23)
#define SERDES_LANE_ICTL_MULTI_TXAMP_L_MASK 0x07000000
#define SERDES_LANE_ICTL_MULTI_TXAMP_L_SHIFT 24
#define SERDES_LANE_ICTL_MULTI_TXAMP_EN_L (1 << 27)
#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_MASK 0x70000000
#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_SHIFT 28
/**** ICTL_MULTI_RXEQ register ****/
#define SERDES_LANE_ICTL_MULTI_RXEQ_EN_L (1 << 0)
#define SERDES_LANE_ICTL_MULTI_RXEQ_START_L_A (1 << 1)
#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_MASK 0x00000070
#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_SHIFT 4
/**** ICTL_MULTI_RXEQ_L_high register ****/
#define SERDES_LANE_ICTL_MULTI_RXEQ_L_HIGH_VAL (1 << 0)
/**** ICTL_MULTI_RXEYEDIAG register ****/
#define SERDES_LANE_ICTL_MULTI_RXEYEDIAG_START_L_A (1 << 0)
/**** ICTL_MULTI_TXDEEMPH register ****/
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_MASK 0x0003FFFF
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_SHIFT 0
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_MASK 0x7c0
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_SHIFT 6
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_MASK 0xf000
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_SHIFT 12
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_MASK 0x7
#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_SHIFT 0
/**** ICTL_MULTI_TXMARGIN register ****/
#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_MASK 0x00000007
#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_SHIFT 0
/**** ICTL_MULTI_TXSWING register ****/
#define SERDES_LANE_ICTL_MULTI_TXSWING_L (1 << 0)
/**** IDAT_MULTI register ****/
#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_MASK 0x0000000F
#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SHIFT 0
#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SEL (1 << 4)
/**** IPD_MULTI register ****/
#define SERDES_LANE_IPD_MULTI_TX_L_B (1 << 0)
#define SERDES_LANE_IPD_MULTI_RX_L_B (1 << 1)
/**** OCTL_MULTI_RXBIST register ****/
#define SERDES_LANE_OCTL_MULTI_RXBIST_DONE_L_A (1 << 0)
#define SERDES_LANE_OCTL_MULTI_RXBIST_RXLOCKED_L_A (1 << 1)
/**** OCTL_MULTI register ****/
#define SERDES_LANE_OCTL_MULTI_RXCDRLOCK2DATA_L_A (1 << 0)
#define SERDES_LANE_OCTL_MULTI_RXEQ_DONE_L_A (1 << 1)
#define SERDES_LANE_OCTL_MULTI_RXREADY_L_A (1 << 2)
#define SERDES_LANE_OCTL_MULTI_RXSTATUS_L_A (1 << 3)
#define SERDES_LANE_OCTL_MULTI_TXREADY_L_A (1 << 4)
#define SERDES_LANE_OCTL_MULTI_TXDETECTRXSTAT_L_A (1 << 5)
#define SERDES_LANE_OCTL_MULTI_TXDETECTRXACK_L_A (1 << 6)
#define SERDES_LANE_OCTL_MULTI_RXSIGNALDETECT_L_A (1 << 7)
/**** OCTL_MULTI_RXEYEDIAG register ****/
#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_MASK 0x00003FFF
#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_SHIFT 0
#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_DONE_L_A (1 << 16)
#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_ERR_L_A (1 << 17)
/**** ODAT_MULTI_RXBIST register ****/
#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_MASK 0x0000FFFF
#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_SHIFT 0
#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_OVERFLOW_L_A (1 << 16)
/**** ODAT_MULTI_RXEQ register ****/
#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_MASK 0x00003FFF
#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_SHIFT 0
/**** MULTI_RX_DVALID register ****/
#define SERDES_LANE_MULTI_RX_DVALID_MASK_CDR_LOCK (1 << 0)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_SIGNALDETECT (1 << 1)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_TX_READY (1 << 2)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_READY (1 << 3)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_SYNT_READY (1 << 4)
#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_ELECIDLE (1 << 5)
#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_MASK 0x00FF0000
#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_SHIFT 16
#define SERDES_LANE_MULTI_RX_DVALID_PS_00_SEL (1 << 24)
#define SERDES_LANE_MULTI_RX_DVALID_PS_00_VAL (1 << 25)
#define SERDES_LANE_MULTI_RX_DVALID_PS_01_SEL (1 << 26)
#define SERDES_LANE_MULTI_RX_DVALID_PS_01_VAL (1 << 27)
#define SERDES_LANE_MULTI_RX_DVALID_PS_10_SEL (1 << 28)
#define SERDES_LANE_MULTI_RX_DVALID_PS_10_VAL (1 << 29)
#define SERDES_LANE_MULTI_RX_DVALID_PS_11_SEL (1 << 30)
#define SERDES_LANE_MULTI_RX_DVALID_PS_11_VAL (1 << 31)
/**** reserved register ****/
#define SERDES_LANE_RESERVED_OUT_MASK 0x000000FF
#define SERDES_LANE_RESERVED_OUT_SHIFT 0
#define SERDES_LANE_RESERVED_IN_MASK 0x00FF0000
#define SERDES_LANE_RESERVED_IN_SHIFT 16
#ifdef __cplusplus
}
#endif
#endif /* __AL_HAL_serdes_REGS_H__ */
/** @} end of ... group */

View File

@@ -0,0 +1,875 @@
/*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/**
* @defgroup group_serdes_api API
* SerDes HAL driver API
* @ingroup group_serdes SerDes
* @{
*
* @file al_hal_serdes_interface.h
*
* @brief Header file for the SerDes HAL driver
*
*/
#ifndef __AL_HAL_SERDES_INTERFACE_H__
#define __AL_HAL_SERDES_INTERFACE_H__
#include "al_hal_common.h"
/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */
enum al_serdes_type {
AL_SRDS_TYPE_HSSP,
AL_SRDS_TYPE_25G,
};
enum al_serdes_reg_page {
/* Relevant to Serdes hssp and 25g */
AL_SRDS_REG_PAGE_0_LANE_0 = 0,
AL_SRDS_REG_PAGE_1_LANE_1,
/* Relevant to Serdes hssp only */
AL_SRDS_REG_PAGE_2_LANE_2,
AL_SRDS_REG_PAGE_3_LANE_3,
/* Relevant to Serdes hssp and 25g */
AL_SRDS_REG_PAGE_4_COMMON,
/* Relevant to Serdes hssp only */
AL_SRDS_REG_PAGE_0123_LANES_0123 = 7,
/* Relevant to Serdes 25g only */
AL_SRDS_REG_PAGE_TOP,
};
/* Relevant to Serdes hssp only */
enum al_serdes_reg_type {
AL_SRDS_REG_TYPE_PMA = 0,
AL_SRDS_REG_TYPE_PCS,
};
enum al_serdes_lane {
AL_SRDS_LANE_0 = AL_SRDS_REG_PAGE_0_LANE_0,
AL_SRDS_LANE_1 = AL_SRDS_REG_PAGE_1_LANE_1,
AL_SRDS_LANE_2 = AL_SRDS_REG_PAGE_2_LANE_2,
AL_SRDS_LANE_3 = AL_SRDS_REG_PAGE_3_LANE_3,
AL_SRDS_NUM_LANES,
AL_SRDS_LANES_0123 = AL_SRDS_REG_PAGE_0123_LANES_0123,
};
/** Serdes loopback mode */
enum al_serdes_lb_mode {
/** No loopback */
AL_SRDS_LB_MODE_OFF,
/**
* Transmits the untimed, partially equalized RX signal out of the transmit
* IO pins.
* No clock used (untimed)
*/
AL_SRDS_LB_MODE_PMA_IO_UN_TIMED_RX_TO_TX,
/**
* Loops back the TX serializer output into the CDR.
* CDR recovered bit clock used (without attenuation)
*/
AL_SRDS_LB_MODE_PMA_INTERNALLY_BUFFERED_SERIAL_TX_TO_RX,
/**
* Loops back the TX driver IO signal to the RX IO pins
* CDR recovered bit clock used (only through IO)
*/
AL_SRDS_LB_MODE_PMA_SERIAL_TX_IO_TO_RX_IO,
/**
* Parallel loopback from the PMA receive lane data ports, to the
* transmit lane data ports
* CDR recovered bit clock used
*/
AL_SRDS_LB_MODE_PMA_PARALLEL_RX_TO_TX,
/** Loops received data after elastic buffer to transmit path */
AL_SRDS_LB_MODE_PCS_PIPE,
/** Loops TX data (to PMA) to RX path (instead of PMA data) */
AL_SRDS_LB_MODE_PCS_NEAR_END,
/** Loops receive data prior to interface block to transmit path */
AL_SRDS_LB_MODE_PCS_FAR_END,
};
enum al_serdes_clk_freq {
AL_SRDS_CLK_FREQ_NA,
AL_SRDS_CLK_FREQ_100_MHZ,
AL_SRDS_CLK_FREQ_125_MHZ,
AL_SRDS_CLK_FREQ_156_MHZ,
};
enum al_serdes_clk_src {
AL_SRDS_CLK_SRC_LOGIC_0,
AL_SRDS_CLK_SRC_REF_PINS,
AL_SRDS_CLK_SRC_R2L,
AL_SRDS_CLK_SRC_R2L_PLL,
AL_SRDS_CLK_SRC_L2R,
};
/** Serdes BIST pattern */
enum al_serdes_bist_pattern {
AL_SRDS_BIST_PATTERN_USER,
AL_SRDS_BIST_PATTERN_PRBS7,
AL_SRDS_BIST_PATTERN_PRBS23,
AL_SRDS_BIST_PATTERN_PRBS31,
AL_SRDS_BIST_PATTERN_CLK1010,
};
/** SerDes group rate */
enum al_serdes_rate {
AL_SRDS_RATE_1_8,
AL_SRDS_RATE_1_4,
AL_SRDS_RATE_1_2,
AL_SRDS_RATE_FULL,
};
/** SerDes power mode */
enum al_serdes_pm {
AL_SRDS_PM_PD,
AL_SRDS_PM_P2,
AL_SRDS_PM_P1,
AL_SRDS_PM_P0S,
AL_SRDS_PM_P0,
};
/**
* Tx de-emphasis parameters
*/
enum al_serdes_tx_deemph_param {
AL_SERDES_TX_DEEMP_C_ZERO, /*< c(0) */
AL_SERDES_TX_DEEMP_C_PLUS, /*< c(1) */
AL_SERDES_TX_DEEMP_C_MINUS, /*< c(-1) */
};
struct al_serdes_adv_tx_params {
/*
* Selects where the input values are taken from.
* When set to true, the values are taken from the internal registers,
* which are overridden by the parameters that follow.
* When set to false, the values are taken from external pins (the
* other parameters are not needed in this case).
*/
al_bool override;
/*
* Transmit Amplitude control signal. Used to define the full-scale
* maximum swing of the driver.
* 000 - Not Supported
* 001 - 952mVdiff-pkpk
* 010 - 1024mVdiff-pkpk
* 011 - 1094mVdiff-pkpk
* 100 - 1163mVdiff-pkpk
* 101 - 1227mVdiff-pkpk
* 110 - 1283mVdiff-pkpk
* 111 - 1331mVdiff-pkpk
*/
uint8_t amp;
/* Defines the total number of driver units allocated in the driver */
uint8_t total_driver_units;
/* Defines the total number of driver units allocated to the
* first post-cursor (C+1) tap. */
uint8_t c_plus_1;
/* Defines the total number of driver units allocated to the
* second post-cursor (C+2) tap. */
uint8_t c_plus_2;
/* Defines the total number of driver units allocated to the
* first pre-cursor (C-1) tap. */
uint8_t c_minus_1;
/* TX driver Slew Rate control:
* 00 - 31ps
* 01 - 33ps
* 10 - 68ps
* 11 - 170ps
*/
uint8_t slew_rate;
};
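/*
 * Illustrative sketch only, not part of the HAL API: one way a caller might
 * populate the advanced TX parameters when overriding the external pins.
 * All field values below are hypothetical placeholders and would normally
 * come from board-level signal-integrity tuning; the structure is then
 * handed to the group object's tx_advanced_params_set() op declared further
 * down in this header.
 */
static const struct al_serdes_adv_tx_params example_adv_tx_params = {
	.override		= AL_TRUE,
	.amp			= 0x3,	/* 1094mVdiff-pkpk (hypothetical) */
	.total_driver_units	= 0x13,	/* hypothetical */
	.c_plus_1		= 0x2,	/* hypothetical */
	.c_plus_2		= 0x0,
	.c_minus_1		= 0x1,	/* hypothetical */
	.slew_rate		= 0x0,	/* 31ps */
};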
struct al_serdes_adv_rx_params {
/*
* Selects where the input values are taken from.
* When set to true, the values are taken from the internal registers,
* which are overridden by the parameters that follow.
* When set to false, the values are taken from the equalization
* results (the other parameters are not needed in this case).
*/
al_bool override;
/* RX agc high frequency dc gain:
* -3'b000: -3dB
* -3'b001: -2.5dB
* -3'b010: -2dB
* -3'b011: -1.5dB
* -3'b100: -1dB
* -3'b101: -0.5dB
* -3'b110: -0dB
* -3'b111: 0.5dB
*/
uint8_t dcgain;
/* DFE post-shaping tap 3dB frequency
* -3'b000: 684MHz
* -3'b001: 576MHz
* -3'b010: 514MHz
* -3'b011: 435MHz
* -3'b100: 354MHz
* -3'b101: 281MHz
* -3'b110: 199MHz
* -3'b111: 125MHz
*/
uint8_t dfe_3db_freq;
/* DFE post-shaping tap gain
* 0: no pulse shaping tap
* 1: -24mVpeak
* 2: -45mVpeak
* 3: -64mVpeak
* 4: -80mVpeak
* 5: -93mVpeak
* 6: -101mVpeak
* 7: -105mVpeak
*/
uint8_t dfe_gain;
/* DFE first tap gain control
* -4'b0000: +1mVpeak
* -4'b0001: +10mVpeak
* ....
* -4'b0110: +55mVpeak
* -4'b0111: +64mVpeak
* -4'b1000: -1mVpeak
* -4'b1001: -10mVpeak
* ....
* -4'b1110: -55mVpeak
* -4'b1111: -64mVpeak
*/
uint8_t dfe_first_tap_ctrl;
/* DFE second tap gain control
* -4'b0000: +0mVpeak
* -4'b0001: +9mVpeak
* ....
* -4'b0110: +46mVpeak
* -4'b0111: +53mVpeak
* -4'b1000: -0mVpeak
* -4'b1001: -9mVpeak
* ....
* -4'b1110: -46mVpeak
* -4'b1111: -53mVpeak
*/
uint8_t dfe_secound_tap_ctrl;
/* DFE third tap gain control
* -4'b0000: +0mVpeak
* -4'b0001: +7mVpeak
* ....
* -4'b0110: +38mVpeak
* -4'b0111: +44mVpeak
* -4'b1000: -0mVpeak
* -4'b1001: -7mVpeak
* ....
* -4'b1110: -38mVpeak
* -4'b1111: -44mVpeak
*/
uint8_t dfe_third_tap_ctrl;
/* DFE fourth tap gain control
* -4'b0000: +0mVpeak
* -4'b0001: +6mVpeak
* ....
* -4'b0110: +29mVpeak
* -4'b0111: +33mVpeak
* -4'b1000: -0mVpeak
* -4'b1001: -6mVpeak
* ....
* -4'b1110: -29mVpeak
* -4'b1111: -33mVpeak
*/
uint8_t dfe_fourth_tap_ctrl;
/* Low frequency agc gain (att) select
* -3'b000: Disconnected
* -3'b001: -18.5dB
* -3'b010: -12.5dB
* -3'b011: -9dB
* -3'b100: -6.5dB
* -3'b101: -4.5dB
* -3'b110: -2.9dB
* -3'b111: -1.6dB
*/
uint8_t low_freq_agc_gain;
/* Provides an RX Equalizer pre-hint, prior to beginning
* adaptive equalization */
uint8_t precal_code_sel;
/* High frequency agc boost control
* Min d0: Boost ~4dB
* Max d31: Boost ~20dB
*/
uint8_t high_freq_agc_boost;
};
struct al_serdes_25g_adv_rx_params {
/* ATT (PLE Flat-Band Gain) */
uint8_t att;
/* APG (CTLE's Flat-Band Gain) */
uint8_t apg;
/* LFG (Low-Freq Gain) */
uint8_t lfg;
/* HFG (High-Freq Gain) */
uint8_t hfg;
/* MBG (MidBand-Freq-knob Gain) */
uint8_t mbg;
/* MBF (MidBand-Freq-knob Frequency position Gain) */
uint8_t mbf;
/* DFE Tap1 even#0 Value */
int8_t dfe_first_tap_even0_ctrl;
/* DFE Tap1 even#1 Value */
int8_t dfe_first_tap_even1_ctrl;
/* DFE Tap1 odd#0 Value */
int8_t dfe_first_tap_odd0_ctrl;
/* DFE Tap1 odd#1 Value */
int8_t dfe_first_tap_odd1_ctrl;
/* DFE Tap2 Value */
int8_t dfe_second_tap_ctrl;
/* DFE Tap3 Value */
int8_t dfe_third_tap_ctrl;
/* DFE Tap4 Value */
int8_t dfe_fourth_tap_ctrl;
/* DFE Tap5 Value */
int8_t dfe_fifth_tap_ctrl;
};
struct al_serdes_25g_tx_diag_info {
uint8_t regulated_supply;
int8_t dcd_trim;
uint8_t clk_delay;
uint8_t calp_multiplied_by_2;
uint8_t caln_multiplied_by_2;
};
struct al_serdes_25g_rx_diag_info {
int8_t los_offset;
int8_t agc_offset;
int8_t leq_gainstage_offset;
int8_t leq_eq1_offset;
int8_t leq_eq2_offset;
int8_t leq_eq3_offset;
int8_t leq_eq4_offset;
int8_t leq_eq5_offset;
int8_t summer_even_offset;
int8_t summer_odd_offset;
int8_t vscan_even_offset;
int8_t vscan_odd_offset;
int8_t data_slicer_even0_offset;
int8_t data_slicer_even1_offset;
int8_t data_slicer_odd0_offset;
int8_t data_slicer_odd1_offset;
int8_t edge_slicer_even_offset;
int8_t edge_slicer_odd_offset;
int8_t eye_slicer_even_offset;
int8_t eye_slicer_odd_offset;
uint8_t cdr_clk_i;
uint8_t cdr_clk_q;
uint8_t cdr_dll;
uint8_t cdr_vco_dosc;
uint8_t cdr_vco_fr;
uint16_t cdr_dlpf;
uint8_t ple_resistance;
uint8_t rx_term_mode;
uint8_t rx_coupling;
uint8_t rx_term_cal_code;
uint8_t rx_sheet_res_cal_code;
};
/**
* SRIS parameters
*/
struct al_serdes_sris_params {
/* Controls the frequency accuracy threshold (ppm) for lock detection CDR */
uint16_t ppm_drift_count;
/* Controls the frequency accuracy threshold (ppm) for lock detection in the CDR */
uint16_t ppm_drift_max;
/* Controls the frequency accuracy threshold (ppm) for lock detection in PLL */
uint16_t synth_ppm_drift_max;
/* Elastic buffer full threshold for PCIE modes: GEN1/GEN2 */
uint8_t full_d2r1;
/* Elastic buffer full threshold for PCIE modes: GEN3 */
uint8_t full_pcie_g3;
/* Elastic buffer midpoint threshold.
* Sets the depth of the buffer while in PCIE mode, GEN1/GEN2
*/
uint8_t rd_threshold_d2r1;
/* Elastic buffer midpoint threshold.
* Sets the depth of the buffer while in PCIE mode, GEN3
*/
uint8_t rd_threshold_pcie_g3;
};
/** SerDes PCIe Rate - values are important for proper behavior */
enum al_serdes_pcie_rate {
AL_SRDS_PCIE_RATE_GEN1 = 0,
AL_SRDS_PCIE_RATE_GEN2,
AL_SRDS_PCIE_RATE_GEN3,
};
struct al_serdes_grp_obj {
void __iomem *regs_base;
/**
* get the type of the serdes.
* Must be implemented for all SerDes units.
*
* @return the serdes type.
*/
enum al_serdes_type (*type_get)(void);
/**
* Reads a SERDES internal register
*
* @param obj The object context
* @param page The SERDES register page within the group
* @param type The SERDES register type (PMA /PCS)
* @param offset The SERDES register offset (0 - 4095)
* @param data The read data
*
* @return 0 if no error found.
*/
int (*reg_read)(struct al_serdes_grp_obj *, enum al_serdes_reg_page,
enum al_serdes_reg_type, uint16_t, uint8_t *);
/**
* Writes a SERDES internal register
*
* @param obj The object context
* @param page The SERDES register page within the group
* @param type The SERDES register type (PMA /PCS)
* @param offset The SERDES register offset (0 - 4095)
* @param data The data to write
*
* @return 0 if no error found.
*/
int (*reg_write)(struct al_serdes_grp_obj *, enum al_serdes_reg_page,
enum al_serdes_reg_type, uint16_t, uint8_t);
/**
* Enable BIST required overrides
*
* @param obj The object context
* @param grp The SERDES group
* @param rate The required speed rate
*/
void (*bist_overrides_enable)(struct al_serdes_grp_obj *, enum al_serdes_rate);
/**
* Disable BIST required overrides
*
* @param obj The object context
* @param grp The SERDES group
* @param rate The required speed rate
*/
void (*bist_overrides_disable)(struct al_serdes_grp_obj *);
/**
* Rx rate change
*
* @param obj The object context
* @param grp The SERDES group
* @param rate The Rx required rate
*/
void (*rx_rate_change)(struct al_serdes_grp_obj *, enum al_serdes_rate);
/**
* SERDES lane Rx rate change software flow enable
*
* @param obj The object context
* @param lane The SERDES lane within the group
*/
void (*rx_rate_change_sw_flow_en)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* SERDES lane Rx rate change software flow disable
*
* @param obj The object context
* @param lane The SERDES lane within the group
*/
void (*rx_rate_change_sw_flow_dis)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* PCIe lane rate override check
*
* @param obj The object context
* @param grp The SERDES group
* @param lane The SERDES lane within the group
*
* @returns AL_TRUE if the override is enabled
*/
al_bool (*pcie_rate_override_is_enabled)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* PCIe lane rate override control
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param en Enable/disable
*/
void (*pcie_rate_override_enable_set)(struct al_serdes_grp_obj *, enum al_serdes_lane,
al_bool en);
/**
* PCIe lane rate get
*
* @param obj The object context
* @param lane The SERDES lane within the group
*/
enum al_serdes_pcie_rate (*pcie_rate_get)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* PCIe lane rate set
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param rate The required rate
*/
void (*pcie_rate_set)(struct al_serdes_grp_obj *, enum al_serdes_lane,
enum al_serdes_pcie_rate rate);
/**
* SERDES group power mode control
*
* @param obj The object context
* @param grp The SERDES group
* @param pm The required power mode
*/
void (*group_pm_set)(struct al_serdes_grp_obj *, enum al_serdes_pm);
/**
* SERDES lane power mode control
*
* @param obj The object context
* @param grp The SERDES group
* @param lane The SERDES lane within the group
* @param rx_pm The required RX power mode
* @param tx_pm The required TX power mode
*/
void (*lane_pm_set)(struct al_serdes_grp_obj *, enum al_serdes_lane,
enum al_serdes_pm, enum al_serdes_pm);
/**
* SERDES group PMA hard reset
* Controls Serdes group PMA hard reset
*
* @param obj The object context
* @param grp The SERDES group
* @param enable Enable/disable hard reset
*/
void (*pma_hard_reset_group)(struct al_serdes_grp_obj *, al_bool);
/**
* SERDES lane PMA hard reset
* Controls Serdes lane PMA hard reset
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param enable Enable/disable hard reset
*/
void (*pma_hard_reset_lane)(struct al_serdes_grp_obj *, enum al_serdes_lane, al_bool);
/**
* Configure SERDES loopback
* Controls the loopback
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param mode The requested loopback mode
*/
void (*loopback_control)(struct al_serdes_grp_obj *, enum al_serdes_lane,
enum al_serdes_lb_mode);
/**
* SERDES BIST pattern selection
* Selects the BIST pattern to be used
*
* @param obj The object context
* @param pattern The pattern to set
* @param user_data The pattern user data (when pattern == AL_SRDS_BIST_PATTERN_USER)
* 80 bits (8 bytes array)
*/
void (*bist_pattern_select)(struct al_serdes_grp_obj *,
enum al_serdes_bist_pattern, uint8_t *);
/**
* SERDES BIST TX Enable
* Enables/disables TX BIST per lane
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param enable Enable or disable TX BIST
*/
void (*bist_tx_enable)(struct al_serdes_grp_obj *, enum al_serdes_lane, al_bool);
/**
* SERDES BIST TX single bit error injection
* Injects single bit error during a TX BIST
*
* @param obj The object context
*/
void (*bist_tx_err_inject)(struct al_serdes_grp_obj *);
/**
* SERDES BIST RX Enable
* Enables/disables RX BIST per lane
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param enable Enable or disable RX BIST
*/
void (*bist_rx_enable)(struct al_serdes_grp_obj *, enum al_serdes_lane, al_bool);
/**
* SERDES BIST RX status
* Checks the RX BIST status for a specific SERDES lane
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param is_locked An indication whether RX BIST is locked
* @param err_cnt_overflow An indication whether error count overflow occurred
* @param err_cnt Current bit error count
*/
void (*bist_rx_status)(struct al_serdes_grp_obj *, enum al_serdes_lane, al_bool *,
al_bool *, uint32_t *);
/**
* Set the tx de-emphasis to preset values
*
* @param obj The object context
* @param lane The SERDES lane within the group
*
*/
void (*tx_deemph_preset)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* Increase tx de-emphasis param.
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param param which tx de-emphasis to change
*
* @return false in case max is reached. true otherwise.
*/
al_bool (*tx_deemph_inc)(struct al_serdes_grp_obj *, enum al_serdes_lane,
enum al_serdes_tx_deemph_param);
/**
* Decrease tx de-emphasis param.
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param param which tx de-emphasis to change
*
* @return false in case min is reached. true otherwise.
*/
al_bool (*tx_deemph_dec)(struct al_serdes_grp_obj *, enum al_serdes_lane,
enum al_serdes_tx_deemph_param);
/**
* run Rx eye measurement.
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param timeout timeout in uSec
* @param value Rx eye measurement value
* (0 - completely closed eye, 0xffff - completely open eye).
*
* @return 0 if no error found.
*/
int (*eye_measure_run)(struct al_serdes_grp_obj *, enum al_serdes_lane,
uint32_t, unsigned int *);
/**
* Eye diagram single sampling
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param x Sampling X position (0 - 63 --> -1.00 UI ... 1.00 UI)
* @param y Sampling Y position (0 - 62 --> 500mV ... -500mV)
* @param timeout timeout in uSec
* @param value Eye diagram sample value (BER - 0x0000 - 0xffff)
*
* @return 0 if no error found.
*/
int (*eye_diag_sample)(struct al_serdes_grp_obj *, enum al_serdes_lane,
unsigned int, int, unsigned int, unsigned int *);
/**
* Eye diagram full run
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param x_start Sampling from X position
* @param x_stop Sampling to X position
* @param x_step jump in x_step
* @param y_start Sampling from Y position
* @param y_stop Sampling to Y position
* @param y_step jump in y_step
* @param num_bits_per_sample How many bits to check
* @param buf array of results
* @param buf_size array size - must be equal to
* (((y_stop - y_start) / y_step) + 1) *
* (((x_stop - x_start) / x_step) + 1)
*
* @return 0 if no error found.
*/
int (*eye_diag_run)(struct al_serdes_grp_obj *, enum al_serdes_lane,
int, int, unsigned int, int, int, unsigned int, uint64_t, uint64_t *,
uint32_t);
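/*
 * Worked example of the buf_size constraint above (illustrative values):
 * scanning x from 0 to 63 in steps of 1 and y from 0 to 62 in steps of 1
 * requires buf_size = (((62 - 0) / 1) + 1) * (((63 - 0) / 1) + 1) =
 * 63 * 64 = 4032 entries.
 */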
/**
* Check if signal is detected
*
* @param obj The object context
* @param lane The SERDES lane within the group
*
* @return true if signal is detected. false otherwise.
*/
al_bool (*signal_is_detected)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* Check if CDR is locked
*
* @param obj The object context
* @param lane The SERDES lane within the group
*
* @return true if cdr is locked. false otherwise.
*/
al_bool (*cdr_is_locked)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* Check if rx is valid for this lane
*
* @param obj The object context
* @param lane The SERDES lane within the group
*
* @return true if rx is valid. false otherwise.
*/
al_bool (*rx_valid)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* configure tx advanced parameters
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param params pointer to the tx parameters
*/
void (*tx_advanced_params_set)(struct al_serdes_grp_obj *, enum al_serdes_lane, void *);
/**
* read tx advanced parameters
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param params pointer to the tx parameters
*/
void (*tx_advanced_params_get)(struct al_serdes_grp_obj *, enum al_serdes_lane, void *);
/**
* configure rx advanced parameters
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param params pointer to the rx parameters
*/
void (*rx_advanced_params_set)(struct al_serdes_grp_obj *, enum al_serdes_lane, void *);
/**
* read rx advanced parameters
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param params pointer to the rx parameters
*/
void (*rx_advanced_params_get)(struct al_serdes_grp_obj *, enum al_serdes_lane, void *);
/**
* Switch entire SerDes group to SGMII mode based on 156.25 MHz reference clock
*
* @param obj The object context
*
*/
void (*mode_set_sgmii)(struct al_serdes_grp_obj *);
/**
* Switch entire SerDes group to KR mode based on 156.25 MHz reference clock
*
* @param obj The object context
*
*/
void (*mode_set_kr)(struct al_serdes_grp_obj *);
/**
* performs the SerDes HW equalization test and updates the equalization parameters
*
* @param obj the object context
* @param lane The SERDES lane within the group
*/
int (*rx_equalization)(struct al_serdes_grp_obj *, enum al_serdes_lane);
/**
* performs Rx equalization and computes the width and height of the eye
*
* @param obj the object context
* @param lane The SERDES lane within the group
* @param width the output width of the eye
* @param height the output height of the eye
*/
int (*calc_eye_size)(struct al_serdes_grp_obj *, enum al_serdes_lane, int *, int *);
/**
* SRIS: Separate Refclk Independent SSC (Spread Spectrum Clocking)
* Currently available only for PCIe interfaces.
* When working with a local Refclk, the same SRIS configuration is required on
* both SerDes sides (EP and RC in a PCIe interface).
*
* performs SRIS configuration according to params
*
* @param obj the object context
* @param params the SRIS parameters
*/
void (*sris_config)(struct al_serdes_grp_obj *, void *);
/**
* set SERDES dcgain parameter
*
* @param obj the object context
* @param dcgain dcgain value to set
*/
void (*dcgain_set)(struct al_serdes_grp_obj *, uint8_t);
/**
* read tx diagnostics info
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param params pointer to the tx diagnostics info structure
*/
void (*tx_diag_info_get)(struct al_serdes_grp_obj *, enum al_serdes_lane, void*);
/**
* read rx diagnostics info
*
* @param obj The object context
* @param lane The SERDES lane within the group
* @param params pointer to the rx diagnostics info structure
*/
void (*rx_diag_info_get)(struct al_serdes_grp_obj *, enum al_serdes_lane, void*);
};
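/*
 * Illustrative sketch of driving the ops table above; not part of the HAL
 * itself. The lane choice and the timeout are hypothetical, and the caller
 * is assumed to have obtained an initialized al_serdes_grp_obj elsewhere.
 */
static inline int example_serdes_lane_check(struct al_serdes_grp_obj *obj)
{
	enum al_serdes_lane lane = AL_SRDS_LANE_0;	/* hypothetical lane */
	unsigned int eye_val;

	if (!obj->signal_is_detected(obj, lane))
		return -1;	/* no signal on the lane */

	if (!obj->cdr_is_locked(obj, lane))
		return -1;	/* CDR not locked yet */

	/* 1000 uSec timeout is a hypothetical value */
	if (obj->eye_measure_run(obj, lane, 1000, &eye_val))
		return -1;

	return (int)eye_val;	/* 0 = closed eye, 0xffff = fully open eye */
}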
/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
#endif /* __AL_HAL_SERDES_INTERFACE_H__ */
/** @} end of SERDES group */

View File

@@ -70,7 +70,6 @@ extern "C" {
/* Default Max number of descriptors supported per action */
#define AL_UDMA_DEFAULT_MAX_ACTN_DESCS 16
#define AL_UDMA_REV_ID_0 0
#define AL_UDMA_REV_ID_1 1
#define AL_UDMA_REV_ID_2 2
@@ -130,8 +129,8 @@ union al_udma_desc {
#define AL_S2M_DESC_LEN2_MASK (0x3fff << AL_S2M_DESC_LEN2_SHIFT)
#define AL_S2M_DESC_LEN2_GRANULARITY_SHIFT 6
/* TX/RX descriptor VMID field (in the buffer address 64 bit field) */
#define AL_UDMA_DESC_VMID_SHIFT 48
/* TX/RX descriptor Target-ID field (in the buffer address 64 bit field) */
#define AL_UDMA_DESC_TGTID_SHIFT 48
/** UDMA completion descriptor */
union al_udma_cdesc {
@@ -168,11 +167,11 @@ struct al_block {
uint32_t num; /**< Number of buffers of the block */
/**<
* VMID to be assigned to the block descriptors
* Requires VMID in descriptor to be enabled for the specific UDMA
* Target-ID to be assigned to the block descriptors
* Requires Target-ID in descriptor to be enabled for the specific UDMA
* queue.
*/
uint16_t vmid;
uint16_t tgtid;
};
/** UDMA type */

View File

@@ -565,7 +565,7 @@ int al_udma_s2m_pref_set(struct al_udma *udma,
reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
reg |=(conf->min_burst_above_thr <<
UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) &
UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3, reg);
@@ -1114,260 +1114,105 @@ int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
return 0;
}
/* UDMA VMID control configuration */
void al_udma_gen_vmid_conf_set(
/* UDMA Target-ID control configuration per queue */
void al_udma_gen_tgtid_conf_queue_set(
struct unit_regs *unit_regs,
struct al_udma_gen_vmid_conf *conf)
struct al_udma_gen_tgtid_conf *conf,
uint32_t qid)
{
uint32_t *tx_tgtid_reg, *rx_tgtid_reg, *tx_tgtaddr_reg, *rx_tgtaddr_reg;
unsigned int rev_id;
al_reg_write32_masked(
&unit_regs->gen.vmid.cfg_vmid_0,
UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK |
UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK |
UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK |
UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK,
(((conf->tx_q_conf[0].desc_en << 0) |
(conf->tx_q_conf[1].desc_en << 1) |
(conf->tx_q_conf[2].desc_en << 2) |
(conf->tx_q_conf[3].desc_en << 3)) <<
UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT) |
(((conf->tx_q_conf[0].queue_en << 0) |
(conf->tx_q_conf[1].queue_en << 1) |
(conf->tx_q_conf[2].queue_en << 2) |
(conf->tx_q_conf[3].queue_en << 3)) <<
UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT) |
(((conf->rx_q_conf[0].desc_en << 0) |
(conf->rx_q_conf[1].desc_en << 1) |
(conf->rx_q_conf[2].desc_en << 2) |
(conf->rx_q_conf[3].desc_en << 3)) <<
UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT) |
(((conf->rx_q_conf[0].queue_en << 0) |
(conf->rx_q_conf[1].queue_en << 1) |
(conf->rx_q_conf[2].queue_en << 2) |
(conf->rx_q_conf[3].queue_en << 3)) <<
UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT));
/* VMID per queue */
al_reg_write32(
&unit_regs->gen.vmid.cfg_vmid_1,
(conf->tx_q_conf[0].vmid <<
UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT) |
(conf->tx_q_conf[1].vmid <<
UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT));
al_reg_write32(
&unit_regs->gen.vmid.cfg_vmid_2,
(conf->tx_q_conf[2].vmid <<
UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT) |
(conf->tx_q_conf[3].vmid <<
UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT));
al_reg_write32(
&unit_regs->gen.vmid.cfg_vmid_3,
(conf->rx_q_conf[0].vmid <<
UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT) |
(conf->rx_q_conf[1].vmid <<
UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT));
al_reg_write32(
&unit_regs->gen.vmid.cfg_vmid_4,
(conf->rx_q_conf[2].vmid <<
UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT) |
(conf->rx_q_conf[3].vmid <<
UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT));
/* VMADDR per queue */
al_assert(qid < DMA_MAX_Q);
rev_id = al_udma_get_revision(unit_regs);
/* Target-ID TX DESC EN */
al_reg_write32_masked(&unit_regs->gen.tgtid.cfg_tgtid_0,
(conf->tx_q_conf[qid].desc_en << qid) <<
UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_DESC_EN_SHIFT,
(conf->tx_q_conf[qid].desc_en << qid) <<
UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_DESC_EN_SHIFT);
/* Target-ID TX QUEUE EN */
al_reg_write32_masked(&unit_regs->gen.tgtid.cfg_tgtid_0,
(conf->tx_q_conf[qid].queue_en << qid) <<
UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_QUEUE_EN_SHIFT,
(conf->tx_q_conf[qid].queue_en << qid) <<
UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_QUEUE_EN_SHIFT);
/* Target-ID RX DESC EN */
al_reg_write32_masked(&unit_regs->gen.tgtid.cfg_tgtid_0,
(conf->rx_q_conf[qid].desc_en << qid) <<
UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_DESC_EN_SHIFT,
(conf->rx_q_conf[qid].desc_en << qid) <<
UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_DESC_EN_SHIFT);
/* Target-ID RX QUEUE EN */
al_reg_write32_masked(&unit_regs->gen.tgtid.cfg_tgtid_0,
(conf->rx_q_conf[qid].queue_en << qid) <<
UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_QUEUE_EN_SHIFT,
(conf->rx_q_conf[qid].queue_en << qid) <<
UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_QUEUE_EN_SHIFT);
switch (qid) {
case 0:
case 1:
tx_tgtid_reg = &unit_regs->gen.tgtid.cfg_tgtid_1;
rx_tgtid_reg = &unit_regs->gen.tgtid.cfg_tgtid_3;
tx_tgtaddr_reg = &unit_regs->gen.tgtaddr.cfg_tgtaddr_0;
rx_tgtaddr_reg = &unit_regs->gen.tgtaddr.cfg_tgtaddr_2;
break;
case 2:
case 3:
tx_tgtid_reg = &unit_regs->gen.tgtid.cfg_tgtid_2;
rx_tgtid_reg = &unit_regs->gen.tgtid.cfg_tgtid_4;
tx_tgtaddr_reg = &unit_regs->gen.tgtaddr.cfg_tgtaddr_1;
rx_tgtaddr_reg = &unit_regs->gen.tgtaddr.cfg_tgtaddr_3;
break;
default:
al_assert(AL_FALSE);
return;
}
al_reg_write32_masked(tx_tgtid_reg,
UDMA_GEN_TGTID_CFG_TGTID_MASK(qid),
conf->tx_q_conf[qid].tgtid << UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid));
al_reg_write32_masked(rx_tgtid_reg,
UDMA_GEN_TGTID_CFG_TGTID_MASK(qid),
conf->rx_q_conf[qid].tgtid << UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid));
if (rev_id >= AL_UDMA_REV_ID_REV2) {
al_reg_write32(
&unit_regs->gen.vmaddr.cfg_vmaddr_0,
(conf->tx_q_conf[0].vmaddr <<
UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_SHIFT) |
(conf->tx_q_conf[1].vmaddr <<
UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_SHIFT));
al_reg_write32_masked(tx_tgtaddr_reg,
UDMA_GEN_TGTADDR_CFG_MASK(qid),
conf->tx_q_conf[qid].tgtaddr << UDMA_GEN_TGTADDR_CFG_SHIFT(qid));
al_reg_write32(
&unit_regs->gen.vmaddr.cfg_vmaddr_1,
(conf->tx_q_conf[2].vmaddr <<
UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_SHIFT) |
(conf->tx_q_conf[3].vmaddr <<
UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_SHIFT));
al_reg_write32(
&unit_regs->gen.vmaddr.cfg_vmaddr_2,
(conf->rx_q_conf[0].vmaddr <<
UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_SHIFT) |
(conf->rx_q_conf[1].vmaddr <<
UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_SHIFT));
al_reg_write32(
&unit_regs->gen.vmaddr.cfg_vmaddr_3,
(conf->rx_q_conf[2].vmaddr <<
UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_SHIFT) |
(conf->rx_q_conf[3].vmaddr <<
UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_SHIFT));
al_reg_write32_masked(rx_tgtaddr_reg,
UDMA_GEN_TGTADDR_CFG_MASK(qid),
conf->rx_q_conf[qid].tgtaddr << UDMA_GEN_TGTADDR_CFG_SHIFT(qid));
}
}
/* UDMA VMID MSIX control configuration */
void al_udma_gen_vmid_msix_conf_set(
/* UDMA Target-ID control configuration */
void al_udma_gen_tgtid_conf_set(
struct unit_regs *unit_regs,
struct al_udma_gen_tgtid_conf *conf)
{
int i;
for (i = 0; i < DMA_MAX_Q; i++)
al_udma_gen_tgtid_conf_queue_set(unit_regs, conf, i);
}
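/*
 * Illustrative sketch of configuring the same Target-ID on every queue of a
 * UDMA unit; not part of this file. The Target-ID value (5) and the choice
 * of the per-queue register over the descriptor address bits are
 * hypothetical.
 */
static void example_tgtid_setup(struct unit_regs *unit_regs)
{
	struct al_udma_gen_tgtid_conf conf;
	int i;

	for (i = 0; i < DMA_MAX_Q; i++) {
		conf.tx_q_conf[i].queue_en = AL_TRUE;	/* use 'tgtid' below */
		conf.tx_q_conf[i].desc_en = AL_FALSE;	/* ignore addr[63:48] */
		conf.tx_q_conf[i].tgtid = 5;		/* hypothetical value */
		conf.tx_q_conf[i].tgtaddr = 0;
		conf.rx_q_conf[i] = conf.tx_q_conf[i];
	}

	al_udma_gen_tgtid_conf_set(unit_regs, &conf);
}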
/* UDMA Target-ID MSIX control configuration */
void al_udma_gen_tgtid_msix_conf_set(
struct unit_regs *unit_regs,
struct al_udma_gen_vmid_msix_conf *conf)
struct al_udma_gen_tgtid_msix_conf *conf)
{
al_reg_write32_masked(
&unit_regs->gen.vmid.cfg_vmid_0,
UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN |
UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL,
(conf->access_en ? UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN : 0) |
(conf->sel ? UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL : 0));
&unit_regs->gen.tgtid.cfg_tgtid_0,
UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_ACCESS_EN |
UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_SEL,
(conf->access_en ? UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_ACCESS_EN : 0) |
(conf->sel ? UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_SEL : 0));
}
/* UDMA VMID control advanced Tx queue configuration */
void al_udma_gen_vmid_advanced_tx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_vmid_advanced_tx_q_conf *conf)
{
struct udma_gen_regs *gen_regs = q->udma->gen_regs;
struct udma_gen_vmpr *vmpr = &gen_regs->vmpr[q->qid];
al_reg_write32_masked(
&vmpr->cfg_vmpr_0,
UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK |
UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN |
UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN |
UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN,
conf->tx_q_addr_hi_sel |
((conf->tx_q_data_vmid_en == AL_TRUE) ?
UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN : 0) |
((conf->tx_q_prefetch_vmid_en == AL_TRUE) ?
UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN : 0) |
((conf->tx_q_compl_vmid_en == AL_TRUE) ?
UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN : 0));
al_reg_write32(
&vmpr->cfg_vmpr_1,
conf->tx_q_addr_hi);
al_reg_write32_masked(
&vmpr->cfg_vmpr_2,
UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK |
UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK,
(conf->tx_q_prefetch_vmid <<
UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT) |
(conf->tx_q_compl_vmid <<
UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT));
al_reg_write32_masked(
&vmpr->cfg_vmpr_3,
UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK |
UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK,
(conf->tx_q_data_vmid <<
UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT) |
(conf->tx_q_data_vmid_mask <<
UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT));
}
/** UDMA VMID control advanced Rx queue configuration */
void al_udma_gen_vmid_advanced_rx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_vmid_advanced_rx_q_conf *conf)
{
struct udma_gen_regs *gen_regs = q->udma->gen_regs;
struct udma_gen_vmpr *vmpr = &gen_regs->vmpr[q->qid];
al_reg_write32_masked(
&vmpr->cfg_vmpr_4,
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK |
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN |
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK |
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN |
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK |
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN |
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN |
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN,
(conf->rx_q_addr_hi_sel <<
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT) |
((conf->rx_q_data_vmid_en == AL_TRUE) ?
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN : 0) |
(conf->rx_q_data_buff2_addr_hi_sel <<
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT) |
((conf->rx_q_data_buff2_vmid_en == AL_TRUE) ?
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN : 0) |
(conf->rx_q_ddp_addr_hi_sel <<
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT) |
((conf->rx_q_ddp_vmid_en == AL_TRUE) ?
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN : 0) |
((conf->rx_q_prefetch_vmid_en == AL_TRUE) ?
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN : 0) |
((conf->rx_q_compl_vmid_en == AL_TRUE) ?
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN : 0));
al_reg_write32_masked(
&vmpr->cfg_vmpr_6,
UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK |
UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK,
(conf->rx_q_prefetch_vmid <<
UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT) |
(conf->rx_q_compl_vmid <<
UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT));
al_reg_write32_masked(
&vmpr->cfg_vmpr_7,
UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK |
UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK,
(conf->rx_q_data_vmid <<
UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT) |
(conf->rx_q_data_vmid_mask <<
UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT));
al_reg_write32_masked(
&vmpr->cfg_vmpr_8,
UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK |
UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK,
(conf->rx_q_data_buff2_vmid <<
UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT) |
(conf->rx_q_data_buff2_mask <<
UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT));
al_reg_write32_masked(
&vmpr->cfg_vmpr_9,
UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK |
UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK,
(conf->rx_q_ddp_vmid <<
UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT) |
(conf->rx_q_ddp_mask <<
UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT));
al_reg_write32(
&vmpr->cfg_vmpr_10,
conf->rx_q_addr_hi);
al_reg_write32(
&vmpr->cfg_vmpr_11,
conf->rx_q_data_buff2_addr_hi);
al_reg_write32(
&vmpr->cfg_vmpr_12,
conf->rx_q_ddp_addr_hi);
}
/* UDMA header split buffer 2 Rx queue configuration */
void al_udma_gen_hdr_split_buff2_rx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_hdr_split_buff2_q_conf *conf)
{
struct udma_gen_regs *gen_regs = q->udma->gen_regs;
struct udma_gen_vmpr *vmpr = &gen_regs->vmpr[q->qid];
al_reg_write32_masked(
&vmpr->cfg_vmpr_4,
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK,
conf->add_msb_sel <<
UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT);
al_reg_write32(
&vmpr->cfg_vmpr_5,
conf->addr_msb);
}

View File

@@ -1,5 +1,4 @@
/*-
*******************************************************************************
/*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
@@ -313,237 +312,40 @@ struct al_udma_s2m_q_comp_conf {
uint8_t q_qos; /* queue QoS */
};
/** UDMA per queue VMID control configuration */
struct al_udma_gen_vmid_q_conf {
/* Enable usage of the VMID per queue according to 'vmid' */
/** UDMA per queue Target-ID control configuration */
struct al_udma_gen_tgtid_q_conf {
/* Enable usage of the Target-ID per queue according to 'tgtid' */
al_bool queue_en;
/* Enable usage of the VMID from the descriptor buffer address 63:48 */
/* Enable usage of the Target-ID from the descriptor buffer address 63:48 */
al_bool desc_en;
/* VMID to be applied when 'queue_en' is asserted */
uint16_t vmid;
/* Target-ID to be applied when 'queue_en' is asserted */
uint16_t tgtid;
/* VMADDR to be applied to msbs when 'desc_en' is asserted.
/* TGTADDR to be applied to msbs when 'desc_en' is asserted.
* Relevant for revisions >= AL_UDMA_REV_ID_REV2 */
uint16_t vmaddr;
uint16_t tgtaddr;
};
/** UDMA VMID control configuration */
struct al_udma_gen_vmid_conf {
/** UDMA Target-ID control configuration */
struct al_udma_gen_tgtid_conf {
/* TX queue configuration */
struct al_udma_gen_vmid_q_conf tx_q_conf[DMA_MAX_Q];
struct al_udma_gen_tgtid_q_conf tx_q_conf[DMA_MAX_Q];
/* RX queue configuration */
struct al_udma_gen_vmid_q_conf rx_q_conf[DMA_MAX_Q];
struct al_udma_gen_tgtid_q_conf rx_q_conf[DMA_MAX_Q];
};
/** UDMA VMID MSIX control configuration */
struct al_udma_gen_vmid_msix_conf {
/* Enable write to all VMID_n registers in the MSI-X Controller */
/** UDMA Target-ID MSIX control configuration */
struct al_udma_gen_tgtid_msix_conf {
/* Enable write to all TGTID_n registers in the MSI-X Controller */
al_bool access_en;
/* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
/* use TGTID_n [7:0] from MSI-X Controller for MSI-X message */
al_bool sel;
};
/** UDMA per Tx queue advanced VMID control configuration */
struct al_udma_gen_vmid_advanced_tx_q_conf {
/**********************************************************************
* Tx Data VMID
**********************************************************************/
/* Tx data VMID enable */
al_bool tx_q_data_vmid_en;
/*
* For Tx data reads, replacement bits for the original address.
* The number of bits replaced is determined according to
* 'tx_q_addr_hi_sel'
*/
unsigned int tx_q_addr_hi;
/*
* For Tx data reads, 6 bits serving the number of bits taken from the
* extra register on account of bits coming from the original address
* field.
* When 'tx_q_addr_hi_sel'=32 all of 'tx_q_addr_hi' will be taken.
* When 'tx_q_addr_hi_sel'=0 none of it will be taken, and when any
* value in between, it will start from the MSB bit and sweep down as
* many bits as needed. For example if 'tx_q_addr_hi_sel'=8, the final
* address [63:56] will carry 'tx_q_addr_hi'[31:24] while [55:32] will
* carry the original buffer address[55:32].
*/
unsigned int tx_q_addr_hi_sel;
/*
* Tx data read VMID
* Masked per bit with 'tx_q_data_vmid_mask'
*/
unsigned int tx_q_data_vmid;
/*
* Tx data read VMID mask
* Each '1' selects from the buffer address, each '0' selects from
* 'tx_q_data_vmid'
*/
unsigned int tx_q_data_vmid_mask;
/**********************************************************************
* Tx prefetch VMID
**********************************************************************/
/* Tx prefetch VMID enable */
al_bool tx_q_prefetch_vmid_en;
/* Tx prefetch VMID */
unsigned int tx_q_prefetch_vmid;
/**********************************************************************
* Tx completion VMID
**********************************************************************/
/* Tx completion VMID enable */
al_bool tx_q_compl_vmid_en;
/* Tx completion VMID */
unsigned int tx_q_compl_vmid;
};
/** UDMA per Rx queue advanced VMID control configuration */
struct al_udma_gen_vmid_advanced_rx_q_conf {
/**********************************************************************
* Rx Data VMID
**********************************************************************/
/* Rx data VMID enable */
al_bool rx_q_data_vmid_en;
/*
* For Rx data writes, replacement bits for the original address.
* The number of bits replaced is determined according to
* 'rx_q_addr_hi_sel'
*/
unsigned int rx_q_addr_hi;
/*
* For Rx data writes, 6 bits serving the number of bits taken from the
* extra register on account of bits coming from the original address
* field.
*/
unsigned int rx_q_addr_hi_sel;
/*
* Rx data write VMID
* Masked per bit with 'rx_q_data_vmid_mask'
*/
unsigned int rx_q_data_vmid;
/* Rx data write VMID mask */
unsigned int rx_q_data_vmid_mask;
/**********************************************************************
* Rx Data Buffer 2 VMID
**********************************************************************/
/* Rx data buff2 VMID enable */
al_bool rx_q_data_buff2_vmid_en;
/*
* For Rx data buff2 writes, replacement bits for the original address.
* The number of bits replaced is determined according to
* 'rx_q_data_buff2_addr_hi_sel'
*/
unsigned int rx_q_data_buff2_addr_hi;
/*
* For Rx data buff2 writes, 6 bits serving the number of bits taken
* from the extra register on account of bits coming from the original
* address field.
*/
unsigned int rx_q_data_buff2_addr_hi_sel;
/*
* Rx data buff2 write VMID
* Masked per bit with 'rx_q_data_buff2_mask'
*/
unsigned int rx_q_data_buff2_vmid;
/* Rx data buff2 write VMID mask */
unsigned int rx_q_data_buff2_mask;
/**********************************************************************
* Rx DDP VMID
**********************************************************************/
/* Rx DDP write VMID enable */
al_bool rx_q_ddp_vmid_en;
/*
* For Rx DDP writes, replacement bits for the original address.
* The number of bits replaced is determined according to
* 'rx_q_ddp_addr_hi_sel'
*/
unsigned int rx_q_ddp_addr_hi;
/*
* For Rx DDP writes, 6 bits serving the number of bits taken from the
* extra register on account of bits coming from the original address
* field.
*/
unsigned int rx_q_ddp_addr_hi_sel;
/*
* Rx DDP write VMID
* Masked per bit with 'rx_q_ddp_mask'
*/
unsigned int rx_q_ddp_vmid;
/* Rx DDP write VMID mask */
unsigned int rx_q_ddp_mask;
/**********************************************************************
* Rx prefetch VMID
**********************************************************************/
/* Rx prefetch VMID enable */
al_bool rx_q_prefetch_vmid_en;
/* Rx prefetch VMID */
unsigned int rx_q_prefetch_vmid;
/**********************************************************************
* Rx completion VMID
**********************************************************************/
/* Rx completion VMID enable */
al_bool rx_q_compl_vmid_en;
/* Rx completion VMID */
unsigned int rx_q_compl_vmid;
};
/**
* Header split, buffer 2 per queue configuration
* When header split is enabled, Buffer_2 is used as an address for the header
* data. Buffer_2 is defined as 32-bits in the RX descriptor and it is defined
* that the MSB ([63:32]) of Buffer_1 is used as address [63:32] for the header
* address.
*/
struct al_udma_gen_hdr_split_buff2_q_conf {
/*
* MSB of the 64-bit address (bits [63:32]) that can be used for header
* split for this queue
*/
unsigned int addr_msb;
/*
* Determine how to select the MSB (bits [63:32]) of the address when
* header split is enabled (4 bits, one per byte)
* - Bits [3:0]:
* [0] selector for bits [39:32]
* [1] selector for bits [47:40]
* [2] selector for bits [55:48]
* [3] selector for bits [63:55]
* - Bit value:
* 0 Use Buffer_1 (legacy operation)
* 1 Use the queue configuration 'addr_msb'
*/
unsigned int add_msb_sel;
};
/* Report Error - to be used for abort */
void al_udma_err_report(struct al_udma *udma);
@@ -721,30 +523,21 @@ int al_udma_s2m_q_compl_hdr_split_config(struct al_udma_q *udma_q,
int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
struct al_udma_s2m_q_comp_conf *conf);
/** UDMA VMID control configuration */
void al_udma_gen_vmid_conf_set(
/** UDMA Target-ID control configuration per queue */
void al_udma_gen_tgtid_conf_queue_set(
struct unit_regs *unit_regs,
struct al_udma_gen_tgtid_conf *conf,
uint32_t qid);
/** UDMA Target-ID control configuration */
void al_udma_gen_tgtid_conf_set(
struct unit_regs __iomem *unit_regs,
struct al_udma_gen_vmid_conf *conf);
struct al_udma_gen_tgtid_conf *conf);
/** UDMA VMID MSIX control configuration */
void al_udma_gen_vmid_msix_conf_set(
/** UDMA Target-ID MSIX control configuration */
void al_udma_gen_tgtid_msix_conf_set(
struct unit_regs __iomem *unit_regs,
struct al_udma_gen_vmid_msix_conf *conf);
/** UDMA VMID control advanced Tx queue configuration */
void al_udma_gen_vmid_advanced_tx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_vmid_advanced_tx_q_conf *conf);
/** UDMA VMID control advanced Rx queue configuration */
void al_udma_gen_vmid_advanced_rx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_vmid_advanced_rx_q_conf *conf);
/** UDMA header split buffer 2 Rx queue configuration */
void al_udma_gen_hdr_split_buff2_rx_q_conf(
struct al_udma_q *q,
struct al_udma_gen_hdr_split_buff2_q_conf *conf);
struct al_udma_gen_tgtid_msix_conf *conf);
/* *INDENT-OFF* */
#ifdef __cplusplus

View File

@@ -425,9 +425,9 @@ void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid)
al_dbg(" comp_head_ptr = %p\n", queue->comp_head_ptr);
al_dbg(" pkt_crnt_descs = %d\n", (uint32_t)queue->pkt_crnt_descs);
al_dbg(" comp_ring_id = %d\n", (uint32_t)queue->comp_ring_id);
al_dbg(" desc_phy_base = 0x%016llx\n", (uint64_t)queue->desc_phy_base);
al_dbg(" cdesc_phy_base = 0x%016llx\n",
(uint64_t)queue->cdesc_phy_base);
al_dbg(" desc_phy_base = 0x%016jx\n", (uintmax_t)queue->desc_phy_base);
al_dbg(" cdesc_phy_base = 0x%016jx\n",
(uintmax_t)queue->cdesc_phy_base);
al_dbg(" flags = 0x%08x\n", (uint32_t)queue->flags);
al_dbg(" size = %d\n", (uint32_t)queue->size);
al_dbg(" status = %d\n", (uint32_t)queue->status);
@@ -471,7 +471,7 @@ void al_udma_ring_print(struct al_udma *udma, uint32_t qid,
}
for (i = 0; i < queue->size; i++) {
uint32_t *curr_addr = (void*)((uint32_t)base_ptr + i * desc_size);
uint32_t *curr_addr = (void*)((uintptr_t)base_ptr + i * desc_size);
if (desc_size == 16)
al_dbg("[%04d](%p): %08x %08x %08x %08x\n",
i,

View File

@@ -610,5 +610,23 @@ static INLINE uint32_t al_udma_iofic_read_cause(
return al_iofic_read_cause(al_udma_iofic_reg_base_get(regs, level), group);
}
/**
* clear bits in the interrupt cause register for a given group
*
* @param regs pointer to udma unit registers
* @param level the interrupt controller level (primary / secondary)
* @param group the interrupt group ('AL_INT_GROUP_*')
* @param mask bitwise of bits to be cleared, set bits will be cleared.
*/
static INLINE void al_udma_iofic_clear_cause(
struct unit_regs __iomem *regs,
enum al_udma_iofic_level level,
int group,
uint32_t mask)
{
al_assert(al_udma_iofic_level_and_group_valid(level, group));
al_iofic_clear_cause(al_udma_iofic_reg_base_get(regs, level), group, mask);
}
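/*
 * Illustrative usage sketch, not part of this header: read the pending
 * cause bits for a group, handle them, and then acknowledge exactly the
 * bits that were read. Which level and group to use is left to the caller.
 */
static INLINE void example_udma_iofic_ack(
	struct unit_regs __iomem *regs,
	enum al_udma_iofic_level level,
	int group)
{
	uint32_t cause = al_udma_iofic_read_cause(regs, level, group);

	/* ... dispatch handlers based on 'cause' here ... */

	al_udma_iofic_clear_cause(regs, level, group, cause);
}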
#endif
/** @} end of UDMA group */

View File

@@ -70,7 +70,6 @@ const char *const al_udma_states_name[] = {
static void al_udma_set_defaults(struct al_udma *udma)
{
uint32_t tmp;
uint8_t rev_id = udma->rev_id;
if (udma->type == UDMA_TX) {
@@ -85,25 +84,11 @@ static void al_udma_set_defaults(struct al_udma *udma)
256 << UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT);
}
if (rev_id == AL_UDMA_REV_ID_0)
/* disable AXI timeout for M0*/
al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 0);
else
/* set AXI timeout to 1M (~2.6 ms) */
al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 1000000);
/* set AXI timeout to 1M (~2.6 ms) */
al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 1000000);
al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack
, 0); /* Ack time out */
if (rev_id == AL_UDMA_REV_ID_0) {
tmp = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
tmp &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
tmp |= 4 << UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT;
al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1
, tmp);
}
}
if (udma->type == UDMA_RX) {
al_reg_write32(
@@ -365,14 +350,13 @@ int al_udma_q_init(struct al_udma *udma, uint32_t qid,
al_udma_q_enable(udma_q, 1);
al_dbg("udma [%s %d]: %s q init. size 0x%x\n"
" desc ring info: phys base 0x%llx virt base %p\n"
" cdesc ring info: phys base 0x%llx virt base %p "
"entry size 0x%x",
" desc ring info: phys base 0x%llx virt base %p)",
udma_q->udma->name, udma_q->qid,
udma->type == UDMA_TX ? "Tx" : "Rx",
q_params->size,
(unsigned long long)q_params->desc_phy_base,
q_params->desc_base,
q_params->desc_base);
al_dbg(" cdesc ring info: phys base 0x%llx virt base %p entry size 0x%x",
(unsigned long long)q_params->cdesc_phy_base,
q_params->cdesc_base,
q_params->cdesc_size);

View File

@@ -1,5 +1,4 @@
/*-
*******************************************************************************
/*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
@@ -97,48 +96,48 @@ struct udma_gen_sram_ctrl {
/* [0x0] Timing configuration */
uint32_t timing;
};
struct udma_gen_vmid {
/* [0x0] VMID control */
uint32_t cfg_vmid_0;
/* [0x4] TX queue 0/1 VMID */
uint32_t cfg_vmid_1;
/* [0x8] TX queue 2/3 VMID */
uint32_t cfg_vmid_2;
/* [0xc] RX queue 0/1 VMID */
uint32_t cfg_vmid_3;
/* [0x10] RX queue 2/3 VMID */
uint32_t cfg_vmid_4;
struct udma_gen_tgtid {
/* [0x0] Target-ID control */
uint32_t cfg_tgtid_0;
/* [0x4] TX queue 0/1 Target-ID */
uint32_t cfg_tgtid_1;
/* [0x8] TX queue 2/3 Target-ID */
uint32_t cfg_tgtid_2;
/* [0xc] RX queue 0/1 Target-ID */
uint32_t cfg_tgtid_3;
/* [0x10] RX queue 2/3 Target-ID */
uint32_t cfg_tgtid_4;
};
struct udma_gen_vmaddr {
/* [0x0] TX queue 0/1 VMADDR */
uint32_t cfg_vmaddr_0;
/* [0x4] TX queue 2/3 VMADDR */
uint32_t cfg_vmaddr_1;
/* [0x8] RX queue 0/1 VMADDR */
uint32_t cfg_vmaddr_2;
/* [0xc] RX queue 2/3 VMADDR */
uint32_t cfg_vmaddr_3;
struct udma_gen_tgtaddr {
/* [0x0] TX queue 0/1 Target-Address */
uint32_t cfg_tgtaddr_0;
/* [0x4] TX queue 2/3 Target-Address */
uint32_t cfg_tgtaddr_1;
/* [0x8] RX queue 0/1 Target-Address */
uint32_t cfg_tgtaddr_2;
/* [0xc] RX queue 2/3 Target-Address */
uint32_t cfg_tgtaddr_3;
};
struct udma_gen_vmpr {
/* [0x0] TX VMPR control */
uint32_t cfg_vmpr_0;
/* [0x4] TX VMPR Address High Register */
uint32_t cfg_vmpr_1;
/* [0x8] TX queue VMID values */
/* [0x8] TX queue Target-ID values */
uint32_t cfg_vmpr_2;
/* [0xc] TX queue VMID values */
/* [0xc] TX queue Target-ID values */
uint32_t cfg_vmpr_3;
/* [0x10] RX VMPR control */
uint32_t cfg_vmpr_4;
/* [0x14] RX VMPR Buffer2 MSB address */
uint32_t cfg_vmpr_5;
/* [0x18] RX queue VMID values */
/* [0x18] RX queue Target-ID values */
uint32_t cfg_vmpr_6;
/* [0x1c] RX queue BUF1 VMID values */
/* [0x1c] RX queue BUF1 Target-ID values */
uint32_t cfg_vmpr_7;
/* [0x20] RX queue BUF2 VMID values */
/* [0x20] RX queue BUF2 Target-ID values */
uint32_t cfg_vmpr_8;
/* [0x24] RX queue Direct Data Placement VMID values */
/* [0x24] RX queue Direct Data Placement Target-ID values */
uint32_t cfg_vmpr_9;
/* [0x28] RX VMPR BUF1 Address High Register */
uint32_t cfg_vmpr_10;
@@ -156,8 +155,8 @@ struct udma_gen_regs {
struct udma_gen_axi axi; /* [0x2280] */
struct udma_gen_sram_ctrl sram_ctrl[25]; /* [0x2380] */
uint32_t rsrvd_1[2];
struct udma_gen_vmid vmid; /* [0x23ec] */
struct udma_gen_vmaddr vmaddr; /* [0x2400] */
struct udma_gen_tgtid tgtid; /* [0x23ec] */
struct udma_gen_tgtaddr tgtaddr; /* [0x2400] */
uint32_t rsrvd_2[252];
struct udma_gen_vmpr vmpr[4]; /* [0x2800] */
};
@@ -236,176 +235,182 @@ struct udma_gen_regs {
/* Read margin enable */
#define UDMA_GEN_SRAM_CTRL_TIMING_RMEB (1 << 24)
/**** cfg_vmid_0 register ****/
/* For M2S queues 3:0, enable usage of the VMID from the buffer address 63:56 */
#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK 0x0000000F
#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT 0
/**** cfg_tgtid_0 register ****/
/* For M2S queues 3:0, enable usage of the Target-ID from the buffer address 63:56 */
#define UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_DESC_EN_MASK 0x0000000F
#define UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_DESC_EN_SHIFT 0
/*
* For M2S queues 3:0, enable usage of the VMID from the configuration register
* (cfg_vmid_1/2 used for M2S queue_x)
* For M2S queues 3:0, enable usage of the Target-ID from the configuration register
* (cfg_tgtid_1/2 used for M2S queue_x)
*/
#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK 0x000000F0
#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT 4
/* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL (1 << 8)
/* Enable write to all VMID_n registers in the MSI-X Controller */
#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN (1 << 9)
/* For S2M queues 3:0, enable usage of the VMID from the buffer address 63:56 */
#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK 0x000F0000
#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT 16
#define UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_QUEUE_EN_MASK 0x000000F0
#define UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_QUEUE_EN_SHIFT 4
/* use Target-ID_n [7:0] from MSI-X Controller for MSI-X message */
#define UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_SEL (1 << 8)
/* Enable write to all Target-ID_n registers in the MSI-X Controller */
#define UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_ACCESS_EN (1 << 9)
/* For S2M queues 3:0, enable usage of the Target-ID from the buffer address 63:56 */
#define UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_DESC_EN_MASK 0x000F0000
#define UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_DESC_EN_SHIFT 16
/*
* For S2M queues 3:0, enable usage of the VMID from the configuration register
* (cfg_vmid_3/4 used for M2S queue_x)
* For S2M queues 3:0, enable usage of the Target-ID from the configuration register
* (cfg_tgtid_3/4 used for M2S queue_x)
*/
#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK 0x00F00000
#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT 20
#define UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_QUEUE_EN_MASK 0x00F00000
#define UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_QUEUE_EN_SHIFT 20
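
For context, a minimal sketch (not part of this diff; the function name and the queue choices are illustrative assumptions) of how the cfg_tgtid_0 enable bits above could be combined. Only the register value is composed here; writing it out is left to the driver's existing register helpers.

#include <stdint.h>

/*
 * Sketch: build a cfg_tgtid_0 value that takes the Target-ID for TX
 * queues 0-1 from the per-queue configuration registers, and for RX
 * queues 0-3 from buffer address bits 63:56.
 */
static uint32_t
udma_gen_tgtid_cfg0_example(void)
{
	uint32_t val = 0;

	/* TX queues 0-1: use the cfg_tgtid_1/2 registers as the source */
	val |= (0x3u << UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_QUEUE_EN_SHIFT) &
	    UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_QUEUE_EN_MASK;

	/* RX queues 0-3: take the Target-ID from buffer address [63:56] */
	val |= (0xFu << UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_DESC_EN_SHIFT) &
	    UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_DESC_EN_MASK;

	return (val);
}
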
/**** cfg_vmid_1 register ****/
/* TX queue 0 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT 0
/* TX queue 1 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT 16
#define UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid) (((qid) & 0x1) ? 16 : 0)
#define UDMA_GEN_TGTID_CFG_TGTID_MASK(qid) (((qid) & 0x1) ? 0xFFFF0000 : 0x0000FFFF)
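
The two qid-keyed macros above collapse the former per-register, per-queue defines into a single pair: even queues occupy bits 15:0 of a cfg_tgtid word, odd queues bits 31:16. A hedged sketch of their intended use (the helper name and caller are assumptions, not taken from this diff):

#include <stdint.h>

/*
 * Sketch: merge a 16-bit Target-ID for queue 'qid' into the 32-bit
 * cfg_tgtid_1/2 (or _3/4) word that holds two queues per register.
 */
static inline uint32_t
udma_gen_tgtid_set_queue(uint32_t regval, unsigned int qid, uint16_t tgtid)
{
	regval &= ~UDMA_GEN_TGTID_CFG_TGTID_MASK(qid);
	regval |= ((uint32_t)tgtid << UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid)) &
	    UDMA_GEN_TGTID_CFG_TGTID_MASK(qid);
	return (regval);
}
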
/**** cfg_vmid_2 register ****/
/* TX queue 2 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT 0
/* TX queue 3 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT 16
/**** cfg_tgtid_1 register ****/
/* TX queue 0 Target-ID value */
#define UDMA_GEN_TGTID_CFG_TGTID_1_TX_Q_0_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_TGTID_CFG_TGTID_1_TX_Q_0_TGTID_SHIFT 0
/* TX queue 1 Target-ID value */
#define UDMA_GEN_TGTID_CFG_TGTID_1_TX_Q_1_TGTID_MASK 0xFFFF0000
#define UDMA_GEN_TGTID_CFG_TGTID_1_TX_Q_1_TGTID_SHIFT 16
/**** cfg_vmid_3 register ****/
/* RX queue 0 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT 0
/* RX queue 1 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT 16
/**** cfg_tgtid_2 register ****/
/* TX queue 2 Target-ID value */
#define UDMA_GEN_TGTID_CFG_TGTID_2_TX_Q_2_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_TGTID_CFG_TGTID_2_TX_Q_2_TGTID_SHIFT 0
/* TX queue 3 Target-ID value */
#define UDMA_GEN_TGTID_CFG_TGTID_2_TX_Q_3_TGTID_MASK 0xFFFF0000
#define UDMA_GEN_TGTID_CFG_TGTID_2_TX_Q_3_TGTID_SHIFT 16
/**** cfg_vmid_4 register ****/
/* RX queue 2 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT 0
/* RX queue 3 VMID value */
#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT 16
/**** cfg_tgtid_3 register ****/
/* RX queue 0 Target-ID value */
#define UDMA_GEN_TGTID_CFG_TGTID_3_RX_Q_0_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_TGTID_CFG_TGTID_3_RX_Q_0_TGTID_SHIFT 0
/* RX queue 1 Target-ID value */
#define UDMA_GEN_TGTID_CFG_TGTID_3_RX_Q_1_TGTID_MASK 0xFFFF0000
#define UDMA_GEN_TGTID_CFG_TGTID_3_RX_Q_1_TGTID_SHIFT 16
/**** cfg_vmaddr_0 register ****/
/* TX queue 0 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_MASK 0x0000FFFF
#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_SHIFT 0
/* TX queue 1 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_MASK 0xFFFF0000
#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_SHIFT 16
/**** cfg_tgtid_4 register ****/
/* RX queue 2 Target-ID value */
#define UDMA_GEN_TGTID_CFG_TGTID_4_RX_Q_2_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_TGTID_CFG_TGTID_4_RX_Q_2_TGTID_SHIFT 0
/* RX queue 3 Target-ID value */
#define UDMA_GEN_TGTID_CFG_TGTID_4_RX_Q_3_TGTID_MASK 0xFFFF0000
#define UDMA_GEN_TGTID_CFG_TGTID_4_RX_Q_3_TGTID_SHIFT 16
/**** cfg_vmaddr_1 register ****/
/* TX queue 2 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_MASK 0x0000FFFF
#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_SHIFT 0
/* TX queue 3 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_MASK 0xFFFF0000
#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_SHIFT 16
#define UDMA_GEN_TGTADDR_CFG_SHIFT(qid) (((qid) & 0x1) ? 16 : 0)
#define UDMA_GEN_TGTADDR_CFG_MASK(qid) (((qid) & 0x1) ? 0xFFFF0000 : 0x0000FFFF)
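
The cfg_tgtaddr registers use the same two-queues-per-word layout, so an analogous sketch applies (again, the helper name is an assumption for illustration only):

/* Sketch: same pattern for the per-queue Target-Address field. */
static inline uint32_t
udma_gen_tgtaddr_set_queue(uint32_t regval, unsigned int qid, uint16_t tgtaddr)
{
	regval &= ~UDMA_GEN_TGTADDR_CFG_MASK(qid);
	regval |= ((uint32_t)tgtaddr << UDMA_GEN_TGTADDR_CFG_SHIFT(qid)) &
	    UDMA_GEN_TGTADDR_CFG_MASK(qid);
	return (regval);
}
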
/**** cfg_vmaddr_2 register ****/
/* RX queue 0 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_MASK 0x0000FFFF
#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_SHIFT 0
/* RX queue 1 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_MASK 0xFFFF0000
#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_SHIFT 16
/**** cfg_tgtaddr_0 register ****/
/* TX queue 0 Target-Address value */
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_0_TX_Q_0_TGTADDR_MASK 0x0000FFFF
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_0_TX_Q_0_TGTADDR_SHIFT 0
/* TX queue 1 Target-Address value */
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_0_TX_Q_1_TGTADDR_MASK 0xFFFF0000
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_0_TX_Q_1_TGTADDR_SHIFT 16
/**** cfg_vmaddr_3 register ****/
/* RX queue 2 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_MASK 0x0000FFFF
#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_SHIFT 0
/* RX queue 3 VMADDR value */
#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_MASK 0xFFFF0000
#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_SHIFT 16
/**** cfg_tgtaddr_1 register ****/
/* TX queue 2 Target-Address value */
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_1_TX_Q_2_TGTADDR_MASK 0x0000FFFF
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_1_TX_Q_2_TGTADDR_SHIFT 0
/* TX queue 3 Target-Address value */
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_1_TX_Q_3_TGTADDR_MASK 0xFFFF0000
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_1_TX_Q_3_TGTADDR_SHIFT 16
/**** cfg_tgtaddr_2 register ****/
/* RX queue 0 Target-Address value */
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_2_RX_Q_0_TGTADDR_MASK 0x0000FFFF
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_2_RX_Q_0_TGTADDR_SHIFT 0
/* RX queue 1 Target-Address value */
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_2_RX_Q_1_TGTADDR_MASK 0xFFFF0000
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_2_RX_Q_1_TGTADDR_SHIFT 16
/**** cfg_tgtaddr_3 register ****/
/* RX queue 2 Target-Address value */
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_3_RX_Q_2_TGTADDR_MASK 0x0000FFFF
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_3_RX_Q_2_TGTADDR_SHIFT 0
/* RX queue 3 Target-Address value */
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_3_RX_Q_3_TGTADDR_MASK 0xFFFF0000
#define UDMA_GEN_TGTADDR_CFG_TGTADDR_3_RX_Q_3_TGTADDR_SHIFT 16
/**** cfg_vmpr_0 register ****/
/* TX High Address Select Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK 0x0000003F
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_SHIFT 0
/* TX Data VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN (1 << 7)
/* TX Prefetch VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN (1 << 28)
/* TX Completions VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN (1 << 29)
/* TX Data Target-ID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_TGTID_EN (1 << 7)
/* TX Prefetch Target-ID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_TGTID_EN (1 << 28)
/* TX Completions Target-ID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_TGTID_EN (1 << 29)
/**** cfg_vmpr_2 register ****/
/* TX queue Prefetch VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT 0
/* TX queue Completion VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT 16
/* TX queue Prefetch Target-ID */
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_TGTID_SHIFT 0
/* TX queue Completion Target-ID */
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_TGTID_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_TGTID_SHIFT 16
/**** cfg_vmpr_3 register ****/
/* TX queue Data VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT 0
/* TX queue Data VMID select */
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT 16
/* TX queue Data Target-ID */
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_TGTID_SHIFT 0
/* TX queue Data Target-ID select */
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_TGTID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_TGTID_SEL_SHIFT 16
/**** cfg_vmpr_4 register ****/
/* RX Data Buffer1 - High Address Select Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK 0x0000003F
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT 0
/* RX Data Buffer1 VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN (1 << 7)
/* RX Data Buffer1 Target-ID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_TGTID_EN (1 << 7)
/* RX Data Buffer2 - High Address Select Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK 0x00003F00
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT 8
/* RX Data Buffer2 VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN (1 << 15)
/* RX Data Buffer2 Target-ID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_TGTID_EN (1 << 15)
/* RX Direct Data Placement - High Address Select Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK 0x003F0000
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT 16
/* RX Direct Data Placement VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN (1 << 23)
/* RX Direct Data Placement Target-ID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_TGTID_EN (1 << 23)
/* RX Buffer 2 MSB address word selects per bytes, per queue */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK 0x0F000000
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT 24
/* RX Prefetch VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN (1 << 28)
/* RX Completions VMID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN (1 << 29)
/* RX Prefetch Target-ID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_TGTID_EN (1 << 28)
/* RX Completions Target-ID Enable Per Q */
#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_TGTID_EN (1 << 29)
/**** cfg_vmpr_6 register ****/
/* RX queue Prefetch VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT 0
/* RX queue Completion VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT 16
/* RX queue Prefetch Target-ID */
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_TGTID_SHIFT 0
/* RX queue Completion Target-ID */
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_TGTID_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_TGTID_SHIFT 16
/**** cfg_vmpr_7 register ****/
/* RX queue Data Buffer 1 VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT 0
/* RX queue Data Buffer 1 VMID select */
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT 16
/* RX queue Data Buffer 1 Target-ID */
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_TGTID_SHIFT 0
/* RX queue Data Buffer 1 Target-ID select */
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_TGTID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_TGTID_SEL_SHIFT 16
/**** cfg_vmpr_8 register ****/
/* RX queue Data Buffer 2 VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT 0
/* RX queue Data Buffer 2 VMID select */
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT 16
/* RX queue Data Buffer 2 Target-ID */
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_TGTID_SHIFT 0
/* RX queue Data Buffer 2 Target-ID select */
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_TGTID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_TGTID_SEL_SHIFT 16
/**** cfg_vmpr_9 register ****/
/* RX queue DDP VMID */
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT 0
/* RX queue DDP VMID select */
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT 16
/* RX queue DDP Target-ID */
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_TGTID_MASK 0x0000FFFF
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_TGTID_SHIFT 0
/* RX queue DDP Target-ID select */
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_TGTID_SEL_MASK 0xFFFF0000
#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_TGTID_SEL_SHIFT 16
#ifdef __cplusplus
}

View File

@ -274,11 +274,11 @@ extern "C" {
#define AL_ADPTR_GEN_CTL_13_SATA_ARUSER_VAL_SHIFT 0
#define AL_ADPTR_GEN_CTL_13_SATA_ARUSER_SEL_MASK AL_FIELD_MASK(31, 16)
#define AL_ADPTR_GEN_CTL_13_SATA_ARUSER_SEL_SHIFT 16
/* Central VMID enabler. If set, then each entry will be used as programmed */
#define AL_ADPTR_GEN_CTL_14_SATA_MSIX_VMID_SEL AL_BIT(0)
/* Allow access to store VMID values per entry */
#define AL_ADPTR_GEN_CTL_14_SATA_MSIX_VMID_ACCESS_EN AL_BIT(1)
/* VMID Address select */
/* Central Target-ID enabler. If set, then each entry will be used as programmed */
#define AL_ADPTR_GEN_CTL_14_SATA_MSIX_TGTID_SEL AL_BIT(0)
/* Allow access to store Target-ID values per entry */
#define AL_ADPTR_GEN_CTL_14_SATA_MSIX_TGTID_ACCESS_EN AL_BIT(1)
/* Target-ID Address select */
/* Tx */
#define AL_ADPTR_GEN_CTL_14_SATA_VM_ARADDR_SEL_MASK AL_FIELD_MASK(13, 8)
#define AL_ADPTR_GEN_CTL_14_SATA_VM_ARADDR_SEL_SHIFT 8
@ -294,13 +294,13 @@ extern "C" {
/*
* ROB registers
*/
/* Read ROB_Enable, when disabled the read ROB is bypassed */
/* Read ROB Enable, when disabled the read ROB is bypassed */
#define AL_ADPTR_GEN_CTL_19_READ_ROB_EN AL_BIT(0)
/* Read force in-order of every read transaction */
#define AL_ADPTR_GEN_CTL_19_READ_ROB_FORCE_INORDER AL_BIT(1)
/* Read software reset */
#define AL_ADPTR_GEN_CTL_19_READ_ROB_SW_RESET AL_BIT(15)
/* Write ROB_Enable, when disabled_the_Write ROB is bypassed */
/* Write ROB Enable, when disabled the Write ROB is bypassed */
#define AL_ADPTR_GEN_CTL_19_WRITE_ROB_EN AL_BIT(16)
/* Write force in-order of every write transaction */
#define AL_ADPTR_GEN_CTL_19_WRITE_ROB_FORCE_INORDER AL_BIT(17)

Some files were not shown because too many files have changed in this diff.