Merge ^/head r343712 through r343806.

dim committed 2019-02-05 19:50:46 +00:00
parent 23e870dc13
commit 9c6508ad9d
125 changed files with 986 additions and 795 deletions

View File

@ -60,8 +60,8 @@
# ------------------------------------------------------------------------
#
# The Makefile knows about the following maps:
# access, bitdomain, domaintable, genericstable, mailertable, userdb,
# uucpdomain, virtusertable
# access, authinfo, bitdomain, domaintable, genericstable, mailertable,
# userdb, uucpdomain, virtusertable
#
.ifndef SENDMAIL_MC
@ -125,7 +125,7 @@ SENDMAIL_MAP_PERMS?= 0640
# type to use when calling makemap.
#
SENDMAIL_MAP_SRC+= mailertable domaintable bitdomain uucpdomain \
genericstable virtusertable access
genericstable virtusertable access authinfo
SENDMAIL_MAP_OBJ=
SENDMAIL_MAP_TYPE?= hash
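
(Aside, not part of the diff: sendmail's authinfo map holds client-side SMTP AUTH credentials. With authinfo added to SENDMAIL_MAP_SRC, a plain make(1) run in /etc/mail now rebuilds authinfo.db with the same makemap type and permissions as the other maps.)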

View File

@ -97,22 +97,31 @@ generate_random_port(int seed)
static void
resolve_localhost(struct addrinfo **res, int domain, int type, int port)
{
const char *host;
char *serv;
struct addrinfo hints;
int error;
ATF_REQUIRE_MSG(domain == AF_INET || domain == AF_INET6,
"unhandled domain: %d", domain);
switch (domain) {
case AF_INET:
host = "127.0.0.1";
break;
case AF_INET6:
host = "::1";
break;
default:
atf_tc_fail("unhandled domain: %d", domain);
}
ATF_REQUIRE_MSG(asprintf(&serv, "%d", port) >= 0,
"asprintf failed: %s", strerror(errno));
memset(&hints, 0, sizeof(hints));
hints.ai_family = domain;
hints.ai_flags = AI_ADDRCONFIG|AI_NUMERICSERV;
hints.ai_flags = AI_ADDRCONFIG|AI_NUMERICSERV|AI_NUMERICHOST;
hints.ai_socktype = type;
error = getaddrinfo("localhost", serv, &hints, res);
error = getaddrinfo(host, serv, &hints, res);
ATF_REQUIRE_EQ_MSG(error, 0,
"getaddrinfo failed: %s", gai_strerror(error));
free(serv);
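
(Aside, not part of the diff: the switch above pins each address family to a literal loopback address, and AI_NUMERICHOST guarantees that getaddrinfo(3) parses the string numerically without consulting hosts(5) or DNS, so the test no longer depends on a sane "localhost" entry. A minimal standalone sketch of the same pattern, with an illustrative port:)

#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	struct addrinfo hints, *res;
	int error;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_INET6;
	hints.ai_socktype = SOCK_STREAM;
	/* AI_NUMERICHOST: parse "::1" directly, never hit the resolver. */
	hints.ai_flags = AI_NUMERICSERV | AI_NUMERICHOST;

	error = getaddrinfo("::1", "8080", &hints, &res);
	if (error != 0) {
		fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(error));
		return (EXIT_FAILURE);
	}
	freeaddrinfo(res);
	return (EXIT_SUCCESS);
}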

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,3 +1,4 @@
.\"
.\" Copyright 2016 Netflix, Inc.
.\" All rights reserved.
.\"

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2016 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2016 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -54,6 +54,10 @@
#include "json.h"
#include "jevents.h"
static int
nftw_ordered(const char *path, int (*fn)(const char *, const struct stat *, int,
struct FTW *), int nfds, int ftwflags);
_Noreturn void _Exit(int);
int verbose;
@ -1122,7 +1126,7 @@ int main(int argc, char *argv[])
maxfds = get_maxfds();
mapfile = NULL;
rc = nftw(ldirname, preprocess_arch_std_files, maxfds, 0);
rc = nftw_ordered(ldirname, preprocess_arch_std_files, maxfds, 0);
if (rc && verbose) {
pr_info("%s: Error preprocessing arch standard files %s: %s\n",
prog, ldirname, strerror(errno));
@ -1135,7 +1139,7 @@ int main(int argc, char *argv[])
goto empty_map;
}
rc = nftw(ldirname, process_one_file, maxfds, 0);
rc = nftw_ordered(ldirname, process_one_file, maxfds, 0);
if (rc && verbose) {
pr_info("%s: Error walking file tree %s\n", prog, ldirname);
goto empty_map;
@ -1169,3 +1173,90 @@ int main(int argc, char *argv[])
free_arch_std_events();
return 0;
}
#include <fts.h>
static int
fts_compare(const FTSENT * const *a, const FTSENT * const *b)
{
return (strcmp((*a)->fts_name, (*b)->fts_name));
}
static int
nftw_ordered(const char *path, int (*fn)(const char *, const struct stat *, int,
struct FTW *), int nfds, int ftwflags)
{
char * const paths[2] = { (char *)path, NULL };
struct FTW ftw;
FTSENT *cur;
FTS *ftsp;
int error = 0, ftsflags, fnflag, postorder, sverrno;
/* XXX - nfds is currently unused */
if (nfds < 1) {
errno = EINVAL;
return (-1);
}
ftsflags = FTS_COMFOLLOW;
if (!(ftwflags & FTW_CHDIR))
ftsflags |= FTS_NOCHDIR;
if (ftwflags & FTW_MOUNT)
ftsflags |= FTS_XDEV;
if (ftwflags & FTW_PHYS)
ftsflags |= FTS_PHYSICAL;
else
ftsflags |= FTS_LOGICAL;
postorder = (ftwflags & FTW_DEPTH) != 0;
ftsp = fts_open(paths, ftsflags, fts_compare);
if (ftsp == NULL)
return (-1);
while ((cur = fts_read(ftsp)) != NULL) {
switch (cur->fts_info) {
case FTS_D:
if (postorder)
continue;
fnflag = FTW_D;
break;
case FTS_DC:
continue;
case FTS_DNR:
fnflag = FTW_DNR;
break;
case FTS_DP:
if (!postorder)
continue;
fnflag = FTW_DP;
break;
case FTS_F:
case FTS_DEFAULT:
fnflag = FTW_F;
break;
case FTS_NS:
case FTS_NSOK:
fnflag = FTW_NS;
break;
case FTS_SL:
fnflag = FTW_SL;
break;
case FTS_SLNONE:
fnflag = FTW_SLN;
break;
default:
error = -1;
goto done;
}
ftw.base = cur->fts_pathlen - cur->fts_namelen;
ftw.level = cur->fts_level;
error = fn(cur->fts_path, cur->fts_statp, fnflag, &ftw);
if (error != 0)
break;
}
done:
sverrno = errno;
if (fts_close(ftsp) != 0 && error == 0)
error = -1;
else
errno = sverrno;
return (error);
}
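
(Aside, not part of the diff: nftw(3) leaves the visit order of siblings unspecified, while the fts_compare() callback above sorts them by name, so repeated builds walk the pmu-events tree in the same order and the two call sites in main() produce reproducible output. A hypothetical caller, assuming the definitions above are in scope:)

#include <ftw.h>
#include <sys/stat.h>
#include <stdio.h>

/* Hypothetical callback: prints each entry indented by its depth. */
static int
print_entry(const char *path, const struct stat *sb, int flag,
    struct FTW *ftwp)
{
	(void)sb;
	(void)flag;
	printf("%*s%s\n", ftwp->level * 2, "", path + ftwp->base);
	return (0);
}

/* e.g.: rc = nftw_ordered("pmu-events/arch", print_entry, 20, 0); */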

View File

@ -1208,7 +1208,7 @@ sysdecode_sctp_pr_policy(int policy)
static struct name_table sctpsndflags[] = {
X(SCTP_EOF) X(SCTP_ABORT) X(SCTP_UNORDERED) X(SCTP_ADDR_OVER)
X(SCTP_SENDALL) X(SCTP_SACK_IMMEDIATELY) XEND
X(SCTP_SENDALL) X(SCTP_EOR) X(SCTP_SACK_IMMEDIATELY) XEND
};
bool

View File

@ -46,6 +46,8 @@ void
__thr_malloc_init(void)
{
if (npagesizes != 0)
return;
npagesizes = getpagesizes(pagesizes_d, nitems(pagesizes_d));
if (npagesizes == -1) {
npagesizes = 1;
@ -59,6 +61,8 @@ static void
thr_malloc_lock(struct pthread *curthread)
{
if (curthread == NULL)
return;
curthread->locklevel++;
_thr_umutex_lock(&thr_malloc_umtx, TID(curthread));
}
@ -67,6 +71,8 @@ static void
thr_malloc_unlock(struct pthread *curthread)
{
if (curthread == NULL)
return;
_thr_umutex_unlock(&thr_malloc_umtx, TID(curthread));
curthread->locklevel--;
_thr_ast(curthread);

View File

@ -390,6 +390,7 @@ __pthread_mutex_init(pthread_mutex_t * __restrict mutex,
}
if (mutex_attr == NULL ||
(*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
__thr_malloc_init();
return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
__thr_calloc));
}
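
(Aside, not part of the diff: a plausible reading of these guards is that __thr_malloc_init() is now idempotent and the lock/unlock paths tolerate a NULL curthread, so __pthread_mutex_init() can call into the allocator very early, e.g. from shared-object constructors that run before the threading library has fully initialized the current thread.)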

View File

@ -6,7 +6,7 @@
EMBEDDED_TARGET_ARCH="aarch64"
EMBEDDED_TARGET="arm64"
EMBEDDEDBUILD=1
EMBEDDEDPORTS="sysutils/u-boot-sopine"
EMBEDDEDPORTS="sysutils/u-boot-pine64-lts"
FAT_SIZE="54m -b 1m"
FAT_TYPE="16"
IMAGE_SIZE="2560M"
@ -18,7 +18,7 @@ FDT_OVERLAYS="sun50i-a64-sid,sun50i-a64-ths,sun50i-a64-timer,sun50i-a64-opp"
export BOARDNAME="PINE64-LTS"
arm_install_uboot() {
UBOOT_DIR="/usr/local/share/u-boot/u-boot-sopine"
UBOOT_DIR="/usr/local/share/u-boot/u-boot-pine64-lts"
UBOOT_FILES="u-boot-sunxi-with-spl.bin"
chroot ${CHROOTDIR} dd if=${UBOOT_DIR}/${UBOOT_FILES} \
of=/dev/${mddev} bs=1k seek=8 conv=sync

View File

@ -4,7 +4,7 @@
#
DTB_DIR="/usr/local/share/rpi-firmware"
DTB="bcm2710-rpi-3-b.dtb"
DTB="bcm2710-rpi-3-b.dtb bcm2710-rpi-3-b-plus.dtb"
EMBEDDED_TARGET_ARCH="aarch64"
EMBEDDED_TARGET="arm64"
EMBEDDEDBUILD=1

View File

@ -1,7 +1,5 @@
.\"
.\" Copyright (c) 2017 Netflix, Inc
.\"
.\" All rights reserved.
.\" Copyright (c) 2017 Netflix, Inc.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc
* All rights reserved.
* Copyright (c) 2017 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc
* All rights reserved.
* Copyright (c) 2017 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2017 Netflix, Inc
* All rights reserved.
* Copyright (c) 2017 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2017 Netflix, Inc
* Copyright (c) 2017 Netflix, Inc.
* Copyright (C) 2018 Alexander Motin <mav@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (C) 2018 Netflix
* Copyright (C) 2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2016 Netflix, Inc
* All rights reserved.
* Copyright (c) 2016 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,5 +1,5 @@
.\" Copyright (c) 2017 Netflix, Inc
.\" All rights reserved.
.\"
.\" Copyright (c) 2017 Netflix, Inc.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
.\"
.\" Copyright (c) 2015 Netflix Inc.
.\" All rights reserved.
.\" Copyright (c) 2015 Netflix, Inc.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2018 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015 Netflix, Inc. All Rights Reserved.
* Copyright (c) 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,5 +1,5 @@
/*-
* Copyright 2016 Netflix, Inc. All Rights Reserved.
* Copyright 2016 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,9 +1,10 @@
/*-
* Copyright (c) 2008-2010 Rui Paulo
* Copyright (c) 2006 Marcel Moolenaar
* Copyright (c) 2018 Netflix, Inc
* All rights reserved.
*
* Copyright (c) 2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:

View File

@ -1,5 +1,5 @@
\ Copyright (c) 2016 Netflix, Inc
\ All rights reserved.
\
\ Copyright (c) 2016 Netflix, Inc.
\
\ Redistribution and use in source and binary forms, with or without
\ modification, are permitted provided that the following conditions

View File

@ -1,8 +1,9 @@
/*-
* Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
* Copyright (c) 2016 Netflix, Inc
* All rights reserved.
*
* Copyright (c) 2016 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018 Netflix. All Rights Reserved.
* Copyright (c) 2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2018 Netflix
* Copyright (c) 2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -521,12 +521,14 @@ fast_syscall_common:
movq TF_RFLAGS(%rsp),%r11 /* original %rflags */
movq TF_RIP(%rsp),%rcx /* original %rip */
movq TF_RSP(%rsp),%rsp /* user stack pointer */
xorl %r8d,%r8d /* zero the rest of GPRs */
xorl %r10d,%r10d
cmpq $~0,PCPU(UCR3)
je 2f
movq PCPU(UCR3),%r9
movq %r9,%cr3
xorl %r9d,%r9d
2: swapgs
2: xorl %r9d,%r9d
swapgs
sysretq
3: /* AST scheduled. */
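
(Aside, not part of the diff: in the old code the xorl of %r9d sat only on the PTI path, after %cr3 was reloaded from PCPU(UCR3); with PTI disabled (UCR3 == ~0) the branch to label 2 skipped it, and sysretq returned to user mode with a stale kernel value in %r9. Hoisting the xorl to label 2 scrubs the register on both paths before swapgs.)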

View File

@ -102,8 +102,8 @@ options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones
options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default
# Kernel Sanitizers
#options COVERAGE # Generic kernel coverage. Used by KCOV
#options KCOV # Kernel Coverage Sanitizer
options COVERAGE # Generic kernel coverage. Used by KCOV
options KCOV # Kernel Coverage Sanitizer
# Warning: KUBSAN can result in a kernel too large for loader to load
#options KUBSAN # Kernel Undefined Behavior Sanitizer

View File

@ -29,6 +29,7 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_acpi.h"
#include "opt_ddb.h"
/*
@ -48,8 +49,13 @@ __FBSDID("$FreeBSD$");
* that can be allocated, or both, depending on the exclusion flags associated
* with the region.
*/
#ifdef DEV_ACPI
#define MAX_HWCNT 32 /* ACPI needs more regions */
#define MAX_EXCNT 32
#else
#define MAX_HWCNT 16
#define MAX_EXCNT 16
#endif
#if defined(__arm__)
#define MAX_PHYS_ADDR 0xFFFFFFFFull

View File

@ -94,8 +94,8 @@ options USB_DEBUG # enable debug msgs
options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default
# Kernel Sanitizers
#options COVERAGE # Generic kernel coverage. Used by KCOV
#options KCOV # Kernel Coverage Sanitizer
options COVERAGE # Generic kernel coverage. Used by KCOV
options KCOV # Kernel Coverage Sanitizer
# Warning: KUBSAN can result in a kernel too large for loader to load
#options KUBSAN # Kernel Undefined Behavior Sanitizer

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2015 Netflix, Inc
* Copyright (c) 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2015 Netflix, Inc
* Copyright (c) 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2015 Netflix, Inc
* Copyright (c) 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -2729,13 +2729,13 @@ ses_handle_string(enc_softc_t *enc, encioc_string_t *sstr, int ioc)
if (sstr->bufsiz > 0xffff)
return (EINVAL); /* buffer size too large */
if (ioc == ENCIOC_SETSTRING) {
switch (ioc) {
case ENCIOC_SETSTRING:
payload = sstr->bufsiz + 4; /* header for SEND DIAGNOSTIC */
amt = 0 - payload;
buf = ENC_MALLOC(payload);
if (buf == NULL)
return ENOMEM;
return (ENOMEM);
ses_page_cdb(cdb, payload, 0, CAM_DIR_OUT);
/* Construct the page request */
buf[0] = SesStringOut;
@ -2743,12 +2743,14 @@ ses_handle_string(enc_softc_t *enc, encioc_string_t *sstr, int ioc)
buf[2] = sstr->bufsiz >> 8;
buf[3] = sstr->bufsiz & 0xff;
memcpy(&buf[4], sstr->buf, sstr->bufsiz);
} else if (ioc == ENCIOC_GETSTRING) {
break;
case ENCIOC_GETSTRING:
payload = sstr->bufsiz;
amt = payload;
ses_page_cdb(cdb, payload, SesStringIn, CAM_DIR_IN);
buf = sstr->buf;
} else if (ioc == ENCIOC_GETENCNAME) {
break;
case ENCIOC_GETENCNAME:
if (ses_cache->ses_nsubencs < 1)
return (ENODEV);
enc_desc = ses_cache->subencs[0];
@ -2768,7 +2770,7 @@ ses_handle_string(enc_softc_t *enc, encioc_string_t *sstr, int ioc)
size = sstr->bufsiz;
copyout(str, sstr->buf, size);
return (size == rsize ? 0 : ENOMEM);
} else if (ioc == ENCIOC_GETENCID) {
case ENCIOC_GETENCID:
if (ses_cache->ses_nsubencs < 1)
return (ENODEV);
enc_desc = ses_cache->subencs[0];
@ -2782,13 +2784,13 @@ ses_handle_string(enc_softc_t *enc, encioc_string_t *sstr, int ioc)
size = sstr->bufsiz;
copyout(str, sstr->buf, size);
return (size == rsize ? 0 : ENOMEM);
} else
return EINVAL;
default:
return (EINVAL);
}
ret = enc_runcmd(enc, cdb, 6, buf, &amt);
if (ioc == ENCIOC_SETSTRING)
ENC_FREE(buf);
return ret;
return (ret);
}
/**

View File

@ -165,29 +165,38 @@ static vdev_ops_t *vdev_ops_table[] = {
/* target number of metaslabs per top-level vdev */
int vdev_max_ms_count = 200;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, max_ms_count, CTLFLAG_RDTUN,
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, max_ms_count, CTLFLAG_RWTUN,
&vdev_max_ms_count, 0,
"Maximum number of metaslabs per top-level vdev");
"Target number of metaslabs per top-level vdev");
/* minimum number of metaslabs per top-level vdev */
int vdev_min_ms_count = 16;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, min_ms_count, CTLFLAG_RDTUN,
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, min_ms_count, CTLFLAG_RWTUN,
&vdev_min_ms_count, 0,
"Minimum number of metaslabs per top-level vdev");
/* practical upper limit of total metaslabs per top-level vdev */
int vdev_ms_count_limit = 1ULL << 17;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, max_ms_count_limit, CTLFLAG_RWTUN,
&vdev_ms_count_limit, 0,
"Maximum number of metaslabs per top-level vdev");
/* lower limit for metaslab size (512M) */
int vdev_default_ms_shift = 29;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, default_ms_shift, CTLFLAG_RDTUN,
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, default_ms_shift, CTLFLAG_RWTUN,
&vdev_default_ms_shift, 0,
"Shift between vdev size and number of metaslabs");
"Default shift between vdev size and number of metaslabs");
/* upper limit for metaslab size (256G) */
int vdev_max_ms_shift = 38;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, max_ms_shift, CTLFLAG_RWTUN,
&vdev_max_ms_shift, 0,
"Maximum shift between vdev size and number of metaslabs");
boolean_t vdev_validate_skip = B_FALSE;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, validate_skip, CTLFLAG_RWTUN,
&vdev_validate_skip, 0,
"Bypass vdev validation");
/*
* Since the DTL space map of a vdev is not expected to have a lot of
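
(Aside, not part of the diff: switching these OIDs to CTLFLAG_RWTUN makes them adjustable at runtime instead of boot-time-only tunables. A small hedged user-space sketch querying one of the new knobs via sysctlbyname(3):)

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int shift;
	size_t len = sizeof(shift);

	/* OID name taken from the SYSCTL_INT declaration above. */
	if (sysctlbyname("vfs.zfs.vdev.max_ms_shift", &shift, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("max_ms_shift: %d\n", shift);
	return (0);
}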

View File

@ -3808,7 +3808,7 @@ kern/kern_idle.c standard
kern/kern_intr.c standard
kern/kern_jail.c standard
kern/kern_kcov.c optional kcov \
compile-with "${NORMAL_C} -fno-sanitize-coverage=trace-pc,trace-cmp"
compile-with "${NORMAL_C} -fno-sanitize=all"
kern/kern_khelp.c standard
kern/kern_kthread.c standard
kern/kern_ktr.c optional ktr

View File

@ -120,7 +120,12 @@ SAN_CFLAGS+= -fsanitize=undefined
COVERAGE_ENABLED!= grep COVERAGE opt_global.h || true ; echo
.if !empty(COVERAGE_ENABLED)
.if ${COMPILER_TYPE} == "clang" || \
(${COMPILER_TYPE} == "gcc" && ${COMPILER_VERSION} >= 80100)
SAN_CFLAGS+= -fsanitize-coverage=trace-pc,trace-cmp
.else
SAN_CFLAGS+= -fsanitize-coverage=trace-pc
.endif
.endif
CFLAGS+= ${SAN_CFLAGS}
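
(Aside, not part of the diff: the compiler test reflects that comparison tracing is newer than plain PC tracing; clang and, per the condition, GCC 8.1 or later accept -fsanitize-coverage=trace-pc,trace-cmp, while older GCC builds fall back to trace-pc only.)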

View File

@ -6119,9 +6119,7 @@ ipf_getifname(ifp, buffer)
char *buffer;
{
static char namebuf[LIFNAMSIZ];
# if defined(MENTAT) || defined(__FreeBSD__) || defined(__osf__) || \
defined(__sgi) || defined(linux) || defined(_AIX51) || \
(defined(sun) && !defined(__SVR4) && !defined(__svr4__))
# if defined(MENTAT) || defined(__FreeBSD__)
int unit, space;
char temp[20];
char *s;
@ -6131,9 +6129,7 @@ ipf_getifname(ifp, buffer)
buffer = namebuf;
(void) strncpy(buffer, ifp->if_name, LIFNAMSIZ);
buffer[LIFNAMSIZ - 1] = '\0';
# if defined(MENTAT) || defined(__FreeBSD__) || defined(__osf__) || \
defined(__sgi) || defined(_AIX51) || \
(defined(sun) && !defined(__SVR4) && !defined(__svr4__))
# if defined(MENTAT) || defined(__FreeBSD__)
for (s = buffer; *s; s++)
;
unit = ifp->if_unit;

View File

@ -1,8 +1,9 @@
/*-
* Copyright 2013 John-Mark Gurney <jmg@FreeBSD.org>
* Copyright 2015 Netflix, Inc.
* All rights reserved.
*
* Copyright 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2016 Netflix, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1151,10 +1151,10 @@ ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
kring->nr_hwtail = kring->rtail =
kring->ring->tail = ktoa->hwtail;
ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
ktoa->hwcur, atok->head, atok->cur,
ktoa->hwtail);
ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
kring->ring->head, kring->ring->cur, kring->nr_hwtail,
kring->rtail, kring->ring->tail);
@ -1179,7 +1179,6 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff)
struct ptnet_softc *sc = if_getsoftc(ifp);
int native = (na == &sc->ptna->hwup.up);
struct ptnet_queue *pq;
enum txrx t;
int ret = 0;
int i;
@ -1194,7 +1193,7 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff)
* in the RX rings, since we will not receive further interrupts
* until these will be processed. */
if (native && !onoff && na->active_fds == 0) {
D("Exit netmap mode, re-enable interrupts");
nm_prinf("Exit netmap mode, re-enable interrupts");
for (i = 0; i < sc->num_rings; i++) {
pq = sc->queues + i;
pq->atok->appl_need_kick = 1;
@ -1230,30 +1229,14 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff)
/* If not native, don't call nm_set_native_flags, since we don't want
* to replace if_transmit method, nor set NAF_NETMAP_ON */
if (native) {
for_rx_tx(t) {
for (i = 0; i <= nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_on(kring)) {
kring->nr_mode = NKR_NETMAP_ON;
}
}
}
netmap_krings_mode_commit(na, onoff);
nm_set_native_flags(na);
}
} else {
if (native) {
nm_clear_native_flags(na);
for_rx_tx(t) {
for (i = 0; i <= nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_off(kring)) {
kring->nr_mode = NKR_NETMAP_OFF;
}
}
}
netmap_krings_mode_commit(na, onoff);
}
if (sc->ptna->backend_users == 0) {
@ -1728,7 +1711,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
if (!PTNET_Q_TRYLOCK(pq)) {
/* We failed to acquire the lock, schedule the taskqueue. */
RD(1, "Deferring TX work");
nm_prlim(1, "Deferring TX work");
if (may_resched) {
taskqueue_enqueue(pq->taskq, &pq->task);
}
@ -1738,7 +1721,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
PTNET_Q_UNLOCK(pq);
RD(1, "Interface is down");
nm_prlim(1, "Interface is down");
return ENETDOWN;
}
@ -1776,7 +1759,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
break;
}
RD(1, "Found more slots by doublecheck");
nm_prlim(1, "Found more slots by doublecheck");
/* More slots were freed before reactivating
* the interrupts. */
atok->appl_need_kick = 0;
@ -1815,7 +1798,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
continue;
}
}
ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
"csum_start %u csum_ofs %u hdr_len = %u "
"gso_size %u gso_type %x", __func__,
mhead->m_pkthdr.csum_flags, vh->flags,
@ -1890,7 +1873,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
}
if (count >= budget && may_resched) {
DBG(RD(1, "out of budget: resched, %d mbufs pending\n",
DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
drbr_inuse(ifp, pq->bufring)));
taskqueue_enqueue(pq->taskq, &pq->task);
}
@ -1932,7 +1915,7 @@ ptnet_transmit(if_t ifp, struct mbuf *m)
err = drbr_enqueue(ifp, pq->bufring, m);
if (err) {
/* ENOBUFS when the bufring is full */
RD(1, "%s: drbr_enqueue() failed %d\n",
nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
__func__, err);
pq->stats.errors ++;
return err;
@ -2077,13 +2060,13 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
/* There is no good reason why host should
* put the header in multiple netmap slots.
* If this is the case, discard. */
RD(1, "Fragmented vnet-hdr: dropping");
nm_prlim(1, "Fragmented vnet-hdr: dropping");
head = ptnet_rx_discard(kring, head);
pq->stats.iqdrops ++;
deliver = 0;
goto skip;
}
ND(1, "%s: vnet hdr: flags %x csum_start %u "
nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
"csum_ofs %u hdr_len = %u gso_size %u "
"gso_type %x", __func__, vh->flags,
vh->csum_start, vh->csum_offset, vh->hdr_len,
@ -2147,7 +2130,7 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
/* The very last slot prepared by the host has
* the NS_MOREFRAG set. Drop it and continue
* the outer cycle (to do the double-check). */
RD(1, "Incomplete packet: dropping");
nm_prlim(1, "Incomplete packet: dropping");
m_freem(mhead);
pq->stats.iqdrops ++;
goto host_sync;
@ -2185,7 +2168,7 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
| VIRTIO_NET_HDR_F_DATA_VALID))) {
if (unlikely(ptnet_rx_csum(mhead, vh))) {
m_freem(mhead);
RD(1, "Csum offload error: dropping");
nm_prlim(1, "Csum offload error: dropping");
pq->stats.iqdrops ++;
deliver = 0;
}
@ -2231,7 +2214,7 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
if (count >= budget && may_resched) {
/* If we ran out of budget or the double-check found new
* slots to process, schedule the taskqueue. */
DBG(RD(1, "out of budget: resched h %u t %u\n",
DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
head, ring->tail));
taskqueue_enqueue(pq->taskq, &pq->task);
}
@ -2246,7 +2229,7 @@ ptnet_rx_task(void *context, int pending)
{
struct ptnet_queue *pq = context;
DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
@ -2255,7 +2238,7 @@ ptnet_tx_task(void *context, int pending)
{
struct ptnet_queue *pq = context;
DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
@ -2273,7 +2256,7 @@ ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet"));
queue_budget = MAX(budget / sc->num_rings, 1);
RD(1, "Per-queue budget is %d", queue_budget);
nm_prlim(1, "Per-queue budget is %d", queue_budget);
while (budget) {
unsigned int rcnt = 0;

View File

@ -90,7 +90,6 @@ vtnet_netmap_reg(struct netmap_adapter *na, int state)
struct ifnet *ifp = na->ifp;
struct vtnet_softc *sc = ifp->if_softc;
int success;
enum txrx t;
int i;
/* Drain the taskqueues to make sure that there are no worker threads
@ -132,44 +131,11 @@ vtnet_netmap_reg(struct netmap_adapter *na, int state)
success = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO;
if (state) {
for_rx_tx(t) {
/* Hardware rings. */
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_on(kring))
kring->nr_mode = NKR_NETMAP_ON;
}
/* Host rings. */
for (i = 0; i < nma_get_host_nrings(na, t); i++) {
struct netmap_kring *kring =
NMR(na, t)[nma_get_nrings(na, t) + i];
if (nm_kring_pending_on(kring))
kring->nr_mode = NKR_NETMAP_ON;
}
}
netmap_krings_mode_commit(na, state);
nm_set_native_flags(na);
} else {
nm_clear_native_flags(na);
for_rx_tx(t) {
/* Hardware rings. */
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_off(kring))
kring->nr_mode = NKR_NETMAP_OFF;
}
/* Host rings. */
for (i = 0; i < nma_get_host_nrings(na, t); i++) {
struct netmap_kring *kring =
NMR(na, t)[nma_get_nrings(na, t) + i];
if (nm_kring_pending_off(kring))
kring->nr_mode = NKR_NETMAP_OFF;
}
}
netmap_krings_mode_commit(na, state);
}
VTNET_CORE_UNLOCK(sc);
@ -396,7 +362,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
/* Skip the virtio-net header. */
len -= sc->vtnet_hdr_size;
if (unlikely(len < 0)) {
RD(1, "Truncated virtio-net-header, "
nm_prlim(1, "Truncated virtio-net-header, "
"missing %d bytes", -len);
len = 0;
}
@ -408,7 +374,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
kring->nr_hwtail = nm_i;
kring->nr_kflags &= ~NKR_PENDINTR;
}
ND("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
nm_prdis("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
kring->nr_hwcur, kring->nr_hwtail);
/*
@ -423,7 +389,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
virtqueue_notify(vq);
}
ND("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
nm_prdis("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
ring->tail, kring->nr_hwcur, kring->nr_hwtail);
return 0;

View File

@ -893,7 +893,7 @@ netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
nm_txrx2str(t), i);
ND("ktx %s h %d c %d t %d",
nm_prdis("ktx %s h %d c %d t %d",
kring->name, kring->rhead, kring->rcur, kring->rtail);
err = nm_os_selinfo_init(&kring->si, kring->name);
if (err) {
@ -955,7 +955,7 @@ netmap_hw_krings_delete(struct netmap_adapter *na)
for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
ND("destroy sw mbq with len %d", mbq_len(q));
nm_prdis("destroy sw mbq with len %d", mbq_len(q));
mbq_purge(q);
mbq_safe_fini(q);
}
@ -1176,7 +1176,7 @@ netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
if ((slot->flags & NS_FORWARD) == 0 && !force)
continue;
if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
RD(5, "bad pkt at %d len %d", n, slot->len);
nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
continue;
}
slot->flags &= ~NS_FORWARD; // XXX needed ?
@ -1290,7 +1290,7 @@ netmap_txsync_to_host(struct netmap_kring *kring, int flags)
*/
mbq_init(&q);
netmap_grab_packets(kring, &q, 1 /* force */);
ND("have %d pkts in queue", mbq_len(&q));
nm_prdis("have %d pkts in queue", mbq_len(&q));
kring->nr_hwcur = head;
kring->nr_hwtail = head + lim;
if (kring->nr_hwtail > lim)
@ -1338,7 +1338,7 @@ netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
struct netmap_slot *slot = &ring->slot[nm_i];
m_copydata(m, 0, len, NMB(na, slot));
ND("nm %d len %d", nm_i, len);
nm_prdis("nm %d len %d", nm_i, len);
if (netmap_debug & NM_DEBUG_HOST)
nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
@ -1603,7 +1603,7 @@ netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
#define NM_FAIL_ON(t) do { \
if (unlikely(t)) { \
RD(5, "%s: fail '" #t "' " \
nm_prlim(5, "%s: fail '" #t "' " \
"h %d c %d t %d " \
"rh %d rc %d rt %d " \
"hc %d ht %d", \
@ -1635,7 +1635,7 @@ nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
u_int cur = ring->cur; /* read only once */
u_int n = kring->nkr_num_slots;
ND(5, "%s kcur %d ktail %d head %d cur %d tail %d",
nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
kring->name,
kring->nr_hwcur, kring->nr_hwtail,
ring->head, ring->cur, ring->tail);
@ -1671,7 +1671,7 @@ nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
}
}
if (ring->tail != kring->rtail) {
RD(5, "%s tail overwritten was %d need %d", kring->name,
nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
ring->tail, kring->rtail);
ring->tail = kring->rtail;
}
@ -1698,7 +1698,7 @@ nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
uint32_t const n = kring->nkr_num_slots;
uint32_t head, cur;
ND(5,"%s kc %d kt %d h %d c %d t %d",
nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
kring->name,
kring->nr_hwcur, kring->nr_hwtail,
ring->head, ring->cur, ring->tail);
@ -1733,7 +1733,7 @@ nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
}
}
if (ring->tail != kring->rtail) {
RD(5, "%s tail overwritten was %d need %d",
nm_prlim(5, "%s tail overwritten was %d need %d",
kring->name,
ring->tail, kring->rtail);
ring->tail = kring->rtail;
@ -1762,7 +1762,7 @@ netmap_ring_reinit(struct netmap_kring *kring)
int errors = 0;
// XXX KASSERT nm_kr_tryget
RD(10, "called for %s", kring->name);
nm_prlim(10, "called for %s", kring->name);
// XXX probably wrong to trust userspace
kring->rhead = ring->head;
kring->rcur = ring->cur;
@ -1778,17 +1778,17 @@ netmap_ring_reinit(struct netmap_kring *kring)
u_int idx = ring->slot[i].buf_idx;
u_int len = ring->slot[i].len;
if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
RD(5, "bad index at slot %d idx %d len %d ", i, idx, len);
nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
ring->slot[i].buf_idx = 0;
ring->slot[i].len = 0;
} else if (len > NETMAP_BUF_SIZE(kring->na)) {
ring->slot[i].len = 0;
RD(5, "bad len at slot %d idx %d len %d", i, idx, len);
nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
}
}
if (errors) {
RD(10, "total %d errors", errors);
RD(10, "%s reinit, cur %d -> %d tail %d -> %d",
nm_prlim(10, "total %d errors", errors);
nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
kring->name,
ring->cur, kring->nr_hwcur,
ring->tail, kring->nr_hwtail);
@ -1825,7 +1825,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
case NR_REG_NULL:
priv->np_qfirst[t] = 0;
priv->np_qlast[t] = nma_get_nrings(na, t);
ND("ALL/PIPE: %s %d %d", nm_txrx2str(t),
nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
priv->np_qfirst[t], priv->np_qlast[t]);
break;
case NR_REG_SW:
@ -1837,7 +1837,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
nma_get_nrings(na, t) : 0);
priv->np_qlast[t] = netmap_all_rings(na, t);
ND("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
nm_txrx2str(t),
priv->np_qfirst[t], priv->np_qlast[t]);
break;
@ -1853,7 +1853,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
j = 0;
priv->np_qfirst[t] = j;
priv->np_qlast[t] = j + 1;
ND("ONE_NIC: %s %d %d", nm_txrx2str(t),
nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
priv->np_qfirst[t], priv->np_qlast[t]);
break;
default:
@ -1962,7 +1962,7 @@ netmap_krings_get(struct netmap_priv_d *priv)
if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
(kring->users && excl))
{
ND("ring %s busy", kring->name);
nm_prdis("ring %s busy", kring->name);
return EBUSY;
}
}
@ -1997,7 +1997,7 @@ netmap_krings_put(struct netmap_priv_d *priv)
int excl = (priv->np_flags & NR_EXCLUSIVE);
enum txrx t;
ND("%s: releasing tx [%d, %d) rx [%d, %d)",
nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
na->name,
priv->np_qfirst[NR_TX],
priv->np_qlast[NR_TX],
@ -2262,7 +2262,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
if (error)
goto err_drop_mem;
ND("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
na->na_lut.objsize);
/* ring configuration may have changed, fetch from the card */
@ -2284,7 +2284,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
/* This netmap adapter is attached to an ifnet. */
unsigned mtu = nm_os_ifnet_mtu(na->ifp);
ND("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
if (na->rx_buf_maxsize == 0) {
@ -2381,7 +2381,7 @@ nm_sync_finalize(struct netmap_kring *kring)
*/
kring->ring->tail = kring->rtail = kring->nr_hwtail;
ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
kring->name, kring->nr_hwcur, kring->nr_hwtail,
kring->rhead, kring->rcur, kring->rtail);
}
@ -3711,7 +3711,8 @@ netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
hwna->up.nm_dtor = netmap_hw_dtor;
}
if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
nm_prinf("%s: netmap queues/slots: TX %d/%d, RX %d/%d\n",
hwna->up.name,
hwna->up.num_tx_rings, hwna->up.num_tx_desc,
hwna->up.num_rx_rings, hwna->up.num_rx_desc);
return 0;
@ -3779,7 +3780,7 @@ netmap_hw_krings_create(struct netmap_adapter *na)
for (i = na->num_rx_rings; i < lim; i++) {
mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
}
ND("initialized sw rx queue %d", na->num_rx_rings);
nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
}
return ret;
}
@ -3880,13 +3881,13 @@ netmap_transmit(struct ifnet *ifp, struct mbuf *m)
if (!netmap_generic_hwcsum) {
if (nm_os_mbuf_has_csum_offld(m)) {
RD(1, "%s drop mbuf that needs checksum offload", na->name);
nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
goto done;
}
}
if (nm_os_mbuf_has_seg_offld(m)) {
RD(1, "%s drop mbuf that needs generic segmentation offload", na->name);
nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
goto done;
}
@ -3906,11 +3907,11 @@ netmap_transmit(struct ifnet *ifp, struct mbuf *m)
if (busy < 0)
busy += kring->nkr_num_slots;
if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
RD(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
} else {
mbq_enqueue(q, m);
ND(2, "%s %d bufs in queue", na->name, mbq_len(q));
nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
/* notify outside the lock */
m = NULL;
error = 0;
@ -3946,7 +3947,7 @@ netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
int new_hwofs, lim;
if (!nm_native_on(na)) {
ND("interface not in native netmap mode");
nm_prdis("interface not in native netmap mode");
return NULL; /* nothing to reinitialize */
}
@ -4088,7 +4089,7 @@ netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
return NM_IRQ_PASS;
if (na->na_flags & NAF_SKIP_INTR) {
ND("use regular interrupt");
nm_prdis("use regular interrupt");
return NM_IRQ_PASS;
}
@ -4129,6 +4130,25 @@ nm_clear_native_flags(struct netmap_adapter *na)
na->na_flags &= ~NAF_NETMAP_ON;
}
void
netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
{
enum txrx t;
for_rx_tx(t) {
int i;
for (i = 0; i < netmap_real_rings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (onoff && nm_kring_pending_on(kring))
kring->nr_mode = NKR_NETMAP_ON;
else if (!onoff && nm_kring_pending_off(kring))
kring->nr_mode = NKR_NETMAP_OFF;
}
}
}
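
(Aside, not part of the diff: netmap_krings_mode_commit() centralizes the pending-mode bookkeeping that drivers used to open-code; for every real ring it flips nr_mode to NKR_NETMAP_ON or NKR_NETMAP_OFF depending on onoff and the kring's pending state. The ptnet, vtnet, VALE and generic-adapter hunks elsewhere in this commit collapse their duplicated for_rx_tx loops into single calls to this helper.)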
/*
* Module loader and unloader
*

View File

@ -203,14 +203,14 @@ nm_find_bridge(const char *name, int create, struct netmap_bdg_ops *ops)
} else if (x->bdg_namelen != namelen) {
continue;
} else if (strncmp(name, x->bdg_basename, namelen) == 0) {
ND("found '%.*s' at %d", namelen, name, i);
nm_prdis("found '%.*s' at %d", namelen, name, i);
b = x;
break;
}
}
if (i == num_bridges && b) { /* name not found, can create entry */
/* initialize the bridge */
ND("create new bridge %s with ports %d", b->bdg_basename,
nm_prdis("create new bridge %s with ports %d", b->bdg_basename,
b->bdg_active_ports);
b->ht = nm_os_malloc(sizeof(struct nm_hash_ent) * NM_BDG_HASH);
if (b->ht == NULL) {
@ -239,7 +239,7 @@ netmap_bdg_free(struct nm_bridge *b)
return EBUSY;
}
ND("marking bridge %s as free", b->bdg_basename);
nm_prdis("marking bridge %s as free", b->bdg_basename);
nm_os_free(b->ht);
memset(&b->bdg_ops, 0, sizeof(b->bdg_ops));
memset(&b->bdg_saved_ops, 0, sizeof(b->bdg_saved_ops));
@ -312,13 +312,13 @@ netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw)
memcpy(b->tmp_bdg_port_index, b->bdg_port_index, sizeof(b->tmp_bdg_port_index));
for (i = 0; (hw >= 0 || sw >= 0) && i < lim; ) {
if (hw >= 0 && tmp[i] == hw) {
ND("detach hw %d at %d", hw, i);
nm_prdis("detach hw %d at %d", hw, i);
lim--; /* point to last active port */
tmp[i] = tmp[lim]; /* swap with i */
tmp[lim] = hw; /* now this is inactive */
hw = -1;
} else if (sw >= 0 && tmp[i] == sw) {
ND("detach sw %d at %d", sw, i);
nm_prdis("detach sw %d at %d", sw, i);
lim--;
tmp[i] = tmp[lim];
tmp[lim] = sw;
@ -342,7 +342,7 @@ netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw)
b->bdg_active_ports = lim;
BDG_WUNLOCK(b);
ND("now %d active ports", lim);
nm_prdis("now %d active ports", lim);
netmap_bdg_free(b);
}
@ -408,7 +408,7 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
b = nm_find_bridge(nr_name, create, ops);
if (b == NULL) {
ND("no bridges available for '%s'", nr_name);
nm_prdis("no bridges available for '%s'", nr_name);
return (create ? ENOMEM : ENXIO);
}
if (strlen(nr_name) < b->bdg_namelen) /* impossible */
@ -425,10 +425,10 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
for (j = 0; j < b->bdg_active_ports; j++) {
i = b->bdg_port_index[j];
vpna = b->bdg_ports[i];
ND("checking %s", vpna->up.name);
nm_prdis("checking %s", vpna->up.name);
if (!strcmp(vpna->up.name, nr_name)) {
netmap_adapter_get(&vpna->up);
ND("found existing if %s refs %d", nr_name)
nm_prdis("found existing if %s refs %d", nr_name)
*na = &vpna->up;
return 0;
}
@ -445,7 +445,7 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
/* record the next two ports available, but do not allocate yet */
cand = b->bdg_port_index[b->bdg_active_ports];
cand2 = b->bdg_port_index[b->bdg_active_ports + 1];
ND("+++ bridge %s port %s used %d avail %d %d",
nm_prdis("+++ bridge %s port %s used %d avail %d %d",
b->bdg_basename, ifname, b->bdg_active_ports, cand, cand2);
/*
@ -515,7 +515,7 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
BDG_WLOCK(b);
vpna->bdg_port = cand;
ND("NIC %p to bridge port %d", vpna, cand);
nm_prdis("NIC %p to bridge port %d", vpna, cand);
/* bind the port to the bridge (virtual ports are not active) */
b->bdg_ports[cand] = vpna;
vpna->na_bdg = b;
@ -526,9 +526,9 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
hostna->bdg_port = cand2;
hostna->na_bdg = b;
b->bdg_active_ports++;
ND("host %p to bridge port %d", hostna, cand2);
nm_prdis("host %p to bridge port %d", hostna, cand2);
}
ND("if %s refs %d", ifname, vpna->up.na_refcount);
nm_prdis("if %s refs %d", ifname, vpna->up.na_refcount);
BDG_WUNLOCK(b);
*na = &vpna->up;
netmap_adapter_get(*na);
@ -920,8 +920,6 @@ netmap_vp_reg(struct netmap_adapter *na, int onoff)
{
struct netmap_vp_adapter *vpna =
(struct netmap_vp_adapter*)na;
enum txrx t;
int i;
/* persistent ports may be put in netmap mode
* before being attached to a bridge
@ -929,14 +927,7 @@ netmap_vp_reg(struct netmap_adapter *na, int onoff)
if (vpna->na_bdg)
BDG_WLOCK(vpna->na_bdg);
if (onoff) {
for_rx_tx(t) {
for (i = 0; i < netmap_real_rings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_on(kring))
kring->nr_mode = NKR_NETMAP_ON;
}
}
netmap_krings_mode_commit(na, onoff);
if (na->active_fds == 0)
na->na_flags |= NAF_NETMAP_ON;
/* XXX on FreeBSD, persistent VALE ports should also
@ -945,14 +936,7 @@ netmap_vp_reg(struct netmap_adapter *na, int onoff)
} else {
if (na->active_fds == 0)
na->na_flags &= ~NAF_NETMAP_ON;
for_rx_tx(t) {
for (i = 0; i < netmap_real_rings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_off(kring))
kring->nr_mode = NKR_NETMAP_OFF;
}
}
netmap_krings_mode_commit(na, onoff);
}
if (vpna->na_bdg)
BDG_WUNLOCK(vpna->na_bdg);
@ -1077,7 +1061,7 @@ netmap_bwrap_dtor(struct netmap_adapter *na)
(bh ? bna->host.bdg_port : -1));
}
ND("na %p", na);
nm_prdis("na %p", na);
na->ifp = NULL;
bna->host.up.ifp = NULL;
hwna->na_vp = bna->saved_na_vp;
@ -1182,7 +1166,7 @@ netmap_bwrap_reg(struct netmap_adapter *na, int onoff)
int error, i;
enum txrx t;
ND("%s %s", na->name, onoff ? "on" : "off");
nm_prdis("%s %s", na->name, onoff ? "on" : "off");
if (onoff) {
/* netmap_do_regif has been called on the bwrap na.
@ -1387,7 +1371,7 @@ netmap_bwrap_krings_delete_common(struct netmap_adapter *na)
enum txrx t;
int i;
ND("%s", na->name);
nm_prdis("%s", na->name);
/* decrement the usage counter for all the hwna krings */
for_rx_tx(t) {
@ -1414,7 +1398,7 @@ netmap_bwrap_notify(struct netmap_kring *kring, int flags)
struct netmap_kring *hw_kring;
int error;
ND("%s: na %s hwna %s",
nm_prdis("%s: na %s hwna %s",
(kring ? kring->name : "NULL!"),
(na ? na->name : "NULL!"),
(hwna ? hwna->name : "NULL!"));
@ -1426,7 +1410,7 @@ netmap_bwrap_notify(struct netmap_kring *kring, int flags)
/* first step: simulate a user wakeup on the rx ring */
netmap_vp_rxsync(kring, flags);
ND("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
nm_prdis("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
na->name, ring_n,
kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease,
kring->rhead, kring->rcur, kring->rtail,
@ -1445,7 +1429,7 @@ netmap_bwrap_notify(struct netmap_kring *kring, int flags)
/* fourth step: the user goes to sleep again, causing another rxsync */
netmap_vp_rxsync(kring, flags);
ND("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
nm_prdis("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
na->name, ring_n,
kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease,
kring->rhead, kring->rcur, kring->rtail,
@ -1595,7 +1579,7 @@ netmap_bwrap_attach_common(struct netmap_adapter *na,
if (hwna->na_flags & NAF_MOREFRAG)
na->na_flags |= NAF_MOREFRAG;
ND("%s<->%s txr %d txd %d rxr %d rxd %d",
nm_prdis("%s<->%s txr %d txd %d rxr %d rxd %d",
na->name, ifp->if_xname,
na->num_tx_rings, na->num_tx_desc,
na->num_rx_rings, na->num_rx_desc);

View File

@ -1350,8 +1350,6 @@ nm_os_kctx_destroy(struct nm_kctx *nmk)
void
nm_os_selwakeup(struct nm_selinfo *si)
{
if (netmap_verbose)
nm_prinf("on knote %p", &si->si.si_note);
selwakeuppri(&si->si, PI_NET);
taskqueue_enqueue(si->ntfytq, &si->ntfytask);
}

View File

@ -237,18 +237,7 @@ generic_netmap_unregister(struct netmap_adapter *na)
nm_os_catch_tx(gna, 0);
}
for_each_rx_kring_h(r, kring, na) {
if (nm_kring_pending_off(kring)) {
nm_prinf("Emulated adapter: ring '%s' deactivated", kring->name);
kring->nr_mode = NKR_NETMAP_OFF;
}
}
for_each_tx_kring_h(r, kring, na) {
if (nm_kring_pending_off(kring)) {
kring->nr_mode = NKR_NETMAP_OFF;
nm_prinf("Emulated adapter: ring '%s' deactivated", kring->name);
}
}
netmap_krings_mode_commit(na, /*onoff=*/0);
for_each_rx_kring(r, kring, na) {
/* Free the mbufs still pending in the RX queues,
@ -371,19 +360,7 @@ generic_netmap_register(struct netmap_adapter *na, int enable)
}
}
for_each_rx_kring_h(r, kring, na) {
if (nm_kring_pending_on(kring)) {
nm_prinf("Emulated adapter: ring '%s' activated", kring->name);
kring->nr_mode = NKR_NETMAP_ON;
}
}
for_each_tx_kring_h(r, kring, na) {
if (nm_kring_pending_on(kring)) {
nm_prinf("Emulated adapter: ring '%s' activated", kring->name);
kring->nr_mode = NKR_NETMAP_ON;
}
}
netmap_krings_mode_commit(na, /*onoff=*/1);
for_each_tx_kring(r, kring, na) {
/* Initialize tx_pool and tx_event. */

View File

@ -271,7 +271,7 @@ typedef struct hrtimer{
__LINE__, __FUNCTION__, ##__VA_ARGS__); \
} while (0)
/* Disabled printf (used to be ND). */
/* Disabled printf (used to be nm_prdis). */
#define nm_prdis(format, ...)
/* Rate limited, lps indicates how many per second. */
@ -286,11 +286,6 @@ typedef struct hrtimer{
nm_prinf(format, ##__VA_ARGS__); \
} while (0)
/* Old macros. */
#define ND nm_prdis
#define D nm_prerr
#define RD nm_prlim
struct netmap_adapter;
struct nm_bdg_fwd;
struct nm_bridge;
@ -1149,7 +1144,7 @@ nm_kr_rxspace(struct netmap_kring *k)
int space = k->nr_hwtail - k->nr_hwcur;
if (space < 0)
space += k->nkr_num_slots;
ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
nm_prdis("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
return space;
}
@ -1375,6 +1370,8 @@ nm_update_hostrings_mode(struct netmap_adapter *na)
void nm_set_native_flags(struct netmap_adapter *);
void nm_clear_native_flags(struct netmap_adapter *);
void netmap_krings_mode_commit(struct netmap_adapter *na, int onoff);
/*
* nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap
* kthreads.
@ -1402,7 +1399,7 @@ uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *);
#if 1 /* debug version */
#define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \
RD(5, "bad addr/len ring %d slot %d idx %d len %d", \
nm_prlim(5, "bad addr/len ring %d slot %d idx %d len %d", \
kring->ring_id, nm_i, slot->buf_idx, len); \
if (_l > NETMAP_BUF_SIZE(_na)) \
_l = NETMAP_BUF_SIZE(_na); \
@ -1564,7 +1561,7 @@ void __netmap_adapter_get(struct netmap_adapter *na);
#define netmap_adapter_get(na) \
do { \
struct netmap_adapter *__na = na; \
D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
nm_prinf("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
__netmap_adapter_get(__na); \
} while (0)
@ -1573,7 +1570,7 @@ int __netmap_adapter_put(struct netmap_adapter *na);
#define netmap_adapter_put(na) \
({ \
struct netmap_adapter *__na = na; \
D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
nm_prinf("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
__netmap_adapter_put(__na); \
})
@ -1735,7 +1732,7 @@ int nm_iommu_group_id(bus_dma_tag_t dev);
addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
D("dma mapping error");
nm_prerr("dma mapping error");
/* goto dma_error; See e1000_put_txbuf() */
/* XXX reset */
}
@ -1994,6 +1991,12 @@ nm_si_user(struct netmap_priv_d *priv, enum txrx t)
#ifdef WITH_PIPES
int netmap_pipe_txsync(struct netmap_kring *txkring, int flags);
int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags);
int netmap_pipe_krings_create_both(struct netmap_adapter *na,
struct netmap_adapter *ona);
void netmap_pipe_krings_delete_both(struct netmap_adapter *na,
struct netmap_adapter *ona);
int netmap_pipe_reg_both(struct netmap_adapter *na,
struct netmap_adapter *ona);
#endif /* WITH_PIPES */
#ifdef WITH_MONITOR
@ -2328,7 +2331,7 @@ nm_os_get_mbuf(struct ifnet *ifp, int len)
m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
m->m_ext.ext_free = (void *)void_mbuf_dtor;
m->m_ext.ext_type = EXT_EXTREF;
ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
nm_prdis(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
}
return m;
}

View File

@ -365,7 +365,14 @@ netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
/* Request for the legacy control API. Convert it to a
* NIOCCTRL request. */
struct nmreq *nmr = (struct nmreq *) data;
struct nmreq_header *hdr = nmreq_from_legacy(nmr, cmd);
struct nmreq_header *hdr;
if (nmr->nr_version < 11) {
nm_prerr("Minimum supported API is 11 (requested %u)",
nmr->nr_version);
return EINVAL;
}
hdr = nmreq_from_legacy(nmr, cmd);
if (hdr == NULL) { /* out of memory */
return ENOMEM;
}
@ -390,14 +397,14 @@ netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
#ifdef __FreeBSD__
case FIONBIO:
case FIOASYNC:
ND("FIONBIO/FIOASYNC are no-ops");
/* FIONBIO/FIOASYNC are no-ops. */
break;
case BIOCIMMEDIATE:
case BIOCGHDRCMPLT:
case BIOCSHDRCMPLT:
case BIOCSSEESENT:
D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
/* Ignore these commands. */
break;
default: /* allow device-specific ioctls */
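
(Aside, not part of the diff: with the new guard, a legacy request must declare at least API version 11 before it is converted to a NIOCCTRL request. A hypothetical user-space fragment that satisfies the check, assuming the usual netmap headers:)

#include <sys/ioctl.h>
#include <net/if.h>
#include <net/netmap.h>
#include <string.h>

static int
legacy_regif(int fd, const char *ifname)
{
	struct nmreq req;

	memset(&req, 0, sizeof(req));
	strlcpy(req.nr_name, ifname, sizeof(req.nr_name));
	req.nr_version = NETMAP_API;	/* current API, well above 11 */
	return (ioctl(fd, NIOCREGIF, &req));
}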

View File

@ -979,7 +979,7 @@ netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
continue;
ofs = ofs + relofs;
ND("%s: return offset %d (cluster %d) for pointer %p",
nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
p->name, ofs, i, vaddr);
return ofs;
}
@ -1043,7 +1043,7 @@ netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_
if (index)
*index = i * 32 + j;
}
ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);
nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);
if (start)
*start = i;
@ -1143,7 +1143,7 @@ netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
*head = cur; /* restore */
break;
}
ND(5, "allocate buffer %d -> %d", *head, cur);
nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
*p = cur; /* link to previous head */
}
@ -1160,7 +1160,7 @@ netmap_extra_free(struct netmap_adapter *na, uint32_t head)
struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
uint32_t i, cur, *buf;
ND("freeing the extra list");
nm_prdis("freeing the extra list");
for (i = 0; head >=2 && head < p->objtotal; i++) {
cur = head;
buf = lut[head].vaddr;
@ -1197,7 +1197,7 @@ netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
slot[i].ptr = 0;
}
ND("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
return (0);
cleanup:
@ -1245,7 +1245,7 @@ netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
if (slot[i].buf_idx > 1)
netmap_free_buf(nmd, slot[i].buf_idx);
}
ND("%s: released some buffers, available: %u",
nm_prdis("%s: released some buffers, available: %u",
p->name, p->objfree);
}
@ -1539,7 +1539,7 @@ netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
(void)lut;
nm_prerr("unsupported on Windows");
#else /* linux */
ND("unmapping and freeing plut for %s", na->name);
nm_prdis("unmapping and freeing plut for %s", na->name);
if (lut->plut == NULL)
return 0;
for (i = 0; i < lim; i += p->_clustentries) {
@ -1577,11 +1577,11 @@ netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
#else /* linux */
if (lut->plut != NULL) {
ND("plut already allocated for %s", na->name);
nm_prdis("plut already allocated for %s", na->name);
return 0;
}
ND("allocating physical lut for %s", na->name);
nm_prdis("allocating physical lut for %s", na->name);
lut->plut = nm_alloc_plut(lim);
if (lut->plut == NULL) {
nm_prerr("Failed to allocate physical lut for %s", na->name);
@ -1775,7 +1775,7 @@ netmap_mem2_config(struct netmap_mem_d *nmd)
if (!netmap_mem_params_changed(nmd->params))
goto out;
ND("reconfiguring");
nm_prdis("reconfiguring");
if (nmd->flags & NETMAP_MEM_FINALIZED) {
/* reset previous allocation */
@ -1870,10 +1870,10 @@ netmap_free_rings(struct netmap_adapter *na)
if (netmap_debug & NM_DEBUG_MEM)
nm_prinf("deleting ring %s", kring->name);
if (!(kring->nr_kflags & NKR_FAKERING)) {
ND("freeing bufs for %s", kring->name);
nm_prdis("freeing bufs for %s", kring->name);
netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
} else {
ND("NOT freeing bufs for %s", kring->name);
nm_prdis("NOT freeing bufs for %s", kring->name);
}
netmap_ring_free(na->nm_mem, ring);
kring->ring = NULL;
@ -1918,7 +1918,7 @@ netmap_mem2_rings_create(struct netmap_adapter *na)
nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
goto cleanup;
}
ND("txring at %p", ring);
nm_prdis("txring at %p", ring);
kring->ring = ring;
*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
*(int64_t *)(uintptr_t)&ring->buf_ofs =
@ -1932,9 +1932,9 @@ netmap_mem2_rings_create(struct netmap_adapter *na)
ring->tail = kring->rtail;
*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
netmap_mem_bufsize(na->nm_mem);
ND("%s h %d c %d t %d", kring->name,
nm_prdis("%s h %d c %d t %d", kring->name,
ring->head, ring->cur, ring->tail);
ND("initializing slots for %s_ring", nm_txrx2str(t));
nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
if (!(kring->nr_kflags & NKR_FAKERING)) {
/* this is a real ring */
if (netmap_debug & NM_DEBUG_MEM)
@ -2306,19 +2306,19 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
#if !defined(linux) && !defined(_WIN32)
p->lut[j].paddr = vtophys(p->lut[j].vaddr);
#endif
ND("%s %d at %p", p->name, j, p->lut[j].vaddr);
nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
noff = off + p->_objsize;
if (noff < PAGE_SIZE) {
off = noff;
continue;
}
ND("too big, recomputing offset...");
nm_prdis("too big, recomputing offset...");
while (noff >= PAGE_SIZE) {
char *old_clust = clust;
noff -= PAGE_SIZE;
clust = nm_os_extmem_nextpage(nme->os);
nr_pages--;
ND("noff %zu page %p nr_pages %d", noff,
nm_prdis("noff %zu page %p nr_pages %d", noff,
page_to_virt(*pages), nr_pages);
if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
(nr_pages == 0 ||
@ -2328,7 +2328,7 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
* drop this object
* */
p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U);
ND("non contiguous at off %zu, drop", noff);
nm_prdis("non contiguous at off %zu, drop", noff);
}
if (nr_pages == 0)
break;
@ -2338,7 +2338,7 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
p->objtotal = j;
p->numclusters = p->objtotal;
p->memtotal = j * p->_objsize;
ND("%d memtotal %u", j, p->memtotal);
nm_prdis("%d memtotal %u", j, p->memtotal);
}
netmap_mem_ext_register(nme);
@ -2442,7 +2442,7 @@ netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
} else {
ptnmd->pt_ifs = curr->next;
}
D("removed (ifp=%p,nifp_offset=%u)",
nm_prinf("removed (ifp=%p,nifp_offset=%u)",
curr->ifp, curr->nifp_offset);
nm_os_free(curr);
ret = 0;
@ -2498,7 +2498,7 @@ netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
vm_paddr_t paddr;
/* if the offset is valid, just return csb->base_addr + off */
paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
ND("off %lx padr %lx", off, (unsigned long)paddr);
nm_prdis("off %lx padr %lx", off, (unsigned long)paddr);
return paddr;
}
@ -2528,7 +2528,7 @@ netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
goto out;
if (ptnmd->ptn_dev == NULL) {
D("ptnetmap memdev not attached");
nm_prerr("ptnetmap memdev not attached");
error = ENOMEM;
goto out;
}
@ -2547,10 +2547,10 @@ netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
/* allocate the lut */
if (ptnmd->buf_lut.lut == NULL) {
D("allocating lut");
nm_prinf("allocating lut");
ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
if (ptnmd->buf_lut.lut == NULL) {
D("lut allocation failed");
nm_prerr("lut allocation failed");
return ENOMEM;
}
}
@ -2615,11 +2615,11 @@ netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
if (nmd == NULL)
return;
if (netmap_verbose)
D("deleting %p", nmd);
nm_prinf("deleting %p", nmd);
if (nmd->active > 0)
D("bug: deleting mem allocator with active=%d!", nmd->active);
nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
if (netmap_verbose)
D("done deleting %p", nmd);
nm_prinf("done deleting %p", nmd);
NMA_LOCK_DESTROY(nmd);
nm_os_free(nmd);
}
@ -2633,7 +2633,7 @@ netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv
ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
if (ptif == NULL) {
D("Error: interface %p is not in passthrough", na->ifp);
nm_prerr("interface %s is not in passthrough", na->name);
goto out;
}
@ -2650,7 +2650,7 @@ netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
if (ptif == NULL) {
D("Error: interface %p is not in passthrough", na->ifp);
nm_prerr("interface %s is not in passthrough", na->name);
}
}
@ -2664,7 +2664,7 @@ netmap_mem_pt_guest_rings_create(struct netmap_adapter *na)
ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
if (ptif == NULL) {
D("Error: interface %p is not in passthrough", na->ifp);
nm_prerr("interface %s is not in passthrough", na->name);
goto out;
}

View File

@ -139,7 +139,7 @@ nm_is_zmon(struct netmap_adapter *na)
static int
netmap_monitor_txsync(struct netmap_kring *kring, int flags)
{
RD(1, "%s %x", kring->name, flags);
nm_prlim(1, "%s %x", kring->name, flags);
return EIO;
}
@ -158,7 +158,7 @@ netmap_monitor_rxsync(struct netmap_kring *kring, int flags)
/* parent left netmap mode */
return EIO;
}
ND("%s %x", kring->name, flags);
nm_prdis("%s %x", kring->name, flags);
kring->nr_hwcur = kring->rhead;
mb();
return 0;
@ -230,8 +230,8 @@ nm_monitor_dealloc(struct netmap_kring *kring)
{
if (kring->monitors) {
if (kring->n_monitors > 0) {
D("freeing not empty monitor array for %s (%d dangling monitors)!", kring->name,
kring->n_monitors);
nm_prerr("freeing not empty monitor array for %s (%d dangling monitors)!",
kring->name, kring->n_monitors);
}
nm_os_free(kring->monitors);
kring->monitors = NULL;
@ -270,7 +270,7 @@ nm_monitor_dummycb(struct netmap_kring *kring, int flags)
static void
nm_monitor_intercept_callbacks(struct netmap_kring *kring)
{
ND("intercept callbacks on %s", kring->name);
nm_prdis("intercept callbacks on %s", kring->name);
kring->mon_sync = kring->nm_sync != NULL ?
kring->nm_sync : nm_monitor_dummycb;
kring->mon_notify = kring->nm_notify;
@ -286,7 +286,7 @@ nm_monitor_intercept_callbacks(struct netmap_kring *kring)
static void
nm_monitor_restore_callbacks(struct netmap_kring *kring)
{
ND("restoring callbacks on %s", kring->name);
nm_prdis("restoring callbacks on %s", kring->name);
kring->nm_sync = kring->mon_sync;
kring->mon_sync = NULL;
if (kring->tx == NR_RX) {
@ -333,7 +333,7 @@ netmap_monitor_add(struct netmap_kring *mkring, struct netmap_kring *kring, int
if (nm_monitor_none(ikring)) {
/* this is the first monitor, intercept the callbacks */
ND("%s: intercept callbacks on %s", mkring->name, ikring->name);
nm_prdis("%s: intercept callbacks on %s", mkring->name, ikring->name);
nm_monitor_intercept_callbacks(ikring);
}
@ -513,11 +513,11 @@ netmap_monitor_reg_common(struct netmap_adapter *na, int onoff, int zmon)
int i;
enum txrx t, s;
ND("%p: onoff %d", na, onoff);
nm_prdis("%p: onoff %d", na, onoff);
if (onoff) {
if (pna == NULL) {
/* parent left netmap mode, fatal */
D("%s: internal error", na->name);
nm_prerr("%s: parent left netmap mode", na->name);
return ENXIO;
}
for_rx_tx(t) {
@ -592,7 +592,7 @@ netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx)
mlim; // = mkring->nkr_num_slots - 1;
if (mkring == NULL) {
RD(5, "NULL monitor on %s", kring->name);
nm_prlim(5, "NULL monitor on %s", kring->name);
return 0;
}
mring = mkring->ring;
@ -653,7 +653,7 @@ netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx)
tmp = ms->buf_idx;
ms->buf_idx = s->buf_idx;
s->buf_idx = tmp;
ND(5, "beg %d buf_idx %d", beg, tmp);
nm_prdis(5, "beg %d buf_idx %d", beg, tmp);
tmp = ms->len;
ms->len = s->len;
@ -770,7 +770,7 @@ netmap_monitor_parent_sync(struct netmap_kring *kring, u_int first_new, int new_
*dst = NMB(mkring->na, ms);
if (unlikely(copy_len > max_len)) {
RD(5, "%s->%s: truncating %d to %d", kring->name,
nm_prlim(5, "%s->%s: truncating %d to %d", kring->name,
mkring->name, copy_len, max_len);
copy_len = max_len;
}
@ -849,7 +849,7 @@ static int
netmap_monitor_parent_notify(struct netmap_kring *kring, int flags)
{
int (*notify)(struct netmap_kring*, int);
ND(5, "%s %x", kring->name, flags);
nm_prdis(5, "%s %x", kring->name, flags);
/* ?xsync callbacks have tryget called by their callers
* (NIOCREGIF and poll()), but here we have to call it
* by ourself
@ -909,12 +909,12 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
req->nr_flags |= (NR_MONITOR_TX | NR_MONITOR_RX);
}
if ((req->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX)) == 0) {
ND("not a monitor");
nm_prdis("not a monitor");
return 0;
}
/* this is a request for a monitor adapter */
ND("flags %lx", req->nr_flags);
nm_prdis("flags %lx", req->nr_flags);
/* First, try to find the adapter that we want to monitor.
* We use the same req, after we have turned off the monitor flags.
@ -927,24 +927,23 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
error = netmap_get_na(hdr, &pna, &ifp, nmd, create);
hdr->nr_body = (uintptr_t)req;
if (error) {
D("parent lookup failed: %d", error);
nm_prerr("parent lookup failed: %d", error);
return error;
}
ND("found parent: %s", pna->name);
nm_prdis("found parent: %s", pna->name);
if (!nm_netmap_on(pna)) {
/* parent not in netmap mode */
/* XXX we can wait for the parent to enter netmap mode,
* by intercepting its nm_register callback (2014-03-16)
*/
D("%s not in netmap mode", pna->name);
nm_prerr("%s not in netmap mode", pna->name);
error = EINVAL;
goto put_out;
}
mna = nm_os_malloc(sizeof(*mna));
if (mna == NULL) {
D("memory error");
error = ENOMEM;
goto put_out;
}
@ -954,7 +953,7 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
error = netmap_interp_ringid(&mna->priv, req->nr_mode, req->nr_ringid,
req->nr_flags);
if (error) {
D("ringid error");
nm_prerr("ringid error");
goto free_out;
}
snprintf(mna->up.name, sizeof(mna->up.name), "%s/%s%s%s#%lu", pna->name,
@ -1013,7 +1012,7 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
error = netmap_attach_common(&mna->up);
if (error) {
D("attach_common error");
nm_prerr("netmap_attach_common failed");
goto mem_put_out;
}
@ -1024,7 +1023,7 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
netmap_adapter_get(*na);
/* keep the reference to the parent */
ND("monitor ok");
nm_prdis("monitor ok");
/* drop the reference to the ifp, if any */
if (ifp)

View File

@ -74,15 +74,7 @@
#ifdef WITH_NMNULL
static int
netmap_null_txsync(struct netmap_kring *kring, int flags)
{
(void)kring;
(void)flags;
return 0;
}
static int
netmap_null_rxsync(struct netmap_kring *kring, int flags)
netmap_null_sync(struct netmap_kring *kring, int flags)
{
(void)kring;
(void)flags;
@ -95,12 +87,6 @@ netmap_null_krings_create(struct netmap_adapter *na)
return netmap_krings_create(na, 0);
}
static void
netmap_null_krings_delete(struct netmap_adapter *na)
{
netmap_krings_delete(na);
}
static int
netmap_null_reg(struct netmap_adapter *na, int onoff)
{
@ -153,11 +139,11 @@ netmap_get_null_na(struct nmreq_header *hdr, struct netmap_adapter **na,
}
snprintf(nna->up.name, sizeof(nna->up.name), "null:%s", hdr->nr_name);
nna->up.nm_txsync = netmap_null_txsync;
nna->up.nm_rxsync = netmap_null_rxsync;
nna->up.nm_txsync = netmap_null_sync;
nna->up.nm_rxsync = netmap_null_sync;
nna->up.nm_register = netmap_null_reg;
nna->up.nm_krings_create = netmap_null_krings_create;
nna->up.nm_krings_delete = netmap_null_krings_delete;
nna->up.nm_krings_delete = netmap_krings_delete;
nna->up.nm_bdg_attach = netmap_null_bdg_attach;
nna->up.nm_mem = netmap_mem_get(nmd);

View File

@ -82,16 +82,16 @@ gso_fix_segment(uint8_t *pkt, size_t len, u_int ipv4, u_int iphlen, u_int tcp,
if (ipv4) {
/* Set the IPv4 "Total Length" field. */
iph->tot_len = htobe16(len);
ND("ip total length %u", be16toh(ip->tot_len));
nm_prdis("ip total length %u", be16toh(ip->tot_len));
/* Set the IPv4 "Identification" field. */
iph->id = htobe16(be16toh(iph->id) + idx);
ND("ip identification %u", be16toh(iph->id));
nm_prdis("ip identification %u", be16toh(iph->id));
/* Compute and insert the IPv4 header checksum. */
iph->check = 0;
iph->check = nm_os_csum_ipv4(iph);
ND("IP csum %x", be16toh(iph->check));
nm_prdis("IP csum %x", be16toh(iph->check));
} else {
/* Set the IPv6 "Payload Len" field. */
ip6h->payload_len = htobe16(len-iphlen);
@ -102,13 +102,13 @@ gso_fix_segment(uint8_t *pkt, size_t len, u_int ipv4, u_int iphlen, u_int tcp,
/* Set the TCP sequence number. */
tcph->seq = htobe32(be32toh(tcph->seq) + segmented_bytes);
ND("tcp seq %u", be32toh(tcph->seq));
nm_prdis("tcp seq %u", be32toh(tcph->seq));
/* Zero the PSH and FIN TCP flags if this is not the last
segment. */
if (!last_segment)
tcph->flags &= ~(0x8 | 0x1);
ND("last_segment %u", last_segment);
nm_prdis("last_segment %u", last_segment);
check = &tcph->check;
check_data = (uint8_t *)tcph;
@ -129,7 +129,7 @@ gso_fix_segment(uint8_t *pkt, size_t len, u_int ipv4, u_int iphlen, u_int tcp,
else
nm_os_csum_tcpudp_ipv6(ip6h, check_data, len-iphlen, check);
ND("TCP/UDP csum %x", be16toh(*check));
nm_prdis("TCP/UDP csum %x", be16toh(*check));
}
static inline int
@ -170,7 +170,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
u_int dst_slots = 0;
if (unlikely(ft_p == ft_end)) {
RD(1, "No source slots to process");
nm_prlim(1, "No source slots to process");
return;
}
@ -189,11 +189,11 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
/* Initial sanity check on the source virtio-net header. If
* something seems wrong, just drop the packet. */
if (src_len < na->up.virt_hdr_len) {
RD(1, "Short src vnet header, dropping");
nm_prlim(1, "Short src vnet header, dropping");
return;
}
if (unlikely(vnet_hdr_is_bad(vh))) {
RD(1, "Bad src vnet header, dropping");
nm_prlim(1, "Bad src vnet header, dropping");
return;
}
}
@ -266,7 +266,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
if (dst_slots >= *howmany) {
/* We still have work to do, but we've run out of
* dst slots, so we have to drop the packet. */
ND(1, "Not enough slots, dropping GSO packet");
nm_prdis(1, "Not enough slots, dropping GSO packet");
return;
}
@ -281,7 +281,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
* encapsulation. */
for (;;) {
if (src_len < ethhlen) {
RD(1, "Short GSO fragment [eth], dropping");
nm_prlim(1, "Short GSO fragment [eth], dropping");
return;
}
ethertype = be16toh(*((uint16_t *)
@ -297,7 +297,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
(gso_hdr + ethhlen);
if (src_len < ethhlen + 20) {
RD(1, "Short GSO fragment "
nm_prlim(1, "Short GSO fragment "
"[IPv4], dropping");
return;
}
@ -310,14 +310,14 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
iphlen = 40;
break;
default:
RD(1, "Unsupported ethertype, "
nm_prlim(1, "Unsupported ethertype, "
"dropping GSO packet");
return;
}
ND(3, "type=%04x", ethertype);
nm_prdis(3, "type=%04x", ethertype);
if (src_len < ethhlen + iphlen) {
RD(1, "Short GSO fragment [IP], dropping");
nm_prlim(1, "Short GSO fragment [IP], dropping");
return;
}
@ -329,7 +329,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
(gso_hdr + ethhlen + iphlen);
if (src_len < ethhlen + iphlen + 20) {
RD(1, "Short GSO fragment "
nm_prlim(1, "Short GSO fragment "
"[TCP], dropping");
return;
}
@ -340,11 +340,11 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
}
if (src_len < gso_hdr_len) {
RD(1, "Short GSO fragment [TCP/UDP], dropping");
nm_prlim(1, "Short GSO fragment [TCP/UDP], dropping");
return;
}
ND(3, "gso_hdr_len %u gso_mtu %d", gso_hdr_len,
nm_prdis(3, "gso_hdr_len %u gso_mtu %d", gso_hdr_len,
dst_na->mfs);
/* Advance source pointers. */
@ -386,7 +386,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
gso_idx, segmented_bytes,
src_len == 0 && ft_p + 1 == ft_end);
ND("frame %u completed with %d bytes", gso_idx, (int)gso_bytes);
nm_prdis("frame %u completed with %d bytes", gso_idx, (int)gso_bytes);
dst_slot->len = gso_bytes;
dst_slot->flags = 0;
dst_slots++;
@ -410,7 +410,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
src_len = ft_p->ft_len;
}
}
ND(3, "%d bytes segmented", segmented_bytes);
nm_prdis(3, "%d bytes segmented", segmented_bytes);
} else {
/* Address of a checksum field into a destination slot. */
@ -423,7 +423,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
/* Init 'check' if necessary. */
if (vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
if (unlikely(vh->csum_offset + vh->csum_start > src_len))
D("invalid checksum request");
nm_prerr("invalid checksum request");
else
check = (uint16_t *)(dst + vh->csum_start +
vh->csum_offset);
@ -468,7 +468,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
if (check && vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
*check = nm_os_csum_fold(csum);
}
ND(3, "using %u dst_slots", dst_slots);
nm_prdis(3, "using %u dst_slots", dst_slots);
/* A second pass on the destination slots to set the slot flags,
* using the right number of destination slots.
@ -485,7 +485,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
/* Update howmany and j. This is to commit the use of
* those slots in the destination ring. */
if (unlikely(dst_slots > *howmany)) {
D("Slot allocation error: This is a bug");
nm_prerr("bug: slot allocation error");
}
*j = j_cur;
*howmany -= dst_slots;

View File

@ -118,8 +118,8 @@ netmap_pipe_dealloc(struct netmap_adapter *na)
{
if (na->na_pipes) {
if (na->na_next_pipe > 0) {
D("freeing not empty pipe array for %s (%d dangling pipes)!", na->name,
na->na_next_pipe);
nm_prerr("freeing not empty pipe array for %s (%d dangling pipes)!",
na->name, na->na_next_pipe);
}
nm_os_free(na->na_pipes);
na->na_pipes = NULL;
@ -190,8 +190,8 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
int complete; /* did we see a complete packet ? */
struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;
ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
ND(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
nm_prdis("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
nm_prdis(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
txkring->nr_hwcur, txkring->nr_hwtail,
txkring->rcur, txkring->rhead, txkring->rtail);
@ -221,7 +221,7 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
txkring->nr_hwcur = k;
ND(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
nm_prdis(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
txkring->nr_hwcur, txkring->nr_hwtail,
txkring->rcur, txkring->rhead, txkring->rtail, k);
@ -242,8 +242,8 @@ netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
int m; /* slots to release */
struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;
ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
ND(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
nm_prdis("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
nm_prdis(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
rxkring->nr_hwcur, rxkring->nr_hwtail,
rxkring->rcur, rxkring->rhead, rxkring->rtail);
@ -274,7 +274,7 @@ netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
txkring->pipe_tail = nm_prev(k, lim);
rxkring->nr_hwcur = k;
ND(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
nm_prdis(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
rxkring->nr_hwcur, rxkring->nr_hwtail,
rxkring->rcur, rxkring->rhead, rxkring->rtail, k);
@ -312,6 +312,47 @@ netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
*/
int netmap_pipe_krings_create_both(struct netmap_adapter *na,
struct netmap_adapter *ona)
{
enum txrx t;
int error;
int i;
/* case 1) below */
nm_prdis("%p: case 1, create both ends", na);
error = netmap_krings_create(na, 0);
if (error)
return error;
/* create the krings of the other end */
error = netmap_krings_create(ona, 0);
if (error)
goto del_krings1;
/* cross link the krings and initialize the pipe_tails */
for_rx_tx(t) {
enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *k1 = NMR(na, t)[i],
*k2 = NMR(ona, r)[i];
k1->pipe = k2;
k2->pipe = k1;
/* mark all peer-adapter rings as fake */
k2->nr_kflags |= NKR_FAKERING;
/* init tails */
k1->pipe_tail = k1->nr_hwtail;
k2->pipe_tail = k2->nr_hwtail;
}
}
return 0;
del_krings1:
netmap_krings_delete(na);
return error;
}
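
The cross-linking loop above pairs ring i of each direction on one endpoint with the opposite-direction ring i on the peer. A comment-form sketch of the invariants it establishes (names as used in this file):

/*
 * After netmap_pipe_krings_create_both(na, ona), with r the
 * swapped direction of t:
 *
 *   NMR(na, t)[i]->pipe  == NMR(ona, r)[i]
 *   NMR(ona, r)[i]->pipe == NMR(na, t)[i]
 *
 * and every kring of the peer adapter ona carries NKR_FAKERING,
 * so ring and buffer allocation happens only once per pair.
 */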
/* netmap_pipe_krings_create.
*
* There are two cases:
@ -336,46 +377,83 @@ netmap_pipe_krings_create(struct netmap_adapter *na)
struct netmap_pipe_adapter *pna =
(struct netmap_pipe_adapter *)na;
struct netmap_adapter *ona = &pna->peer->up;
int error = 0;
if (pna->peer_ref)
return netmap_pipe_krings_create_both(na, ona);
return 0;
}
int
netmap_pipe_reg_both(struct netmap_adapter *na, struct netmap_adapter *ona)
{
int i, error = 0;
enum txrx t;
if (pna->peer_ref) {
int i;
for_rx_tx(t) {
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
/* case 1) above */
ND("%p: case 1, create both ends", na);
error = netmap_krings_create(na, 0);
if (error)
goto err;
/* create the krings of the other end */
error = netmap_krings_create(ona, 0);
if (error)
goto del_krings1;
/* cross link the krings and initialize the pipe_tails */
for_rx_tx(t) {
enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *k1 = NMR(na, t)[i],
*k2 = NMR(ona, r)[i];
k1->pipe = k2;
k2->pipe = k1;
/* mark all peer-adapter rings as fake */
k2->nr_kflags |= NKR_FAKERING;
/* init tails */
k1->pipe_tail = k1->nr_hwtail;
k2->pipe_tail = k2->nr_hwtail;
if (nm_kring_pending_on(kring)) {
/* mark the peer ring as needed */
kring->pipe->nr_kflags |= NKR_NEEDRING;
}
}
}
return 0;
del_krings1:
netmap_krings_delete(na);
err:
return error;
/* create all missing needed rings on the other end.
* Either our end, or the other, has been marked as
* fake, so the allocation will not be done twice.
*/
error = netmap_mem_rings_create(ona);
if (error)
return error;
/* In case of no error we put our rings in netmap mode */
for_rx_tx(t) {
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_on(kring)) {
struct netmap_kring *sring, *dring;
kring->nr_mode = NKR_NETMAP_ON;
if ((kring->nr_kflags & NKR_FAKERING) &&
(kring->pipe->nr_kflags & NKR_FAKERING)) {
/* this is a re-open of a pipe
* end-point kept alive by the other end.
* We need to leave everything as it is
*/
continue;
}
/* copy the buffers from the non-fake ring */
if (kring->nr_kflags & NKR_FAKERING) {
sring = kring->pipe;
dring = kring;
} else {
sring = kring;
dring = kring->pipe;
}
memcpy(dring->ring->slot,
sring->ring->slot,
sizeof(struct netmap_slot) *
sring->nkr_num_slots);
/* mark both rings as fake and needed,
* so that buffers will not be
* deleted by the standard machinery
* (we will delete them by ourselves in
* netmap_pipe_krings_delete)
*/
sring->nr_kflags |=
(NKR_FAKERING | NKR_NEEDRING);
dring->nr_kflags |=
(NKR_FAKERING | NKR_NEEDRING);
kring->nr_mode = NKR_NETMAP_ON;
}
}
}
return 0;
}
/* netmap_pipe_reg.
@ -417,110 +495,105 @@ netmap_pipe_reg(struct netmap_adapter *na, int onoff)
struct netmap_pipe_adapter *pna =
(struct netmap_pipe_adapter *)na;
struct netmap_adapter *ona = &pna->peer->up;
int i, error = 0;
enum txrx t;
int error = 0;
ND("%p: onoff %d", na, onoff);
nm_prdis("%p: onoff %d", na, onoff);
if (onoff) {
for_rx_tx(t) {
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_on(kring)) {
/* mark the peer ring as needed */
kring->pipe->nr_kflags |= NKR_NEEDRING;
}
}
}
/* create all missing needed rings on the other end.
* Either our end, or the other, has been marked as
* fake, so the allocation will not be done twice.
*/
error = netmap_mem_rings_create(ona);
if (error)
error = netmap_pipe_reg_both(na, ona);
if (error) {
return error;
/* In case of no error we put our rings in netmap mode */
for_rx_tx(t) {
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_on(kring)) {
struct netmap_kring *sring, *dring;
kring->nr_mode = NKR_NETMAP_ON;
if ((kring->nr_kflags & NKR_FAKERING) &&
(kring->pipe->nr_kflags & NKR_FAKERING)) {
/* this is a re-open of a pipe
* end-point kept alive by the other end.
* We need to leave everything as it is
*/
continue;
}
/* copy the buffers from the non-fake ring */
if (kring->nr_kflags & NKR_FAKERING) {
sring = kring->pipe;
dring = kring;
} else {
sring = kring;
dring = kring->pipe;
}
memcpy(dring->ring->slot,
sring->ring->slot,
sizeof(struct netmap_slot) *
sring->nkr_num_slots);
/* mark both rings as fake and needed,
* so that buffers will not be
* deleted by the standard machinery
* (we will delete them by ourselves in
* netmap_pipe_krings_delete)
*/
sring->nr_kflags |=
(NKR_FAKERING | NKR_NEEDRING);
dring->nr_kflags |=
(NKR_FAKERING | NKR_NEEDRING);
kring->nr_mode = NKR_NETMAP_ON;
}
}
}
if (na->active_fds == 0)
na->na_flags |= NAF_NETMAP_ON;
} else {
if (na->active_fds == 0)
na->na_flags &= ~NAF_NETMAP_ON;
for_rx_tx(t) {
for (i = 0; i < nma_get_nrings(na, t); i++) {
struct netmap_kring *kring = NMR(na, t)[i];
if (nm_kring_pending_off(kring)) {
kring->nr_mode = NKR_NETMAP_OFF;
}
}
}
netmap_krings_mode_commit(na, onoff);
}
if (na->active_fds) {
ND("active_fds %d", na->active_fds);
nm_prdis("active_fds %d", na->active_fds);
return 0;
}
if (pna->peer_ref) {
ND("%p: case 1.a or 2.a, nothing to do", na);
nm_prdis("%p: case 1.a or 2.a, nothing to do", na);
return 0;
}
if (onoff) {
ND("%p: case 1.b, drop peer", na);
nm_prdis("%p: case 1.b, drop peer", na);
pna->peer->peer_ref = 0;
netmap_adapter_put(na);
} else {
ND("%p: case 2.b, grab peer", na);
nm_prdis("%p: case 2.b, grab peer", na);
netmap_adapter_get(na);
pna->peer->peer_ref = 1;
}
return error;
}
void
netmap_pipe_krings_delete_both(struct netmap_adapter *na,
struct netmap_adapter *ona)
{
struct netmap_adapter *sna;
enum txrx t;
int i;
/* case 1) below */
nm_prdis("%p: case 1, deleting everything", na);
/* To avoid double-frees we zero-out all the buffers in the kernel part
* of each ring. The reason is this: If the user is behaving correctly,
* all buffers are found in exactly one slot in the userspace part of
* some ring. If the user is not behaving correctly, we cannot release
* buffers cleanly anyway. In the latter case, the allocator will
* return to a clean state only when all its users will close.
*/
sna = na;
cleanup:
for_rx_tx(t) {
for (i = 0; i < nma_get_nrings(sna, t); i++) {
struct netmap_kring *kring = NMR(sna, t)[i];
struct netmap_ring *ring = kring->ring;
uint32_t j, lim = kring->nkr_num_slots - 1;
nm_prdis("%s ring %p hwtail %u hwcur %u",
kring->name, ring, kring->nr_hwtail, kring->nr_hwcur);
if (ring == NULL)
continue;
if (kring->tx == NR_RX)
ring->slot[kring->pipe_tail].buf_idx = 0;
for (j = nm_next(kring->pipe_tail, lim);
j != kring->nr_hwcur;
j = nm_next(j, lim))
{
nm_prdis("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx);
ring->slot[j].buf_idx = 0;
}
kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING);
}
}
if (sna != ona && ona->tx_rings) {
sna = ona;
goto cleanup;
}
netmap_mem_rings_delete(na);
netmap_krings_delete(na); /* also zeroes tx_rings etc. */
if (ona->tx_rings == NULL) {
/* already deleted, we must be on an
* cleanup-after-error path */
return;
}
netmap_mem_rings_delete(ona);
netmap_krings_delete(ona);
}
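
Reading the zeroing pass above: on every kring it clears buf_idx for the slots strictly between pipe_tail and nr_hwcur (and, on RX rings, for slot[pipe_tail] itself). A comment-form sketch of the index ranges involved, as I read the loop:

/*
 * Slots cleared by the pass above (indices mod nkr_num_slots):
 *
 *   pipe_tail + 1 ... hwcur - 1     on all rings
 *   pipe_tail                       additionally on RX rings
 *
 * The remaining slots are the ones exposed to userspace; their
 * buffers are released by the normal unregister path, so clearing
 * only the kernel-side slots is what keeps each buf_idx from
 * being freed twice.
 */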
/* netmap_pipe_krings_delete.
*
* There are two cases:
@ -546,67 +619,14 @@ netmap_pipe_krings_delete(struct netmap_adapter *na)
{
struct netmap_pipe_adapter *pna =
(struct netmap_pipe_adapter *)na;
struct netmap_adapter *sna, *ona; /* na of the other end */
enum txrx t;
int i;
struct netmap_adapter *ona; /* na of the other end */
if (!pna->peer_ref) {
ND("%p: case 2, kept alive by peer", na);
nm_prdis("%p: case 2, kept alive by peer", na);
return;
}
ona = &pna->peer->up;
/* case 1) above */
ND("%p: case 1, deleting everything", na);
/* To avoid double-frees we zero-out all the buffers in the kernel part
* of each ring. The reason is this: If the user is behaving correctly,
* all buffers are found in exactly one slot in the userspace part of
* some ring. If the user is not behaving correctly, we cannot release
* buffers cleanly anyway. In the latter case, the allocator will
* return to a clean state only when all its users will close.
*/
sna = na;
cleanup:
for_rx_tx(t) {
for (i = 0; i < nma_get_nrings(sna, t); i++) {
struct netmap_kring *kring = NMR(sna, t)[i];
struct netmap_ring *ring = kring->ring;
uint32_t j, lim = kring->nkr_num_slots - 1;
ND("%s ring %p hwtail %u hwcur %u",
kring->name, ring, kring->nr_hwtail, kring->nr_hwcur);
if (ring == NULL)
continue;
if (kring->tx == NR_RX)
ring->slot[kring->pipe_tail].buf_idx = 0;
for (j = nm_next(kring->pipe_tail, lim);
j != kring->nr_hwcur;
j = nm_next(j, lim))
{
ND("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx);
ring->slot[j].buf_idx = 0;
}
kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING);
}
}
if (sna != ona && ona->tx_rings) {
sna = ona;
goto cleanup;
}
netmap_mem_rings_delete(na);
netmap_krings_delete(na); /* also zeroes tx_rings etc. */
if (ona->tx_rings == NULL) {
/* already deleted, we must be on an
* cleanup-after-error path */
return;
}
netmap_mem_rings_delete(ona);
netmap_krings_delete(ona);
netmap_pipe_krings_delete_both(na, ona);
}
@ -615,9 +635,9 @@ netmap_pipe_dtor(struct netmap_adapter *na)
{
struct netmap_pipe_adapter *pna =
(struct netmap_pipe_adapter *)na;
ND("%p %p", na, pna->parent_ifp);
nm_prdis("%p %p", na, pna->parent_ifp);
if (pna->peer_ref) {
ND("%p: clean up peer", na);
nm_prdis("%p: clean up peer", na);
pna->peer_ref = 0;
netmap_adapter_put(&pna->peer->up);
}
@ -651,7 +671,7 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
if (cbra != NULL) {
role = NM_PIPE_ROLE_SLAVE;
} else {
ND("not a pipe");
nm_prdis("not a pipe");
return 0;
}
}
@ -682,10 +702,10 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
if (!error)
break;
if (error != ENXIO || retries++) {
ND("parent lookup failed: %d", error);
nm_prdis("parent lookup failed: %d", error);
return error;
}
ND("try to create a persistent vale port");
nm_prdis("try to create a persistent vale port");
/* create a persistent vale port and try again */
*cbra = '\0';
NMG_UNLOCK();
@ -694,14 +714,15 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
strlcpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name));
if (create_error && create_error != EEXIST) {
if (create_error != EOPNOTSUPP) {
D("failed to create a persistent vale port: %d", create_error);
nm_prerr("failed to create a persistent vale port: %d",
create_error);
}
return error;
}
}
if (NETMAP_OWNED_BY_KERN(pna)) {
ND("parent busy");
nm_prdis("parent busy");
error = EBUSY;
goto put_out;
}
@ -711,10 +732,10 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
mna = netmap_pipe_find(pna, pipe_id);
if (mna) {
if (mna->role == role) {
ND("found %s directly at %d", pipe_id, mna->parent_slot);
nm_prdis("found %s directly at %d", pipe_id, mna->parent_slot);
reqna = mna;
} else {
ND("found %s indirectly at %d", pipe_id, mna->parent_slot);
nm_prdis("found %s indirectly at %d", pipe_id, mna->parent_slot);
reqna = mna->peer;
}
/* the pipe we have found already holds a ref to the parent,
@ -723,7 +744,7 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
netmap_unget_na(pna, ifp);
goto found;
}
ND("pipe %s not found, create %d", pipe_id, create);
nm_prdis("pipe %s not found, create %d", pipe_id, create);
if (!create) {
error = ENODEV;
goto put_out;
@ -814,10 +835,10 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
sna->peer_ref = 1;
netmap_adapter_get(&mna->up);
}
ND("created master %p and slave %p", mna, sna);
nm_prdis("created master %p and slave %p", mna, sna);
found:
ND("pipe %s %s at %p", pipe_id,
nm_prdis("pipe %s %s at %p", pipe_id,
(reqna->role == NM_PIPE_ROLE_MASTER ? "master" : "slave"), reqna);
*na = &reqna->up;
netmap_adapter_get(*na);

View File

@ -445,7 +445,7 @@ netmap_vale_attach(struct nmreq_header *hdr, void *auth_token)
error = na->nm_bdg_ctl(hdr, na);
if (error)
goto unref_exit;
ND("registered %s to netmap-mode", na->name);
nm_prdis("registered %s to netmap-mode", na->name);
}
vpna = (struct netmap_vp_adapter *)na;
req->port_index = vpna->bdg_port;
@ -533,7 +533,7 @@ netmap_vale_vp_dtor(struct netmap_adapter *na)
struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
struct nm_bridge *b = vpna->na_bdg;
ND("%s has %d references", na->name, na->na_refcount);
nm_prdis("%s has %d references", na->name, na->na_refcount);
if (b) {
netmap_bdg_detach_common(b, vpna->bdg_port, -1);
@ -542,7 +542,7 @@ netmap_vale_vp_dtor(struct netmap_adapter *na)
if (na->ifp != NULL && !nm_iszombie(na)) {
NM_DETACH_NA(na->ifp);
if (vpna->autodelete) {
ND("releasing %s", na->ifp->if_xname);
nm_prdis("releasing %s", na->ifp->if_xname);
NMG_UNLOCK();
nm_os_vi_detach(na->ifp);
NMG_LOCK();
@ -628,12 +628,12 @@ nm_vale_preflush(struct netmap_kring *kring, u_int end)
* shared lock, waiting if we can sleep (if the source port is
* attached to a user process) or with a trylock otherwise (NICs).
*/
ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
nm_prdis("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
if (na->up.na_flags & NAF_BDG_MAYSLEEP)
BDG_RLOCK(b);
else if (!BDG_RTRYLOCK(b))
return j;
ND(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
nm_prdis(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
ft = kring->nkr_ft;
for (; likely(j != end); j = nm_next(j, lim)) {
@ -644,7 +644,7 @@ nm_vale_preflush(struct netmap_kring *kring, u_int end)
ft[ft_i].ft_flags = slot->flags;
ft[ft_i].ft_offset = 0;
ND("flags is 0x%x", slot->flags);
nm_prdis("flags is 0x%x", slot->flags);
/* we do not use the buf changed flag, but we still need to reset it */
slot->flags &= ~NS_BUF_CHANGED;
@ -667,7 +667,7 @@ nm_vale_preflush(struct netmap_kring *kring, u_int end)
continue;
}
if (unlikely(netmap_verbose && frags > 1))
RD(5, "%d frags at %d", frags, ft_i - frags);
nm_prlim(5, "%d frags at %d", frags, ft_i - frags);
ft[ft_i - frags].ft_frags = frags;
frags = 1;
if (unlikely((int)ft_i >= bridge_batch))
@ -815,8 +815,9 @@ nm_kr_space(struct netmap_kring *k, int is_rx)
k->nr_tail >= k->nkr_num_slots ||
busy < 0 ||
busy >= k->nkr_num_slots) {
D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
k->nkr_lease_idx, k->nkr_num_slots);
nm_prerr("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d",
k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
k->nkr_lease_idx, k->nkr_num_slots);
}
#endif
return space;
@ -893,7 +894,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
struct nm_vale_q *d;
struct nm_bdg_fwd *start_ft = NULL;
ND("slot %d frags %d", i, ft[i].ft_frags);
nm_prdis("slot %d frags %d", i, ft[i].ft_frags);
if (na->up.virt_hdr_len < ft[i].ft_len) {
ft[i].ft_offset = na->up.virt_hdr_len;
@ -909,7 +910,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
}
dst_port = b->bdg_ops.lookup(start_ft, &dst_ring, na, b->private_data);
if (netmap_verbose > 255)
RD(5, "slot %d port %d -> %d", i, me, dst_port);
nm_prlim(5, "slot %d port %d -> %d", i, me, dst_port);
if (dst_port >= NM_BDG_NOPORT)
continue; /* this packet is identified to be dropped */
else if (dst_port == NM_BDG_BROADCAST)
@ -956,7 +957,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
}
}
ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
nm_prdis(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
/* second pass: scan destinations */
for (i = 0; i < num_dsts; i++) {
struct netmap_vp_adapter *dst_na;
@ -971,7 +972,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
int virt_hdr_mismatch = 0;
d_i = dsts[i];
ND("second pass %d port %d", i, d_i);
nm_prdis("second pass %d port %d", i, d_i);
d = dst_ents + d_i;
// XXX fix the division
dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS];
@ -988,7 +989,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
* - when na is being deactivated but is still attached.
*/
if (unlikely(!nm_netmap_on(&dst_na->up))) {
ND("not in netmap mode!");
nm_prdis("not in netmap mode!");
goto cleanup;
}
@ -1006,7 +1007,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
if (netmap_verbose) {
RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
nm_prlim(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
dst_na->up.virt_hdr_len);
}
/* There is a virtio-net header/offloadings mismatch between
@ -1028,11 +1029,11 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
KASSERT(dst_na->mfs > 0, ("vpna->mfs is 0"));
needed = (needed * na->mfs) /
(dst_na->mfs - WORST_CASE_GSO_HEADER) + 1;
ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
nm_prdis(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
}
}
ND(5, "pass 2 dst %d is %x %s",
nm_prdis(5, "pass 2 dst %d is %x %s",
i, d_i, is_vp ? "virtual" : "nic/host");
dst_nr = d_i & (NM_BDG_MAXRINGS-1);
nrings = dst_na->up.num_rx_rings;
@ -1098,7 +1099,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
if (unlikely(cnt > howmany))
break; /* no more space */
if (netmap_verbose && cnt > 1)
RD(5, "rx %d frags to %d", cnt, j);
nm_prlim(5, "rx %d frags to %d", cnt, j);
ft_end = ft_p + cnt;
if (unlikely(virt_hdr_mismatch)) {
bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);
@ -1111,7 +1112,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
slot = &ring->slot[j];
dst = NMB(&dst_na->up, slot);
ND("send [%d] %d(%d) bytes at %s:%d",
nm_prdis("send [%d] %d(%d) bytes at %s:%d",
i, (int)copy_len, (int)dst_len,
NM_IFPNAME(dst_ifp), j);
/* round to a multiple of 64 */
@ -1119,7 +1120,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) ||
copy_len > NETMAP_BUF_SIZE(&na->up))) {
RD(5, "invalid len %d, down to 64", (int)copy_len);
nm_prlim(5, "invalid len %d, down to 64", (int)copy_len);
copy_len = dst_len = 64; // XXX
}
if (ft_p->ft_flags & NS_INDIRECT) {
@ -1155,10 +1156,10 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
* i can recover the slots, otherwise must
* fill them with 0 to mark empty packets.
*/
ND("leftover %d bufs", howmany);
nm_prdis("leftover %d bufs", howmany);
if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) {
/* yes i am the last one */
ND("roll back nkr_hwlease to %d", j);
nm_prdis("roll back nkr_hwlease to %d", j);
kring->nkr_hwlease = j;
} else {
while (howmany-- > 0) {
@ -1323,7 +1324,7 @@ netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *ifp,
na->nm_krings_create = netmap_vale_vp_krings_create;
na->nm_krings_delete = netmap_vale_vp_krings_delete;
na->nm_dtor = netmap_vale_vp_dtor;
ND("nr_mem_id %d", req->nr_mem_id);
nm_prdis("nr_mem_id %d", req->nr_mem_id);
na->nm_mem = nmd ?
netmap_mem_get(nmd):
netmap_mem_private_new(
@ -1594,11 +1595,11 @@ netmap_vi_create(struct nmreq_header *hdr, int autodelete)
if (error) {
goto err_2;
}
ND("returning nr_mem_id %d", req->nr_mem_id);
nm_prdis("returning nr_mem_id %d", req->nr_mem_id);
if (nmd)
netmap_mem_put(nmd);
NMG_UNLOCK();
ND("created %s", ifp->if_xname);
nm_prdis("created %s", ifp->if_xname);
return 0;
err_2:

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2016 Netflix, Inc
* Copyright (c) 2016 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -144,6 +144,7 @@ scteken_init(scr_stat *scp, void **softc, int code)
#ifdef TEKEN_CONS25
teken_set_cons25(&ts->ts_teken);
#endif /* TEKEN_CONS25 */
teken_set_cons25keys(&ts->ts_teken);
scteken_sync_internal(scp, ts);
break;
}

View File

@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2016-2017
* Netflix Inc. All rights reserved.
* Copyright (c) 2016-2017 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2016
* Netflix Inc. All rights reserved.
* Copyright (c) 2016 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -2114,12 +2114,21 @@ vtterm_ioctl(struct terminal *tm, u_long cmd, caddr_t data,
case _IO('K', 8):
cmd = KDMKTONE;
break;
case _IO('K', 10):
cmd = KDSETMODE;
break;
case _IO('K', 13):
cmd = KDSBORDER;
break;
case _IO('K', 63):
cmd = KIOCSOUND;
break;
case _IO('K', 66):
cmd = KDSETLED;
break;
case _IO('c', 104):
cmd = CONS_SETWINORG;
break;
case _IO('c', 110):
cmd = CONS_SETKBD;
break;

View File

@ -72,7 +72,12 @@ counter_64_inc_8b(uint64_t *p, int64_t inc)
}
#ifdef IN_SUBR_COUNTER_C
static inline uint64_t
struct counter_u64_fetch_cx8_arg {
uint64_t res;
uint64_t *p;
};
static uint64_t
counter_u64_read_one_8b(uint64_t *p)
{
uint32_t res_lo, res_high;
@ -87,9 +92,22 @@ counter_u64_read_one_8b(uint64_t *p)
return (res_lo + ((uint64_t)res_high << 32));
}
static void
counter_u64_fetch_cx8_one(void *arg1)
{
struct counter_u64_fetch_cx8_arg *arg;
uint64_t val;
arg = arg1;
val = counter_u64_read_one_8b((uint64_t *)((char *)arg->p +
UMA_PCPU_ALLOC_SIZE * PCPU_GET(cpuid)));
atomic_add_64(&arg->res, val);
}
static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
struct counter_u64_fetch_cx8_arg arg;
uint64_t res;
int i;
@ -108,9 +126,10 @@ counter_u64_fetch_inline(uint64_t *p)
}
critical_exit();
} else {
CPU_FOREACH(i)
res += counter_u64_read_one_8b((uint64_t *)((char *)p +
UMA_PCPU_ALLOC_SIZE * i));
arg.p = p;
arg.res = 0;
smp_rendezvous(NULL, counter_u64_fetch_cx8_one, NULL, &arg);
res = arg.res;
}
return (res);
}
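
The structural change here: instead of the fetching CPU walking every other CPU's counter slot itself (the removed CPU_FOREACH loop), each CPU now reads its own slot inside an smp_rendezvous() and accumulates into a shared total with atomic_add_64(). A minimal stand-alone sketch of the pattern, with hypothetical names (fetch_arg, fetch_one, fetch_total) and simplified indexing; the real code strides by UMA_PCPU_ALLOC_SIZE and reads through the cmpxchg8b-based helper:

struct fetch_arg {
	uint64_t res;		/* shared accumulator */
	uint64_t *base;		/* per-CPU array, one slot per cpuid */
};

static void
fetch_one(void *p)
{
	struct fetch_arg *arg = p;

	/* Runs once on every CPU; each CPU adds only its own slot. */
	atomic_add_64(&arg->res, arg->base[PCPU_GET(cpuid)]);
}

static uint64_t
fetch_total(uint64_t *base)
{
	struct fetch_arg arg = { .res = 0, .base = base };

	smp_rendezvous(NULL, fetch_one, NULL, &arg);
	return (arg.res);
}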

View File

@ -250,6 +250,23 @@ init_static_kenv(char *buf, size_t len)
char *eval;
KASSERT(!dynamic_kenv, ("kenv: dynamic_kenv already initialized"));
/*
* We may be called twice, with the second call needed to relocate
* md_envp after enabling paging. md_envp is then garbage if it is
* not null and the relocation will move it. Discard it so as to
* not crash using its old value in our first call to kern_getenv().
*
* The second call gives the same environment as the first except
* in silly configurations where the static env disables itself.
*
* Other env calls don't handle possibly-garbage pointers, so must
* not be made between enabling paging and calling here.
*/
md_envp = NULL;
md_env_len = 0;
md_env_pos = 0;
/*
* Give the static environment a chance to disable the loader(8)
* environment first. This is done with loader_env.disabled=1.
@ -275,12 +292,16 @@ init_static_kenv(char *buf, size_t len)
md_env_pos = 0;
eval = kern_getenv("static_env.disabled");
if (eval != NULL && strcmp(eval, "1") == 0)
*kern_envp = '\0';
if (eval != NULL && strcmp(eval, "1") == 0) {
kern_envp[0] = '\0';
kern_envp[1] = '\0';
}
}
eval = kern_getenv("static_hints.disabled");
if (eval != NULL && strcmp(eval, "1") == 0)
*static_hints = '\0';
if (eval != NULL && strcmp(eval, "1") == 0) {
static_hints[0] = '\0';
static_hints[1] = '\0';
}
}
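
The two-byte clear is the essential part of this fix: a static environment (and static_hints) is a sequence of NUL-terminated name=value strings ended by an empty string, i.e. by two consecutive NULs. An illustration with a made-up buffer:

/*
 * Layout of a static environment buffer:
 *
 *   "static_env.disabled=1\0foo=bar\0\0"
 *                         ^         ^^
 *        string terminator    empty string = end of environment
 *
 * Writing a single '\0' at offset 0 only empties the first entry;
 * a scanner would continue at "foo=bar".  Clearing the first two
 * bytes yields "\0\0", a properly terminated empty environment.
 */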
static void

View File

@ -3112,8 +3112,8 @@ stop_all_proc(void)
PROC_UNLOCK(p);
continue;
}
_PHOLD(p);
sx_xunlock(&allproc_lock);
_PHOLD(p);
r = thread_single(p, SINGLE_ALLPROC);
if (r != 0)
restart = true;

View File

@ -10,7 +10,7 @@
* Copyright (c) 2014 Roger Pau Monné <roger.pau@citrix.com>
* All Rights Reserved.
* Copyright (c) 2018 Kyle Evans <kevans@FreeBSD.org>
* Copyright (c) 2018 Netflix
* Copyright (c) 2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1605,8 +1605,10 @@ m_dispose_extcontrolm(struct mbuf *m)
fd = *fds++;
error = fget(td, fd, &cap_no_rights,
&fp);
if (error == 0)
if (error == 0) {
fdclose(td, fp, fd);
fdrop(fp, td);
}
}
}
clen -= datalen;
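
The added fdrop() matters because fget() takes its own hold on the file: fdclose() releases the descriptor table's reference but not the caller's, so without the fdrop() each descriptor disposed of here would leak a file reference. The corrected pattern, restated:

	if (fget(td, fd, &cap_no_rights, &fp) == 0) {
		fdclose(td, fp, fd);	/* close the descriptor slot */
		fdrop(fp, td);		/* release the hold fget() took */
	}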

View File

@ -633,8 +633,6 @@ extattr_list_vp(struct vnode *vp, int attrnamespace, void *data,
if (nbytes > IOSIZE_MAX)
return (EINVAL);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
auiop = NULL;
sizep = NULL;
cnt = 0;
@ -653,24 +651,25 @@ extattr_list_vp(struct vnode *vp, int attrnamespace, void *data,
} else
sizep = &size;
vn_lock(vp, LK_SHARED | LK_RETRY);
#ifdef MAC
error = mac_vnode_check_listextattr(td->td_ucred, vp, attrnamespace);
if (error)
goto done;
if (error) {
VOP_UNLOCK(vp, 0);
return (error);
}
#endif
error = VOP_LISTEXTATTR(vp, attrnamespace, auiop, sizep,
td->td_ucred, td);
VOP_UNLOCK(vp, 0);
if (auiop != NULL) {
cnt -= auio.uio_resid;
td->td_retval[0] = cnt;
} else
td->td_retval[0] = size;
#ifdef MAC
done:
#endif
VOP_UNLOCK(vp, 0);
return (error);
}
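
The locking rework is the substance of this hunk: the vnode lock is now taken shared rather than exclusive (listing attributes does not modify the vnode), it is held only across the MAC check and the VOP call, and the MAC failure path unlocks and returns directly instead of routing through a label. The resulting lock scope, in outline (names as above):

	vn_lock(vp, LK_SHARED | LK_RETRY);
#ifdef MAC
	error = mac_vnode_check_listextattr(td->td_ucred, vp, attrnamespace);
	if (error) {
		VOP_UNLOCK(vp, 0);
		return (error);
	}
#endif
	error = VOP_LISTEXTATTR(vp, attrnamespace, auiop, sizep,
	    td->td_ucred, td);
	VOP_UNLOCK(vp, 0);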

View File

@ -353,8 +353,8 @@ struct iflib_txq {
uint8_t ift_closed;
uint8_t ift_update_freq;
struct iflib_filter_info ift_filter_info;
bus_dma_tag_t ift_desc_tag;
bus_dma_tag_t ift_tso_desc_tag;
bus_dma_tag_t ift_buf_tag;
bus_dma_tag_t ift_tso_buf_tag;
iflib_dma_info_t ift_ifdi;
#define MTX_NAME_LEN 16
char ift_mtx_name[MTX_NAME_LEN];
@ -389,7 +389,7 @@ struct iflib_fl {
iflib_rxsd_array_t ifl_sds;
iflib_rxq_t ifl_rxq;
uint8_t ifl_id;
bus_dma_tag_t ifl_desc_tag;
bus_dma_tag_t ifl_buf_tag;
iflib_dma_info_t ifl_ifdi;
uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
@ -922,10 +922,9 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
if_ctx_t ctx = ifp->if_softc;
iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
bus_dmamap_sync(txq->ift_buf_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/*
* First part: process new packets to send.
* nm_i is the current index in the netmap kring,
@ -992,7 +991,8 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
if (slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
netmap_reload_map(na, txq->ift_buf_tag,
txq->ift_sds.ifsd_map[nic_i], addr);
}
/* make sure changes to the buffer are synced */
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
@ -1005,7 +1005,7 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
kring->nr_hwcur = nm_i;
/* synchronize the NIC ring */
bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
bus_dmamap_sync(txq->ift_buf_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* (re)start the tx unit up to slot nic_i (excluded) */
@ -1072,8 +1072,9 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
if (fl->ifl_sds.ifsd_map == NULL)
continue;
bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_sync(rxq->ifr_fl[i].ifl_buf_tag,
fl->ifl_ifdi->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}
/*
* First part: import newly received packets.
@ -1199,7 +1200,8 @@ iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
* netmap slot index, si
*/
int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
NMB(na, slot + si));
}
}
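
The ift_desc_tag/ifl_desc_tag to ift_buf_tag/ifl_buf_tag renaming in the hunks above and below reflects what these tags actually constrain: DMA mappings of packet buffers (mbufs and clusters). The descriptor rings themselves are mapped through each queue's iflib_dma_info (ift_ifdi->idi_tag/idi_map), so calling the buffer tags "desc" tags was misleading. A simplified sketch with a hypothetical struct name (fields as in the diff):

struct iflib_txq_sketch {
	bus_dma_tag_t    ift_buf_tag;		/* maps TX mbuf chains */
	bus_dma_tag_t    ift_tso_buf_tag;	/* same, sized for TSO chains */
	iflib_dma_info_t ift_ifdi;		/* descriptor ring, with its
						 * own idi_tag / idi_map */
};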
@ -1576,12 +1578,13 @@ _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
/*********************************************************************
*
* Allocate memory for tx_buffer structures. The tx_buffer stores all
* the information needed to transmit a packet on the wire. This is
* called only once at attach, setup is done every reset.
* Allocate DMA resources for TX buffers as well as memory for the TX
 * mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in an
* iflib_sw_tx_desc_array structure, storing all the information that
* is needed to transmit a packet on the wire. This is called only
* once at attach, setup is done every reset.
*
**********************************************************************/
static int
iflib_txsd_alloc(iflib_txq_t txq)
{
@ -1607,7 +1610,7 @@ iflib_txsd_alloc(iflib_txq_t txq)
}
/*
* Setup DMA descriptor areas.
* Set up DMA tags for TX buffers.
*/
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, bounds */
@ -1620,7 +1623,7 @@ iflib_txsd_alloc(iflib_txq_t txq)
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&txq->ift_desc_tag))) {
&txq->ift_buf_tag))) {
device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
(uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
@ -1638,38 +1641,42 @@ iflib_txsd_alloc(iflib_txq_t txq)
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&txq->ift_tso_desc_tag))) {
device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
&txq->ift_tso_buf_tag))) {
device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
err);
goto fail;
}
/* Allocate memory for the TX mbuf map. */
if (!(txq->ift_sds.ifsd_m =
(struct mbuf **) malloc(sizeof(struct mbuf *) *
scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
device_printf(dev, "Unable to allocate TX mbuf map memory\n");
err = ENOMEM;
goto fail;
}
/* Create the descriptor buffer dma maps */
/*
* Create the DMA maps for TX buffers.
*/
if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
device_printf(dev,
"Unable to allocate TX buffer DMA map memory\n");
err = ENOMEM;
goto fail;
}
if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
device_printf(dev, "Unable to allocate TSO tx_buffer "
"map memory\n");
device_printf(dev,
"Unable to allocate TSO TX buffer map memory\n");
err = ENOMEM;
goto fail;
}
for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
err = bus_dmamap_create(txq->ift_desc_tag, 0,
err = bus_dmamap_create(txq->ift_buf_tag, 0,
&txq->ift_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
@ -1677,7 +1684,7 @@ iflib_txsd_alloc(iflib_txq_t txq)
}
if (!tso)
continue;
err = bus_dmamap_create(txq->ift_tso_desc_tag, 0,
err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
&txq->ift_sds.ifsd_tso_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create TSO TX DMA map\n");
@ -1700,9 +1707,9 @@ iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
if (txq->ift_sds.ifsd_map != NULL)
map = txq->ift_sds.ifsd_map[i];
if (map != NULL) {
bus_dmamap_sync(txq->ift_desc_tag, map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->ift_desc_tag, map);
bus_dmamap_destroy(txq->ift_desc_tag, map);
bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->ift_buf_tag, map);
bus_dmamap_destroy(txq->ift_buf_tag, map);
txq->ift_sds.ifsd_map[i] = NULL;
}
@ -1710,10 +1717,10 @@ iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
if (txq->ift_sds.ifsd_tso_map != NULL)
map = txq->ift_sds.ifsd_tso_map[i];
if (map != NULL) {
bus_dmamap_sync(txq->ift_tso_desc_tag, map,
bus_dmamap_sync(txq->ift_tso_buf_tag, map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->ift_tso_desc_tag, map);
bus_dmamap_destroy(txq->ift_tso_desc_tag, map);
bus_dmamap_unload(txq->ift_tso_buf_tag, map);
bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
txq->ift_sds.ifsd_tso_map[i] = NULL;
}
}
@ -1737,13 +1744,13 @@ iflib_txq_destroy(iflib_txq_t txq)
free(txq->ift_sds.ifsd_m, M_IFLIB);
txq->ift_sds.ifsd_m = NULL;
}
if (txq->ift_desc_tag != NULL) {
bus_dma_tag_destroy(txq->ift_desc_tag);
txq->ift_desc_tag = NULL;
if (txq->ift_buf_tag != NULL) {
bus_dma_tag_destroy(txq->ift_buf_tag);
txq->ift_buf_tag = NULL;
}
if (txq->ift_tso_desc_tag != NULL) {
bus_dma_tag_destroy(txq->ift_tso_desc_tag);
txq->ift_tso_desc_tag = NULL;
if (txq->ift_tso_buf_tag != NULL) {
bus_dma_tag_destroy(txq->ift_tso_buf_tag);
txq->ift_tso_buf_tag = NULL;
}
}
@ -1757,14 +1764,14 @@ iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
return;
if (txq->ift_sds.ifsd_map != NULL) {
bus_dmamap_sync(txq->ift_desc_tag,
bus_dmamap_sync(txq->ift_buf_tag,
txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i]);
bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
}
if (txq->ift_sds.ifsd_tso_map != NULL) {
bus_dmamap_sync(txq->ift_tso_desc_tag,
bus_dmamap_sync(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->ift_tso_desc_tag,
bus_dmamap_unload(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[i]);
}
m_free(*mp);
@ -1803,10 +1810,13 @@ iflib_txq_setup(iflib_txq_t txq)
/*********************************************************************
*
* Allocate memory for rx_buffer structures. Since we use one
* rx_buffer per received packet, the maximum number of rx_buffer's
* that we'll need is equal to the number of receive descriptors
* that we've allocated.
* Allocate DMA resources for RX buffers as well as memory for the RX
* mbuf map, direct RX cluster pointer map and RX cluster bus address
* map. RX DMA map, RX mbuf map, direct RX cluster pointer map and
 * RX cluster map are kept in an iflib_sw_rx_desc_array structure.
 * Since we use one entry in iflib_sw_rx_desc_array per received
* packet, the maximum number of entries we'll need is equal to the
* number of hardware receive descriptors that we've allocated.
*
**********************************************************************/
static int
@ -1825,6 +1835,7 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
fl = rxq->ifr_fl;
for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
/* Set up DMA tag for RX buffers. */
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
@ -1836,45 +1847,56 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&fl->ifl_desc_tag);
&fl->ifl_buf_tag);
if (err) {
device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
__func__, err);
device_printf(dev,
"Unable to allocate RX DMA tag: %d\n", err);
goto fail;
}
/* Allocate memory for the RX mbuf map. */
if (!(fl->ifl_sds.ifsd_m =
(struct mbuf **) malloc(sizeof(struct mbuf *) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
device_printf(dev,
"Unable to allocate RX mbuf map memory\n");
err = ENOMEM;
goto fail;
}
/* Allocate memory for the direct RX cluster pointer map. */
if (!(fl->ifl_sds.ifsd_cl =
(caddr_t *) malloc(sizeof(caddr_t) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
device_printf(dev,
"Unable to allocate RX cluster map memory\n");
err = ENOMEM;
goto fail;
}
/* Allocate memory for the RX cluster bus address map. */
if (!(fl->ifl_sds.ifsd_ba =
(bus_addr_t *) malloc(sizeof(bus_addr_t) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate rx bus addr memory\n");
device_printf(dev,
"Unable to allocate RX bus address map memory\n");
err = ENOMEM;
goto fail;
}
/* Create the descriptor buffer dma maps */
/*
* Create the DMA maps for RX buffers.
*/
if (!(fl->ifl_sds.ifsd_map =
(bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
device_printf(dev,
"Unable to allocate RX buffer DMA map memory\n");
err = ENOMEM;
goto fail;
}
for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
err = bus_dmamap_create(fl->ifl_buf_tag, 0,
&fl->ifl_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create RX buffer DMA map\n");
goto fail;
@ -1974,7 +1996,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
cb_arg.error = 0;
MPASS(sd_map != NULL);
err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
BUS_DMA_NOWAIT);
if (err != 0 || cb_arg.error) {
@ -1986,7 +2008,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
break;
}
bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
BUS_DMASYNC_PREREAD);
sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
sd_cl[frag_idx] = cl;
@ -2087,14 +2109,14 @@ iflib_fl_bufs_free(iflib_fl_t fl)
if (*sd_cl != NULL) {
sd_map = fl->ifl_sds.ifsd_map[i];
bus_dmamap_sync(fl->ifl_desc_tag, sd_map,
bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
if (*sd_cl != NULL)
uma_zfree(fl->ifl_zone, *sd_cl);
// XXX: Should this get moved out?
if (iflib_in_detach(fl->ifl_rxq->ifr_ctx))
bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
bus_dmamap_destroy(fl->ifl_buf_tag, sd_map);
if (*sd_m != NULL) {
m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
uma_zfree(zone_mbuf, *sd_m);
@ -2196,23 +2218,23 @@ iflib_rx_sds_free(iflib_rxq_t rxq)
if (rxq->ifr_fl != NULL) {
for (i = 0; i < rxq->ifr_nfl; i++) {
fl = &rxq->ifr_fl[i];
if (fl->ifl_desc_tag != NULL) {
if (fl->ifl_buf_tag != NULL) {
if (fl->ifl_sds.ifsd_map != NULL) {
for (j = 0; j < fl->ifl_size; j++) {
if (fl->ifl_sds.ifsd_map[j] ==
NULL)
continue;
bus_dmamap_sync(
fl->ifl_desc_tag,
fl->ifl_buf_tag,
fl->ifl_sds.ifsd_map[j],
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(
fl->ifl_desc_tag,
fl->ifl_buf_tag,
fl->ifl_sds.ifsd_map[j]);
}
}
-bus_dma_tag_destroy(fl->ifl_desc_tag);
-fl->ifl_desc_tag = NULL;
+bus_dma_tag_destroy(fl->ifl_buf_tag);
+fl->ifl_buf_tag = NULL;
}
free(fl->ifl_sds.ifsd_m, M_IFLIB);
free(fl->ifl_sds.ifsd_cl, M_IFLIB);
@ -2497,9 +2519,9 @@ rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
/* not valid assert if bxe really does SGE from non-contiguous elements */
MPASS(fl->ifl_cidx == cidx);
-bus_dmamap_sync(fl->ifl_desc_tag, map, BUS_DMASYNC_POSTREAD);
+bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
if (unload)
-bus_dmamap_unload(fl->ifl_desc_tag, map);
+bus_dmamap_unload(fl->ifl_buf_tag, map);
fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
if (__predict_false(fl->ifl_cidx == 0))
fl->ifl_gen = 0;
@ -2582,7 +2604,7 @@ iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
m->m_data += 2;
#endif
memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
-bus_dmamap_sync(rxq->ifr_fl->ifl_desc_tag,
+bus_dmamap_sync(rxq->ifr_fl->ifl_buf_tag,
rxq->ifr_fl->ifl_sds.ifsd_map[ri->iri_frags[0].irf_idx],
BUS_DMASYNC_PREREAD);
m->m_len = ri->iri_frags[0].irf_len;
@ -3083,9 +3105,9 @@ iflib_remove_mbuf(iflib_txq_t txq)
ifsd_m = txq->ift_sds.ifsd_m;
m = ifsd_m[pidx];
ifsd_m[pidx] = NULL;
-bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[pidx]);
+bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
if (txq->ift_sds.ifsd_tso_map != NULL)
-bus_dmamap_unload(txq->ift_tso_desc_tag,
+bus_dmamap_unload(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[pidx]);
#if MEMORY_LOGGING
txq->ift_dequeued++;
@ -3162,6 +3184,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
if_ctx_t ctx;
if_shared_ctx_t sctx;
if_softc_ctx_t scctx;
+bus_dma_tag_t buf_tag;
bus_dma_segment_t *segs;
struct mbuf *m_head, **ifsd_m;
void *next_txd;
@ -3169,7 +3192,6 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
struct if_pkt_info pi;
int remap = 0;
int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
-bus_dma_tag_t desc_tag;
ctx = txq->ift_ctx;
sctx = ctx->ifc_sctx;
@ -3200,13 +3222,13 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
ifsd_m = txq->ift_sds.ifsd_m;
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
-desc_tag = txq->ift_tso_desc_tag;
+buf_tag = txq->ift_tso_buf_tag;
max_segs = scctx->isc_tx_tso_segments_max;
map = txq->ift_sds.ifsd_tso_map[pidx];
-MPASS(desc_tag != NULL);
+MPASS(buf_tag != NULL);
MPASS(max_segs > 0);
} else {
-desc_tag = txq->ift_desc_tag;
+buf_tag = txq->ift_buf_tag;
max_segs = scctx->isc_tx_nsegments;
map = txq->ift_sds.ifsd_map[pidx];
}
@ -3238,7 +3260,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
}
retry:
-err = bus_dmamap_load_mbuf_sg(desc_tag, map, m_head, segs, &nsegs,
+err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
BUS_DMA_NOWAIT);
defrag:
if (__predict_false(err)) {
@ -3284,7 +3306,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
*/
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
txq->ift_no_desc_avail++;
-bus_dmamap_unload(desc_tag, map);
+bus_dmamap_unload(buf_tag, map);
DBG_COUNTER_INC(encap_txq_avail_fail);
DBG_COUNTER_INC(encap_txd_encap_fail);
if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
@ -3311,7 +3333,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
#ifdef PKT_DEBUG
print_pkt(&pi);
#endif
-bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
+bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@ -3387,16 +3409,16 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
if ((m = ifsd_m[cidx]) != NULL) {
prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
-bus_dmamap_sync(txq->ift_tso_desc_tag,
+bus_dmamap_sync(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[cidx],
BUS_DMASYNC_POSTWRITE);
-bus_dmamap_unload(txq->ift_tso_desc_tag,
+bus_dmamap_unload(txq->ift_tso_buf_tag,
txq->ift_sds.ifsd_tso_map[cidx]);
} else {
-bus_dmamap_sync(txq->ift_desc_tag,
+bus_dmamap_sync(txq->ift_buf_tag,
txq->ift_sds.ifsd_map[cidx],
BUS_DMASYNC_POSTWRITE);
-bus_dmamap_unload(txq->ift_desc_tag,
+bus_dmamap_unload(txq->ift_buf_tag,
txq->ift_sds.ifsd_map[cidx]);
}
/* XXX we don't support any drivers that batch packets yet */
@ -5203,15 +5225,18 @@ iflib_queues_alloc(if_ctx_t ctx)
for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
/* Set up some basics */
-if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
-device_printf(dev, "failed to allocate iflib_dma_info\n");
+if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
+M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
+device_printf(dev,
+"Unable to allocate TX DMA info memory\n");
err = ENOMEM;
goto err_tx_desc;
}
txq->ift_ifdi = ifdip;
for (j = 0; j < ntxqs; j++, ifdip++) {
-if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
-device_printf(dev, "Unable to allocate Descriptor memory\n");
+if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
+device_printf(dev,
+"Unable to allocate TX descriptors\n");
err = ENOMEM;
goto err_tx_desc;
}
@ -5255,8 +5280,10 @@ iflib_queues_alloc(if_ctx_t ctx)
for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
/* Set up some basics */
-if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
-device_printf(dev, "failed to allocate iflib_dma_info\n");
+if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
+M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
+device_printf(dev,
+"Unable to allocate RX DMA info memory\n");
err = ENOMEM;
goto err_tx_desc;
}
@ -5266,8 +5293,9 @@ iflib_queues_alloc(if_ctx_t ctx)
rxq->ifr_ntxqirq = 1;
rxq->ifr_txqid[0] = i;
for (j = 0; j < nrxqs; j++, ifdip++) {
-if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
-device_printf(dev, "Unable to allocate Descriptor memory\n");
+if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
+device_printf(dev,
+"Unable to allocate RX descriptors\n");
err = ENOMEM;
goto err_tx_desc;
}
@ -5319,7 +5347,8 @@ iflib_queues_alloc(if_ctx_t ctx)
}
}
if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
device_printf(ctx->ifc_dev, "device queue allocation failed\n");
device_printf(ctx->ifc_dev,
"Unable to allocate device TX queue\n");
iflib_tx_structures_free(ctx);
free(vaddrs, M_IFLIB);
free(paddrs, M_IFLIB);
@ -5340,7 +5369,8 @@ iflib_queues_alloc(if_ctx_t ctx)
}
}
if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
device_printf(ctx->ifc_dev, "device queue allocation failed\n");
device_printf(ctx->ifc_dev,
"Unable to allocate device RX queue\n");
iflib_tx_structures_free(ctx);
free(vaddrs, M_IFLIB);
free(paddrs, M_IFLIB);
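
The hunks above consistently rename ifl_desc_tag/ift_desc_tag to ifl_buf_tag/ift_buf_tag: these tags constrain the data buffers that get loaded, synced, and unloaded, not the descriptor rings, so the old names were misleading. For readers unfamiliar with bus_dma(9), here is a minimal sketch of the tag/map lifecycle these call sites exercise; the example_* names, alignment, and sizing are illustrative assumptions, not iflib's actual parameters.

/* Sketch of a bus_dma(9) buffer tag plus one map; example_* names are hypothetical. */
#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

static int
example_buf_setup(device_t dev, bus_size_t bufsize, bus_dma_tag_t *tag,
    bus_dmamap_t *map)
{
	int err;

	/* The buffer tag describes DMA constraints for data buffers only. */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    bufsize, 1, bufsize,		/* maxsize, nsegments, maxsegsz */
	    0, NULL, NULL,			/* flags, lockfunc, lockfuncarg */
	    tag);
	if (err != 0)
		return (err);
	/* One map per buffer slot; every load/sync/unload then uses this tag. */
	return (bus_dmamap_create(*tag, 0, map));
}
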

View File

@ -4289,10 +4289,12 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
if (net->port) {
mtu -= sizeof(struct udphdr);
}
-if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
-sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+if (mtu < net->mtu) {
+if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
+sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+}
+net->mtu = mtu;
}
-net->mtu = mtu;
}
} else if (ro->ro_rt == NULL) {
/* route was freed */
@ -4647,10 +4649,12 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
if (net->port) {
mtu -= sizeof(struct udphdr);
}
-if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
-sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+if (mtu < net->mtu) {
+if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
+sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+}
+net->mtu = mtu;
}
-net->mtu = mtu;
}
} else if (ifp) {
if (ND_IFINFO(ifp)->linkmtu &&
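
Both hunks apply the same rule: an MTU learned from the route may only lower net->mtu, never raise it, and the association's smallest_mtu is reset only when the new value actually undercuts it. A standalone sketch of that clamp with plain integers (illustrative names; the smallest_mtu assignment stands in for sctp_mtu_size_reset()):

/* Standalone illustration of the MTU clamp; not SCTP code. */
#include <stdio.h>

static unsigned int net_mtu = 1500, smallest_mtu = 1400;

static void
update_mtu(unsigned int mtu)
{
	if (mtu < net_mtu) {		/* only ever shrink the path MTU */
		if (smallest_mtu > mtu)
			smallest_mtu = mtu;
		net_mtu = mtu;
	}
}

int
main(void)
{
	update_mtu(9000);	/* a larger value is ignored */
	update_mtu(1280);	/* a smaller value lowers both */
	printf("net_mtu=%u smallest_mtu=%u\n", net_mtu, smallest_mtu);
	return (0);
}
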

View File

@ -4654,13 +4654,13 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
for (i = 0; i < strrst->srs_number_streams; i++) {
if ((send_in) &&
-(strrst->srs_stream_list[i] > stcb->asoc.streamincnt)) {
+(strrst->srs_stream_list[i] >= stcb->asoc.streamincnt)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
break;
}
if ((send_out) &&
-(strrst->srs_stream_list[i] > stcb->asoc.streamoutcnt)) {
+(strrst->srs_stream_list[i] >= stcb->asoc.streamoutcnt)) {
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
error = EINVAL;
break;
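
The operator change from > to >= closes an off-by-one: streams are numbered from 0, so with streamincnt (or streamoutcnt) streams the valid IDs are 0 through count - 1, and an ID equal to the count must be rejected. A standalone illustration of the boundary (hypothetical helper, not SCTP code):

/* Valid stream IDs for a 0-based set of streamcnt streams. */
#include <stdio.h>

static int
stream_id_valid(unsigned int id, unsigned int streamcnt)
{
	return (id < streamcnt);	/* i.e., reject id >= streamcnt */
}

int
main(void)
{
	printf("%d\n", stream_id_valid(9, 10));		/* 1: last valid stream */
	printf("%d\n", stream_id_valid(10, 10));	/* 0: caught by the >= check */
	return (0);
}
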

View File

@ -1,5 +1,5 @@
/*-
-* Copyright (c) 2016-2018 Netflix Inc.
+* Copyright (c) 2016-2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,5 +1,5 @@
/*-
-* Copyright (c) 2016-2018 Netflix Inc.
+* Copyright (c) 2016-2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
-* Copyright (c) 2016-2018
-* Netflix Inc. All rights reserved.
+* Copyright (c) 2016-2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
-* Copyright (c) 2016-2018
-* Netflix Inc. All rights reserved.
+* Copyright (c) 2016-2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
-* Copyright (c) 2016-2018
-* Netflix Inc. All rights reserved.
+* Copyright (c) 2016-2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,9 +1,7 @@
#ifndef __pacer_timer_h__
#define __pacer_timer_h__
/*-
-* Copyright (c) 2017
-* Netflix Inc.
-* All rights reserved.
+* Copyright (c) 2017 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,7 +1,5 @@
/*-
-* Copyright (c) 2017
-* Netflix Inc.
-* All rights reserved.
+* Copyright (c) 2017 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,9 +1,7 @@
#ifndef __sack_filter_h__
#define __sack_filter_h__
/*-
-* Copyright (c) 2017
-* Netflix Inc.
-* All rights reserved.
+* Copyright (c) 2017 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
-* Copyright (c) 2016
-* Netflix Inc. All rights reserved.
+* Copyright (c) 2016 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -60,6 +60,12 @@ GLOBAL(__endkernel)
GLOBAL(tmpstk)
.space TMPSTKSZ
+#ifdef KDB
+#define TRAPSTKSZ 4096 /* 4k trap stack */
+GLOBAL(trapstk)
+.space TRAPSTKSZ
+#endif
.text
.globl btext
btext:

View File

@ -65,6 +65,14 @@ GLOBAL(tmpstk)
TOC_ENTRY(tmpstk)
TOC_ENTRY(can_wakeup)
+#ifdef KDB
+#define TRAPSTKSZ 4096 /* 4k trap stack */
+GLOBAL(trapstk)
+.space TRAPSTKSZ
+TOC_ENTRY(trapstk)
+#endif
/*
* Entry point for bootloaders that do not fully implement ELF and start
* at the beginning of the image (kexec, notably). In its own section so

View File

@ -864,8 +864,8 @@ dbtrap:
mtsprg3 %r1
lwz %r1,TRAP_TOCBASE(0) /* get new SP */
-lwz %r1,tmpstk@got(%r1)
-addi %r1,%r1,TMPSTKSZ-16
+lwz %r1,trapstk@got(%r1)
+addi %r1,%r1,TRAPSTKSZ-16
FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */

View File

@ -897,8 +897,8 @@ dbtrap:
mtsprg3 %r1
GET_TOCBASE(%r1) /* get new SP */
-ld %r1,TOC_REF(tmpstk)(%r1)
-addi %r1,%r1,(TMPSTKSZ-48)
+ld %r1,TOC_REF(trapstk)(%r1)
+addi %r1,%r1,(TRAPSTKSZ-48)
FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */

View File

@ -767,11 +767,6 @@ cpu_idle_booke(sbintime_t sbt)
case FSL_E500mc:
case FSL_E5500:
case FSL_E6500:
-/*
-* Base binutils doesn't know what the 'wait' instruction is, so
-* use the opcode encoding here.
-*/
-__asm __volatile(".long 0x7c00007c");
break;
default:
powerpc_sync();

View File

@ -386,8 +386,6 @@ llan_intr(void *xsc)
/* llan_add_rxbuf does DMA sync and unload as well as requeue */
if (llan_add_rxbuf(sc, rx) != 0) {
if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
-phyp_hcall(H_ADD_LOGICAL_LAN_BUFFER, sc->unit,
-rx->rx_bufdesc);
continue;
}

View File

@ -1,9 +1,9 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
-* Copyright (c) 2018 Netflix
* Copyright (c) 2014 Roger Pau Monné <roger.pau@citrix.com>
* All rights reserved.
+* Copyright (c) 2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 2016 Netflix, Inc.
-* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,5 +1,5 @@
/*-
-* Copyright (c) 2016-2018 Netflix Inc.
+* Copyright (c) 2016-2018 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -66,6 +66,8 @@
.Fn teken_set_8bit "teken_t *t"
.Ft void
.Fn teken_set_cons25 "teken_t *t"
+.Ft void
+.Fn teken_set_cons25keys "teken_t *t"
.Sh DESCRIPTION
The
.Nm
@ -194,11 +196,24 @@ which can be used to support character sets like CP437 and ISO-8859-1.
.Pp
The
.Fn teken_set_cons25
-function switches terminal emulation to
+function sets the terminal emulation to
.Dv cons25 ,
-which is used by versions of
+which was the default for
+.Xr syscons 4
+in versions of
.Fx
prior to 9.0.
+This function is only useful for initialization.
+The emulation can be changed at any time using an escape sequence,
+and this function is not used then.
+.Pp
+The
+.Fn teken_set_cons25keys
+function tells the
+.Fn teken_get_sequence
+function to not interpret special keys in
+.Dv cons25
+mode.
.Sh SEE ALSO
.Xr ncurses 3 ,
.Xr termcap 3 ,

View File

@ -412,7 +412,14 @@ void
teken_set_cons25(teken_t *t)
{
-t->t_stateflags |= TS_CONS25 | TS_CONS25KEYS;
+t->t_stateflags |= TS_CONS25;
}
+void
+teken_set_cons25keys(teken_t *t)
+{
+t->t_stateflags |= TS_CONS25KEYS;
+}
/*
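
With the key handling split out of teken_set_cons25(), a consumer that wants the old combined behaviour now makes two calls at initialization time; one that wants cons25 emulation with standard key sequences calls only the first. A hypothetical init fragment (the funcs table and softc are assumed to be whatever the consumer already passes to teken_init()):

/* Hypothetical consumer init; funcs and softc are the caller's own. */
#include <teken.h>

static void
example_console_init(teken_t *t, const teken_funcs_t *funcs, void *softc)
{
	teken_init(t, funcs, softc);
	teken_set_cons25(t);		/* cons25 escape-sequence emulation */
	teken_set_cons25keys(t);	/* also emit cons25 special-key codes */
}
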

View File

@ -212,6 +212,7 @@ const char *teken_get_sequence(const teken_t *, unsigned int);
/* Legacy features. */
void teken_set_8bit(teken_t *);
void teken_set_cons25(teken_t *);
+void teken_set_cons25keys(teken_t *);
/* Color conversion. */
teken_color_t teken_256to16(teken_color_t);

View File

@ -1,8 +1,7 @@
#ifndef __callout_test_h__
#define __callout_test_h__
/*-
-* Copyright (c) 2015
-* Netflix Incorporated, All rights reserved.
+* Copyright (c) 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

View File

@ -1,5 +1,6 @@
/*-
-* Copyright (c) 2015 Netflix Inc. All rights reserved.
+* Copyright (c) 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:

Some files were not shown because too many files have changed in this diff.