Mark Murray 2013-09-01 13:33:05 +00:00
commit b881742f2e
35 changed files with 3515 additions and 1734 deletions

View File

@ -1,3 +1,9 @@
2007-06-05 Joerg Wunsch <j.gnu@uriah.heep.sax.de> (r23479)
PR preprocessor/23479
* doc/extend.texi: Document the 0b-prefixed binary integer
constant extension.
2007-05-01 Dwarakanath Rajagopal <dwarak.rajagopal@amd.com> (r124341)
* doc/invoke.texi: Fix typo, 'AMD Family 10h core' instead of

View File

@ -116,5 +116,6 @@ UNWIND_WRAPPER _Unwind_RaiseException 1
UNWIND_WRAPPER _Unwind_Resume 1
UNWIND_WRAPPER _Unwind_Resume_or_Rethrow 1
UNWIND_WRAPPER _Unwind_ForcedUnwind 3
UNWIND_WRAPPER _Unwind_Backtrace 2
#endif /* __symbian__ */
#endif /* ndef __symbian__ */

View File

@ -747,6 +747,66 @@ _Unwind_DeleteException (_Unwind_Exception * exc)
}
/* Perform stack backtrace through unwind data. */
_Unwind_Reason_Code
__gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
phase2_vrs * entry_vrs);
_Unwind_Reason_Code
__gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
phase2_vrs * entry_vrs)
{
phase1_vrs saved_vrs;
_Unwind_Reason_Code code;
_Unwind_Control_Block ucb;
_Unwind_Control_Block *ucbp = &ucb;
/* Set the pc to the call site. */
entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
/* Save the core registers. */
saved_vrs.core = entry_vrs->core;
/* Set demand-save flags. */
saved_vrs.demand_save_flags = ~(_uw) 0;
do
{
/* Find the entry for this routine. */
if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK)
{
code = _URC_FAILURE;
break;
}
/* The dwarf unwinder assumes the context structure holds things
like the function and LSDA pointers. The ARM implementation
caches these in the exception header (UCB). To avoid
rewriting everything we make the virtual IP register point at
the UCB. */
_Unwind_SetGR((_Unwind_Context *)&saved_vrs, 12, (_Unwind_Ptr) ucbp);
/* Call trace function. */
if ((*trace) ((_Unwind_Context *) &saved_vrs, trace_argument)
!= _URC_NO_REASON)
{
code = _URC_FAILURE;
break;
}
/* Call the pr to decide what to do. */
code = ((personality_routine) UCB_PR_ADDR (ucbp))
(_US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND,
ucbp, (void *) &saved_vrs);
}
while (code != _URC_END_OF_STACK
&& code != _URC_FAILURE);
finish:
restore_non_core_regs (&saved_vrs);
return code;
}
/* Common implementation for ARM ABI defined personality routines.
ID is the index of the personality routine, other arguments are as defined
by __aeabi_unwind_cpp_pr{0,1,2}. */
@ -1014,3 +1074,19 @@ _Unwind_GetTextRelBase (_Unwind_Context *context __attribute__ ((unused)))
{
abort ();
}
#ifdef __FreeBSD__
/* FreeBSD expects these to be functions */
_Unwind_Ptr
_Unwind_GetIP (struct _Unwind_Context *context)
{
return _Unwind_GetGR (context, 15) & ~(_Unwind_Word)1;
}
_Unwind_Ptr
_Unwind_GetIPInfo (struct _Unwind_Context *context, int *ip_before_insn)
{
*ip_before_insn = 0;
return _Unwind_GetGR (context, 15) & ~(_Unwind_Word)1;
}
#endif

View File

@ -205,6 +205,13 @@ extern "C" {
_Unwind_Control_Block *, struct _Unwind_Context *, void *);
_Unwind_Reason_Code _Unwind_ForcedUnwind (_Unwind_Control_Block *,
_Unwind_Stop_Fn, void *);
/* @@@ Use unwind data to perform a stack backtrace. The trace callback
is called for every stack frame in the call chain, but no cleanup
actions are performed. */
typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) (_Unwind_Context *, void *);
_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn,
void*);
_Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *);
void _Unwind_Complete(_Unwind_Control_Block *ucbp);
void _Unwind_DeleteException (_Unwind_Exception *);
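For readers unfamiliar with this interface: the trace callback declared above is invoked once per frame, innermost first, and no cleanup actions run. A minimal, self-contained caller might look like the following sketch (illustrative only, not part of the change; it assumes a hosted environment with stdio):
#include <unwind.h>
#include <stdio.h>
static _Unwind_Reason_Code
print_frame (struct _Unwind_Context *ctx, void *arg)
{
  int *count = arg;
  /* On ARM, _Unwind_GetIP already masks off the Thumb bit. */
  printf ("#%d %p\n", (*count)++, (void *) _Unwind_GetIP (ctx));
  return _URC_NO_REASON;
}
void
print_backtrace (void)
{
  int count = 0;
  (void) _Unwind_Backtrace (print_frame, &count);
}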
@ -246,12 +253,17 @@ extern "C" {
return val;
}
#ifndef __FreeBSD__
/* Return the address of the instruction, not the actual IP value. */
#define _Unwind_GetIP(context) \
(_Unwind_GetGR (context, 15) & ~(_Unwind_Word)1)
#define _Unwind_GetIPInfo(context, ip_before_insn) \
(*ip_before_insn = 0, _Unwind_GetGR (context, 15) & ~(_Unwind_Word)1)
#else
_Unwind_Ptr _Unwind_GetIP (struct _Unwind_Context *);
_Unwind_Ptr _Unwind_GetIPInfo (struct _Unwind_Context *, int *);
#endif
static inline void
_Unwind_SetGR (_Unwind_Context *context, int regno, _Unwind_Word val)

View File

@ -81,6 +81,7 @@ extensions, accepted by GCC in C89 mode and in C++.
* Pragmas:: Pragmas accepted by GCC.
* Unnamed Fields:: Unnamed struct/union fields within structs/unions.
* Thread-Local:: Per-thread variables.
* Binary constants:: Binary constants using the @samp{0b} prefix.
@end menu
@node Statement Exprs
@ -10424,6 +10425,28 @@ Non-@code{static} members shall not be @code{__thread}.
@end quotation
@end itemize
@node Binary constants
@section Binary constants using the @samp{0b} prefix
@cindex Binary constants using the @samp{0b} prefix
Integer constants can be written as binary constants, consisting of a
sequence of @samp{0} and @samp{1} digits, prefixed by @samp{0b} or
@samp{0B}. This is particularly useful in environments that operate a
lot on the bit-level (like microcontrollers).
The following statements are identical:
@smallexample
i = 42;
i = 0x2a;
i = 052;
i = 0b101010;
@end smallexample
The type of these constants follows the same rules as for octal or
hexadecimal integer constants, so suffixes like @samp{L} or @samp{UL}
can be applied.
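For instance (an illustrative addition, not part of this patch), a suffixed binary constant behaves exactly like its hexadecimal equivalent:
@smallexample
unsigned long mask = 0b1000000000000001UL; /* same value as 0x8001UL */
@end smallexample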
@node C++ Extensions
@chapter Extensions to the C++ Language
@cindex extensions, C++ language

View File

@ -188,6 +188,11 @@ cpp_classify_number (cpp_reader *pfile, const cpp_token *token)
radix = 16;
str++;
}
else if ((*str == 'b' || *str == 'B') && (str[1] == '0' || str[1] == '1'))
{
radix = 2;
str++;
}
}
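Note the str[1] check above: a bare "0b" with no binary digit following is not claimed as a binary prefix. Two illustrative inputs (hypothetical), as handled by the diagnostics added below:
i = 0b101;   /* radix 2, value 5 */
i = 0b102;   /* rejected below: invalid digit "2" in binary constant */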
/* Now scan for a well-formed integer or float. */
@ -226,10 +231,22 @@ cpp_classify_number (cpp_reader *pfile, const cpp_token *token)
radix = 10;
if (max_digit >= radix)
SYNTAX_ERROR2 ("invalid digit \"%c\" in octal constant", '0' + max_digit);
{
if (radix == 2)
SYNTAX_ERROR2 ("invalid digit \"%c\" in binary constant", '0' + max_digit);
else
SYNTAX_ERROR2 ("invalid digit \"%c\" in octal constant", '0' + max_digit);
}
if (float_flag != NOT_FLOAT)
{
if (radix == 2)
{
cpp_error (pfile, CPP_DL_ERROR,
"invalid prefix \"0b\" for floating constant");
return CPP_N_INVALID;
}
if (radix == 16 && CPP_PEDANTIC (pfile) && !CPP_OPTION (pfile, c99))
cpp_error (pfile, CPP_DL_PEDWARN,
"use of C99 hexadecimal floating constant");
@ -321,11 +338,16 @@ cpp_classify_number (cpp_reader *pfile, const cpp_token *token)
if ((result & CPP_N_IMAGINARY) && CPP_PEDANTIC (pfile))
cpp_error (pfile, CPP_DL_PEDWARN,
"imaginary constants are a GCC extension");
if (radix == 2 && CPP_PEDANTIC (pfile))
cpp_error (pfile, CPP_DL_PEDWARN,
"binary constants are a GCC extension");
if (radix == 10)
result |= CPP_N_DECIMAL;
else if (radix == 16)
result |= CPP_N_HEX;
else if (radix == 2)
result |= CPP_N_BINARY;
else
result |= CPP_N_OCTAL;
@ -376,6 +398,11 @@ cpp_interpret_integer (cpp_reader *pfile, const cpp_token *token,
base = 16;
p += 2;
}
else if ((type & CPP_N_RADIX) == CPP_N_BINARY)
{
base = 2;
p += 2;
}
/* We can add a digit to numbers strictly less than this without
needing the precision and slowness of double integers. */
@ -431,12 +458,25 @@ static cpp_num
append_digit (cpp_num num, int digit, int base, size_t precision)
{
cpp_num result;
unsigned int shift = 3 + (base == 16);
unsigned int shift;
bool overflow;
cpp_num_part add_high, add_low;
/* Multiply by 8 or 16. Catching this overflow here means we don't
/* Multiply by 2, 8 or 16. Catching this overflow here means we don't
need to worry about add_high overflowing. */
switch (base)
{
case 2:
shift = 1;
break;
case 16:
shift = 4;
break;
default:
shift = 3;
}
overflow = !!(num.high >> (PART_PRECISION - shift));
result.high = num.high << shift;
result.low = num.low << shift;
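The overflow test generalizes to any power-of-two base: a left shift by `shift' loses information exactly when any of the top `shift' bits are set beforehand. The same idea on a single 64-bit part, as a standalone sketch (hypothetical helper, not libcpp code):
#include <stdbool.h>
#include <stdint.h>
/* Append digit d in base 2, 8 or 16 (shift = 1, 3 or 4); returns true
   on overflow, mirroring the check in append_digit above. */
static bool
append_digit64 (uint64_t *num, unsigned int d, unsigned int shift)
{
  bool overflow = (*num >> (64 - shift)) != 0;
  *num = (*num << shift) | d;
  return overflow;
}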

View File

@ -745,6 +745,7 @@ struct cpp_num
#define CPP_N_DECIMAL 0x0100
#define CPP_N_HEX 0x0200
#define CPP_N_OCTAL 0x0400
#define CPP_N_BINARY 0x0800
#define CPP_N_UNSIGNED 0x1000 /* Properties. */
#define CPP_N_IMAGINARY 0x2000

View File

@ -34,6 +34,7 @@ static char sccsid[] = "@(#)errlst.c 8.2 (Berkeley) 11/16/93";
__FBSDID("$FreeBSD$");
#include <stdio.h>
#include "errlst.h"
const char *const sys_errlist[] = {
"No error: 0", /* 0 - ENOERROR */
@ -156,3 +157,8 @@ const char *const sys_errlist[] = {
"Previous owner died", /* 96 - EOWNERDEAD */
};
const int sys_nerr = sizeof(sys_errlist) / sizeof(sys_errlist[0]);
#ifdef PIC
__strong_reference(sys_errlist, __hidden_sys_errlist);
__strong_reference(sys_nerr, __hidden_sys_nerr);
#endif

lib/libc/include/errlst.h Normal file
View File

@ -0,0 +1,43 @@
/*-
* Copyright (c) 2013 Jilles Tjoelker
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __ERRLST_H__
#define __ERRLST_H__
#include <sys/cdefs.h>
#ifdef PIC
/* If the main executable imports these, do not use its copy from libc.so. */
extern const char *const __hidden_sys_errlist[] __hidden;
extern const int __hidden_sys_nerr __hidden;
#else
#define __hidden_sys_errlist sys_errlist
#define __hidden_sys_nerr sys_nerr
#endif
#endif /* __ERRLST_H__ */
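For reference, __strong_reference (from <sys/cdefs.h>) expands to a GCC alias attribute, roughly:
extern __typeof(sys_nerr) __hidden_sys_nerr __attribute__((__alias__("sys_nerr")));
so libc-internal consumers bind to the hidden alias and cannot be interposed by a copy of sys_nerr/sys_errlist relocated into the main executable.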

View File

@ -34,6 +34,7 @@
#include <vis.h>
#include <assert.h>
#include <sys/time.h>
#include "errlst.h"
#include "printf.h"
int
@ -54,7 +55,7 @@ __printf_render_errno(struct __printf_io *io, const struct printf_info *pi __unu
ret = 0;
error = *((const int *)arg[0]);
if (error >= 0 && error < sys_nerr) {
if (error >= 0 && error < __hidden_sys_nerr) {
p = strerror(error);
return (__printf_out(io, pi, p, strlen(p)));
}

View File

@ -42,6 +42,8 @@ __FBSDID("$FreeBSD$");
#include <string.h>
#include <stdio.h>
#include "errlst.h"
#define UPREFIX "Unknown error"
/*
@ -87,7 +89,7 @@ strerror_r(int errnum, char *strerrbuf, size_t buflen)
catd = catopen("libc", NL_CAT_LOCALE);
#endif
if (errnum < 0 || errnum >= sys_nerr) {
if (errnum < 0 || errnum >= __hidden_sys_nerr) {
errstr(errnum,
#if defined(NLS)
catgets(catd, 1, 0xffff, UPREFIX),
@ -99,9 +101,9 @@ strerror_r(int errnum, char *strerrbuf, size_t buflen)
} else {
if (strlcpy(strerrbuf,
#if defined(NLS)
catgets(catd, 1, errnum, sys_errlist[errnum]),
catgets(catd, 1, errnum, __hidden_sys_errlist[errnum]),
#else
sys_errlist[errnum],
__hidden_sys_errlist[errnum],
#endif
buflen) >= buflen)
retval = ERANGE;

View File

@ -84,13 +84,21 @@ _libelf_xlate_shtype(uint32_t sht)
case SHT_SUNW_dof:
return (ELF_T_BYTE);
#endif
case SHT_ARM_PREEMPTMAP:
/* FALLTHROUGH */
case SHT_ARM_ATTRIBUTES:
/* FALLTHROUGH */
case SHT_ARM_DEBUGOVERLAY:
/* FALLTHROUGH */
case SHT_ARM_OVERLAYSECTION:
/* FALLTHROUGH */
case SHT_MIPS_DWARF:
/* FALLTHROUGH */
case SHT_MIPS_REGINFO:
/* FALLTHROUGH */
case SHT_MIPS_OPTIONS:
/* FALLTHROUGH */
case SHT_AMD64_UNWIND: /* == SHT_IA_64_UNWIND */
case SHT_AMD64_UNWIND: /* == SHT_IA_64_UNWIND == SHT_ARM_EXIDX */
return (ELF_T_BYTE);
default:
return (-1);

View File

@ -365,7 +365,7 @@ init_ccbs()
for (i = 0; i < MAX_INITIATORS; i++) {
struct ccb_accept_tio *atio;
struct atio_descr *a_descr;
struct ccb_immed_notify *inot;
struct ccb_immediate_notify *inot;
atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
if (atio == NULL) {
@ -382,7 +382,7 @@ init_ccbs()
atio->ccb_h.targ_descr = a_descr;
send_ccb((union ccb *)atio, /*priority*/1);
inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
inot = (struct ccb_immediate_notify *)malloc(sizeof(*inot));
if (inot == NULL) {
warn("malloc INOT");
return (-1);
@ -593,7 +593,7 @@ handle_read()
oo += run_queue(c_descr->atio);
break;
}
case XPT_IMMED_NOTIFY:
case XPT_IMMEDIATE_NOTIFY:
/* INOTs are handled with priority */
TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
periph_links.tqe);
@ -903,7 +903,7 @@ free_ccb(union ccb *ccb)
case XPT_ACCEPT_TARGET_IO:
free(ccb->ccb_h.targ_descr);
/* FALLTHROUGH */
case XPT_IMMED_NOTIFY:
case XPT_IMMEDIATE_NOTIFY:
default:
free(ccb);
break;

View File

@ -69,14 +69,30 @@ prompt before booting the kernel or stored in
.Xr loader.conf 5 .
.Bl -tag -width "xxxxxx"
.It Va hw.vtnet.csum_disable
.It Va hw.vtnet. Ns Ar X Ns Va .csum_disable
This tunable disables receive and send checksum offload.
The default value is 0.
.It Va hw.vtnet.tso_disable
.It Va hw.vtnet. Ns Ar X Ns Va .tso_disable
This tunable disables TSO.
The default value is 0.
.It Va hw.vtnet.lro_disable
.It Va hw.vtnet. Ns Ar X Ns Va .lro_disable
This tunable disables LRO.
The default value is 0.
.It Va hw.vtnet.mq_disable
.It Va hw.vtnet. Ns Ar X Ns Va .mq_disable
This tunable disables multiqueue.
The default value is 0.
.It Va hw.vtnet.mq_max_pairs
.It Va hw.vtnet. Ns Ar X Ns Va .mq_max_pairs
This tunable sets the maximum number of transmit and receive queue pairs.
Multiple queues are only supported when the Multiqueue feature is negotiated.
This driver supports a maximum of 8 queue pairs.
The number of queue pairs used is the lesser of the maximum supported by the
driver and the hypervisor, the number of CPUs present in the guest, and this
tunable if not zero.
The default value is 0.
.El
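For example (an illustrative snippet, not part of the manual page), TSO could be disabled on the first device only by adding the following line to loader.conf:
hw.vtnet.0.tso_disable="1"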
.Sh SEE ALSO
.Xr arp 4 ,

View File

@ -160,11 +160,11 @@ IDTVEC(xen_intr_upcall)
SUPERALIGN_TEXT
global_invltlb:
movl %cr4,%eax
andl $~0x80,%eax
movl %eax,%cr4
orl $0x80,%eax
movl %eax,%cr4
movq %cr4,%rax
andq $~0x80,%rax /* PGE */
movq %rax,%cr4
orq $0x80,%rax
movq %rax,%cr4
invltlb_ret_clear_pm_save:
movq smp_tlb_pmap,%rdx
testq %rdx,%rdx
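Toggling bit 7 of %cr4 (CR4.PGE) flushes the entire TLB, global entries included; the change above merely widens the accesses to 64 bits for amd64. A rough C equivalent using the cpufunc.h accessors (a sketch, not the committed code):
u_long cr4;
cr4 = rcr4();
load_cr4(cr4 & ~CR4_PGE);	/* clearing PGE flushes all TLB entries */
load_cr4(cr4 | CR4_PGE);	/* restore global-page support */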

View File

@ -762,7 +762,6 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
/* Initialize the PAT MSR. */
pmap_init_pat();
#ifdef SMP
/* Initialize TLB Context Id. */
TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled);
if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) {
@ -773,8 +772,10 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
invpcid_works = (cpu_stdext_feature & CPUID_STDEXT_INVPCID)
!= 0;
kernel_pmap->pm_pcid = 0;
} else
#ifndef SMP
pmap_pcid_enabled = 0;
#endif
} else
pmap_pcid_enabled = 0;
}

View File

@ -961,23 +961,23 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
/*
* Valid combinations:
* - CAM_SEND_STATUS, SCATTER_VALID = 0, dxfer_len = 0,
* - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
* sglist_cnt = 0
* - CAM_SEND_STATUS = 0, SCATTER_VALID = 0, dxfer_len != 0,
* - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
* sglist_cnt = 0
* - CAM_SEND_STATUS = 0, SCATTER_VALID, dxfer_len != 0,
* - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
* sglist_cnt != 0
*/
#ifdef CTLFEDEBUG
if (((flags & CAM_SEND_STATUS)
&& (((flags & CAM_SCATTER_VALID) != 0)
&& (((flags & CAM_DATA_SG) != 0)
|| (dxfer_len != 0)
|| (csio->sglist_cnt != 0)))
|| (((flags & CAM_SEND_STATUS) == 0)
&& (dxfer_len == 0))
|| ((flags & CAM_SCATTER_VALID)
|| ((flags & CAM_DATA_SG)
&& (csio->sglist_cnt == 0))
|| (((flags & CAM_SCATTER_VALID) == 0)
|| (((flags & CAM_DATA_SG) == 0)
&& (csio->sglist_cnt != 0))) {
printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
"%d sg %u\n", __func__, atio->tag_id,

View File

@ -56,6 +56,8 @@ __FBSDID("$FreeBSD$");
#include <cam/scsi/scsi_enc.h>
#include <cam/scsi/scsi_enc_internal.h>
#include "opt_ses.h"
MALLOC_DEFINE(M_SCSIENC, "SCSI ENC", "SCSI ENC buffers");
/* Enclosure type independent driver */
@ -719,12 +721,12 @@ enc_type(struct ccb_getdev *cgd)
return (ENC_NONE);
}
#ifdef ENC_ENABLE_PASSTHROUGH
#ifdef SES_ENABLE_PASSTHROUGH
if ((iqd[6] & 0x40) && (iqd[2] & 0x7) >= 2) {
/*
* PassThrough Device.
*/
return (ENC_ENC_PASSTHROUGH);
return (ENC_SES_PASSTHROUGH);
}
#endif

View File

@ -283,16 +283,13 @@ targbhenlun(struct cam_periph *periph)
xpt_setup_ccb(&atio->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
atio->ccb_h.cbfcnp = targbhdone;
xpt_action((union ccb *)atio);
status = atio->ccb_h.status;
if (status != CAM_REQ_INPROG) {
targbhfreedescr(atio->ccb_h.ccb_descr);
free(atio, M_SCSIBH);
break;
}
((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
softc->accept_tio_list;
softc->accept_tio_list = atio;
xpt_action((union ccb *)atio);
status = atio->ccb_h.status;
if (status != CAM_REQ_INPROG)
break;
}
if (i == 0) {
@ -308,10 +305,10 @@ targbhenlun(struct cam_periph *periph)
* so the SIM can tell us of asynchronous target mode events.
*/
for (i = 0; i < MAX_ACCEPT; i++) {
struct ccb_immed_notify *inot;
struct ccb_immediate_notify *inot;
inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_SCSIBH,
M_NOWAIT);
inot = (struct ccb_immediate_notify*)malloc(sizeof(*inot),
M_SCSIBH, M_NOWAIT);
if (inot == NULL) {
status = CAM_RESRC_UNAVAIL;
@ -319,16 +316,14 @@ targbhenlun(struct cam_periph *periph)
}
xpt_setup_ccb(&inot->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
inot->ccb_h.cbfcnp = targbhdone;
xpt_action((union ccb *)inot);
status = inot->ccb_h.status;
if (status != CAM_REQ_INPROG) {
free(inot, M_SCSIBH);
break;
}
SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
periph_links.sle);
xpt_action((union ccb *)inot);
status = inot->ccb_h.status;
if (status != CAM_REQ_INPROG)
break;
}
if (i == 0) {
@ -413,7 +408,9 @@ targbhctor(struct cam_periph *periph, void *arg)
periph->softc = softc;
softc->init_level++;
return (targbhenlun(periph));
if (targbhenlun(periph) != CAM_REQ_CMP)
cam_periph_invalidate(periph);
return (CAM_REQ_CMP);
}
static void
@ -715,7 +712,7 @@ targbhdone(struct cam_periph *periph, union ccb *done_ccb)
}
break;
}
case XPT_IMMED_NOTIFY:
case XPT_IMMEDIATE_NOTIFY:
{
int frozen;

View File

@ -551,6 +551,7 @@ targwrite(struct cdev *dev, struct uio *uio, int ioflag)
switch (func_code) {
case XPT_ACCEPT_TARGET_IO:
case XPT_IMMED_NOTIFY:
case XPT_IMMEDIATE_NOTIFY:
cam_periph_lock(softc->periph);
ccb = targgetccb(softc, func_code, priority);
descr = (struct targ_cmd_descr *)ccb->ccb_h.targ_descr;
@ -781,6 +782,7 @@ targdone(struct cam_periph *periph, union ccb *done_ccb)
switch (done_ccb->ccb_h.func_code) {
/* All FC_*_QUEUED CCBs go back to userland */
case XPT_IMMED_NOTIFY:
case XPT_IMMEDIATE_NOTIFY:
case XPT_ACCEPT_TARGET_IO:
case XPT_CONT_TARGET_IO:
TAILQ_INSERT_TAIL(&softc->user_ccb_queue, &done_ccb->ccb_h,
@ -961,6 +963,7 @@ targfreeccb(struct targ_softc *softc, union ccb *ccb)
switch (ccb->ccb_h.func_code) {
case XPT_ACCEPT_TARGET_IO:
case XPT_IMMED_NOTIFY:
case XPT_IMMEDIATE_NOTIFY:
CAM_DEBUG_PRINT(CAM_DEBUG_PERIPH, ("freeing ccb %p\n", ccb));
free(ccb, M_TARG);
break;
@ -1131,6 +1134,9 @@ targccblen(xpt_opcode func_code)
case XPT_IMMED_NOTIFY:
len = sizeof(struct ccb_immed_notify);
break;
case XPT_IMMEDIATE_NOTIFY:
len = sizeof(struct ccb_immediate_notify);
break;
case XPT_REL_SIMQ:
len = sizeof(struct ccb_relsim);
break;

View File

@ -349,51 +349,85 @@ zero:
uint64_t
dtrace_getarg(int arg, int aframes)
{
uintptr_t val;
uintptr_t *fp = (uintptr_t *)dtrace_getfp();
uintptr_t *stack;
int i;
/*
* A total of 8 arguments are passed via registers; any argument with
* index of 7 or lower is therefore in a register.
*/
int inreg = 7;
for (i = 1; i <= aframes; i++) {
fp = (uintptr_t *)*fp;
/*
* On ppc32 AIM, and booke, trapexit() is the immediately following
* label. On ppc64 AIM trapexit() follows a nop.
*/
if (((long)(fp[1]) == (long)trapexit) ||
(((long)(fp[1]) + 4 == (long)trapexit))) {
/*
* In the case of powerpc, we will use the pointer to the regs
* structure that was pushed when we took the trap. To get this
* structure, we must increment beyond the frame structure. If the
* argument that we're seeking is passed on the stack, we'll pull
* the true stack pointer out of the saved registers and decrement
* our argument by the number of arguments passed in registers; if
the argument we're seeking is passed in registers, we can just
* load it directly.
*/
#ifdef __powerpc64__
struct reg *rp = (struct reg *)((uintptr_t)fp[0] + 48);
#else
struct reg *rp = (struct reg *)((uintptr_t)fp[0] + 8);
#endif
if (arg <= inreg) {
stack = &rp->fixreg[3];
} else {
stack = (uintptr_t *)(rp->fixreg[1]);
arg -= inreg;
}
goto load;
}
}
/*
* We know that we did not come through a trap to get into
* dtrace_probe() -- the provider simply called dtrace_probe()
* directly. As this is the case, we need to shift the argument
* that we're looking for: the probe ID is the first argument to
* dtrace_probe(), so the argument n will actually be found where
* one would expect to find argument (n + 1).
*/
arg++;
if (arg <= inreg) {
/*
* This shouldn't happen. If the argument is passed in a
* register then it should have been, well, passed in a
* register...
*/
DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
return (0);
}
arg -= (inreg + 1);
stack = fp + 2;
load:
DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
val = stack[arg];
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
return (val);
return (0);
}
#ifdef notyet
{
int depth = 0;
register_t sp;
vm_offset_t callpc;
pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
if (intrpc != 0)
pcstack[depth++] = (pc_t) intrpc;
aframes++;
sp = dtrace_getfp();
while (depth < pcstack_limit) {
if (!INKERNEL((long) sp))
break;
callpc = *(void **)(sp + RETURN_OFFSET);
if (!INKERNEL(callpc))
break;
if (aframes > 0) {
aframes--;
if ((aframes == 0) && (caller != 0)) {
pcstack[depth++] = caller;
}
}
else {
pcstack[depth++] = callpc;
}
sp = *(void **)sp;
}
for (; depth < pcstack_limit; depth++) {
pcstack[depth] = 0;
}
}
#endif
int
dtrace_getstackdepth(int aframes)
{

View File

@ -51,6 +51,8 @@ extern int dtrace_in_probe;
extern dtrace_id_t dtrace_probeid_error;
extern int (*dtrace_invop_jump_addr)(struct trapframe *);
extern void dtrace_getnanotime(struct timespec *tsp);
int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);
void dtrace_invop_init(void);
void dtrace_invop_uninit(void);
@ -63,13 +65,13 @@ typedef struct dtrace_invop_hdlr {
dtrace_invop_hdlr_t *dtrace_invop_hdlr;
int
dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t arg0)
{
dtrace_invop_hdlr_t *hdlr;
int rval;
for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
if ((rval = hdlr->dtih_func(addr, stack, arg0)) != 0)
return (rval);
return (0);
@ -134,7 +136,7 @@ dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
CPU_SETOF(cpu, &cpus);
smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
smp_no_rendevous_barrier, arg);
smp_no_rendevous_barrier, arg);
}
static void
@ -145,9 +147,82 @@ dtrace_sync_func(void)
void
dtrace_sync(void)
{
dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}
static int64_t tgt_cpu_tsc;
static int64_t hst_cpu_tsc;
static int64_t timebase_skew[MAXCPU];
static uint64_t nsec_scale;
/*
 * This is taken from the amd64 dtrace_subr.c, to provide a synchronized
 * timer between multiple processors in dtrace. Since PowerPC timebases
 * can be much lower than x86, the scale shift is 26 instead of 28,
 * allowing for a minimum timebase of 15.63MHz.
 */
#define SCALE_SHIFT 26
static void
dtrace_gethrtime_init_cpu(void *arg)
{
uintptr_t cpu = (uintptr_t) arg;
if (cpu == curcpu)
tgt_cpu_tsc = mftb();
else
hst_cpu_tsc = mftb();
}
static void
dtrace_gethrtime_init(void *arg)
{
struct pcpu *pc;
uint64_t tb_f;
cpuset_t map;
int i;
tb_f = cpu_tickrate();
/*
* The following line checks that nsec_scale calculated below
* doesn't overflow 32-bit unsigned integer, so that it can multiply
* another 32-bit integer without overflowing 64-bit.
* Thus minimum supported Timebase frequency is 15.63MHz.
*/
KASSERT(tb_f > (NANOSEC >> (32 - SCALE_SHIFT)), ("Timebase frequency is too low"));
/*
* We scale up NANOSEC/tb_f ratio to preserve as much precision
* as possible.
* 2^26 factor was chosen quite arbitrarily from practical
* considerations:
* - it supports timebase frequencies as low as 15.63MHz (see above);
*/
nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tb_f;
/* The current CPU is the reference one. */
sched_pin();
timebase_skew[curcpu] = 0;
CPU_FOREACH(i) {
if (i == curcpu)
continue;
pc = pcpu_find(i);
CPU_SETOF(PCPU_GET(cpuid), &map);
CPU_SET(pc->pc_cpuid, &map);
smp_rendezvous_cpus(map, NULL,
dtrace_gethrtime_init_cpu,
smp_no_rendevous_barrier, (void *)(uintptr_t) i);
timebase_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
}
sched_unpin();
}
SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init, NULL);
/*
* DTrace needs a high resolution time function which can
* be called from a probe context and guaranteed not to have
@ -158,12 +233,21 @@ dtrace_sync(void)
uint64_t
dtrace_gethrtime()
{
struct timespec curtime;
nanouptime(&curtime);
return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);
uint64_t timebase;
uint32_t lo;
uint32_t hi;
/*
 * We split the timebase value into lower and higher 32-bit halves and
 * separately scale them with nsec_scale, then we scale them down by
 * 2^26 (SCALE_SHIFT; see the nsec_scale calculation above), taking
 * into account the 32-bit shift of the higher half, and finally add.
 */
timebase = mftb() - timebase_skew[curcpu];
lo = timebase;
hi = timebase >> 32;
return (((lo * nsec_scale) >> SCALE_SHIFT) +
((hi * nsec_scale) << (32 - SCALE_SHIFT)));
}
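A userland sanity check of the scaling identity used above (a sketch; the 33.33MHz timebase is assumed, and mftb() minus the skew is stood in by a constant):
#include <stdint.h>
#include <stdio.h>
#define SCALE_SHIFT	26
#define NANOSEC		1000000000ULL
int
main(void)
{
	uint64_t tb_f = 33333333;		/* assumed timebase frequency */
	uint64_t nsec_scale = (NANOSEC << SCALE_SHIFT) / tb_f;
	uint64_t timebase = 10000000000ULL;	/* stand-in for mftb() - skew */
	uint32_t lo = timebase, hi = timebase >> 32;
	uint64_t ns;

	ns = (((uint64_t)lo * nsec_scale) >> SCALE_SHIFT) +
	    (((uint64_t)hi * nsec_scale) << (32 - SCALE_SHIFT));
	/* Agrees with the direct division, which would overflow 64 bits
	   for larger timebase values. */
	printf("%ju ~ %ju\n", (uintmax_t)ns,
	    (uintmax_t)(timebase * NANOSEC / tb_f));
	return (0);
}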
uint64_t
@ -171,12 +255,12 @@ dtrace_gethrestime(void)
{
struct timespec curtime;
getnanotime(&curtime);
dtrace_getnanotime(&curtime);
return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);
}
/* Function to handle DTrace traps during probes. See amd64/amd64/trap.c */
/* Function to handle DTrace traps during probes. See powerpc/powerpc/trap.c */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
@ -196,34 +280,34 @@ dtrace_trap(struct trapframe *frame, u_int type)
* All the rest will be handled in the usual way.
*/
switch (type) {
/* Page fault. */
case EXC_DSI:
case EXC_DSE:
/* Flag a bad address. */
cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
cpu_core[curcpu].cpuc_dtrace_illval = frame->cpu.aim.dar;
/* Page fault. */
case EXC_DSI:
case EXC_DSE:
/* Flag a bad address. */
cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
cpu_core[curcpu].cpuc_dtrace_illval = frame->cpu.aim.dar;
/*
* Offset the instruction pointer to the instruction
* following the one causing the fault.
*/
frame->srr0 += sizeof(int);
return (1);
case EXC_ISI:
case EXC_ISE:
/* Flag a bad address. */
cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
cpu_core[curcpu].cpuc_dtrace_illval = frame->srr0;
/*
* Offset the instruction pointer to the instruction
* following the one causing the fault.
*/
frame->srr0 += sizeof(int);
return (1);
case EXC_ISI:
case EXC_ISE:
/* Flag a bad address. */
cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
cpu_core[curcpu].cpuc_dtrace_illval = frame->srr0;
/*
* Offset the instruction pointer to the instruction
* following the one causing the fault.
*/
frame->srr0 += sizeof(int);
return (1);
default:
/* Handle all other traps in the usual way. */
break;
/*
* Offset the instruction pointer to the instruction
* following the one causing the fault.
*/
frame->srr0 += sizeof(int);
return (1);
default:
/* Handle all other traps in the usual way. */
break;
}
}
@ -237,28 +321,29 @@ dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
{
dtrace_probe(dtrace_probeid_error, (uint64_t)(uintptr_t)state,
(uintptr_t)epid,
(uintptr_t)which, (uintptr_t)fault, (uintptr_t)fltoffs);
(uintptr_t)epid,
(uintptr_t)which, (uintptr_t)fault, (uintptr_t)fltoffs);
}
static int
dtrace_invop_start(struct trapframe *frame)
{
switch (dtrace_invop(frame->srr0, (uintptr_t *)frame, frame->fixreg[3])) {
case DTRACE_INVOP_JUMP:
break;
case DTRACE_INVOP_BCTR:
frame->srr0 = frame->ctr;
break;
case DTRACE_INVOP_BLR:
frame->srr0 = frame->lr;
break;
case DTRACE_INVOP_MFLR_R0:
frame->fixreg[0] = frame->lr ;
break;
default:
return (-1);
break;
case DTRACE_INVOP_JUMP:
break;
case DTRACE_INVOP_BCTR:
frame->srr0 = frame->ctr;
break;
case DTRACE_INVOP_BLR:
frame->srr0 = frame->lr;
break;
case DTRACE_INVOP_MFLR_R0:
frame->fixreg[0] = frame->lr;
frame->srr0 = frame->srr0 + 4;
break;
default:
return (-1);
break;
}
return (0);

View File

@ -57,6 +57,7 @@
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#include <sys/dtrace.h>
@ -172,7 +173,11 @@ fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
tmp = fbt->fbtp_savedval & FBT_BR_MASK;
/* Sign extend. */
if (tmp & 0x02000000)
tmp |= 0xFC000000;
#ifdef __powerpc64__
tmp |= 0xfffffffffc000000ULL;
#else
tmp |= 0xfc000000UL;
#endif
frame->srr0 += tmp;
}
cpu->cpu_dtrace_caller = 0;
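In isolation, the widened sign extension computes the displacement of a PowerPC I-form branch; a sketch (the FBT_BR_MASK value is assumed to select the 26-bit LI||0b00 field):
#include <stdint.h>
#define FBT_BR_MASK	0x03fffffc	/* assumed: LI field plus two zero bits */
static int64_t
branch_displacement(uint32_t instr)
{
	uint64_t tmp = instr & FBT_BR_MASK;

	if (tmp & 0x02000000)		/* bit 25 is the sign bit */
		tmp |= 0xfffffffffc000000ULL;
	return ((int64_t)tmp);
}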
@ -193,9 +198,12 @@ fbt_provide_module_function(linker_file_t lf, int symindx,
const char *name = symval->name;
fbt_probe_t *fbt, *retfbt;
int j;
int size;
u_int32_t *instr, *limit;
/* PowerPC64 uses '.' prefixes on symbol names, ignore it. */
if (name[0] == '.')
name++;
if (strncmp(name, "dtrace_", 7) == 0 &&
strncmp(name, "dtrace_safe_", 12) != 0) {
/*
@ -210,8 +218,6 @@ fbt_provide_module_function(linker_file_t lf, int symindx,
if (name[0] == '_' && name[1] == '_')
return (0);
size = symval->size;
instr = (u_int32_t *) symval->value;
limit = (u_int32_t *) symval->value + symval->size;
@ -219,7 +225,7 @@ fbt_provide_module_function(linker_file_t lf, int symindx,
if (*instr == FBT_MFLR_R0)
break;
if (*instr != FBT_MFLR_R0);
if (*instr != FBT_MFLR_R0)
return (0);
fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
@ -264,9 +270,6 @@ again:
}
}
if (*instr == FBT_MFLR_R0)
return (0);
if (*instr != FBT_MTLR_R0) {
instr++;
goto again;
@ -291,7 +294,7 @@ again:
if (retfbt == NULL) {
fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
name, FBT_RETURN, 3, fbt);
name, FBT_RETURN, 5, fbt);
} else {
retfbt->fbtp_next = fbt;
fbt->fbtp_id = retfbt->fbtp_id;
@ -317,7 +320,7 @@ again:
lf->fbt_nentries++;
instr += size;
instr += 4;
goto again;
}
@ -434,6 +437,7 @@ fbt_enable(void *arg, dtrace_id_t id, void *parg)
for (; fbt != NULL; fbt = fbt->fbtp_next) {
*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
__syncicache(fbt->fbtp_patchpoint, 4);
}
}
@ -449,8 +453,10 @@ fbt_disable(void *arg, dtrace_id_t id, void *parg)
if ((ctl->loadcnt != fbt->fbtp_loadcnt))
return;
for (; fbt != NULL; fbt = fbt->fbtp_next)
for (; fbt != NULL; fbt = fbt->fbtp_next) {
*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
__syncicache(fbt->fbtp_patchpoint, 4);
}
}
static void
@ -464,8 +470,10 @@ fbt_suspend(void *arg, dtrace_id_t id, void *parg)
if ((ctl->loadcnt != fbt->fbtp_loadcnt))
return;
for (; fbt != NULL; fbt = fbt->fbtp_next)
for (; fbt != NULL; fbt = fbt->fbtp_next) {
*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
__syncicache(fbt->fbtp_patchpoint, 4);
}
}
static void
@ -479,15 +487,16 @@ fbt_resume(void *arg, dtrace_id_t id, void *parg)
if ((ctl->loadcnt != fbt->fbtp_loadcnt))
return;
for (; fbt != NULL; fbt = fbt->fbtp_next)
for (; fbt != NULL; fbt = fbt->fbtp_next) {
*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
__syncicache(fbt->fbtp_patchpoint, 4);
}
}
static int
fbt_ctfoff_init(modctl_t *lf, linker_ctf_t *lc)
{
const Elf_Sym *symp = lc->symtab;
const char *name;
const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
const uint8_t *ctfdata = lc->ctftab + sizeof(ctf_header_t);
int i;
@ -519,11 +528,6 @@ fbt_ctfoff_init(modctl_t *lf, linker_ctf_t *lc)
continue;
}
if (symp->st_name < lc->strcnt)
name = lc->strtab + symp->st_name;
else
name = "(?)";
switch (ELF_ST_TYPE(symp->st_info)) {
case STT_OBJECT:
if (objtoff >= hp->cth_funcoff ||
@ -690,6 +694,8 @@ fbt_typoff_init(linker_ctf_t *lc)
pop[kind]++;
}
/* account for a sentinel value below */
ctf_typemax++;
*lc->typlenp = ctf_typemax;
if ((xp = malloc(sizeof(uint32_t) * ctf_typemax, M_LINKER, M_ZERO | M_WAITOK)) == NULL)
@ -1171,6 +1177,11 @@ fbt_getargdesc(void *arg __unused, dtrace_id_t id __unused, void *parg, dtrace_a
uint32_t offset;
ushort_t info, kind, n;
if (fbt->fbtp_roffset != 0 && desc->dtargd_ndx == 0) {
(void) strcpy(desc->dtargd_native, "int");
return;
}
desc->dtargd_ndx = DTRACE_ARGNONE;
/* Get a pointer to the CTF data and it's length. */
@ -1221,12 +1232,19 @@ fbt_getargdesc(void *arg __unused, dtrace_id_t id __unused, void *parg, dtrace_a
return;
}
/* Check if the requested argument doesn't exist. */
if (ndx >= n)
return;
if (fbt->fbtp_roffset != 0) {
/* Only return type is available for args[1] in return probe. */
if (ndx > 1)
return;
ASSERT(ndx == 1);
} else {
/* Check if the requested argument doesn't exist. */
if (ndx >= n)
return;
/* Skip the return type and arguments up to the one requested. */
dp += ndx + 1;
/* Skip the return type and arguments up to the one requested. */
dp += ndx + 1;
}
if (fbt_type_name(&lc, *dp, desc->dtargd_native, sizeof(desc->dtargd_native)) > 0)
desc->dtargd_ndx = ndx;
@ -1234,6 +1252,15 @@ fbt_getargdesc(void *arg __unused, dtrace_id_t id __unused, void *parg, dtrace_a
return;
}
static int
fbt_linker_file_cb(linker_file_t lf, void *arg)
{
fbt_provide_module(arg, lf);
return (0);
}
static void
fbt_load(void *dummy)
{
@ -1257,6 +1284,9 @@ fbt_load(void *dummy)
if (dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_USER,
NULL, &fbt_pops, NULL, &fbt_id) != 0)
return;
/* Create probes for the kernel and already-loaded modules. */
linker_file_foreach(fbt_linker_file_cb, NULL);
}

File diff suppressed because it is too large

View File

@ -29,84 +29,166 @@
#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H
struct vtnet_softc;
struct vtnet_statistics {
unsigned long mbuf_alloc_failed;
uint64_t mbuf_alloc_failed;
unsigned long rx_frame_too_large;
unsigned long rx_enq_replacement_failed;
unsigned long rx_mergeable_failed;
unsigned long rx_csum_bad_ethtype;
unsigned long rx_csum_bad_start;
unsigned long rx_csum_bad_ipproto;
unsigned long rx_csum_bad_offset;
unsigned long rx_csum_failed;
unsigned long rx_csum_offloaded;
unsigned long rx_task_rescheduled;
uint64_t rx_frame_too_large;
uint64_t rx_enq_replacement_failed;
uint64_t rx_mergeable_failed;
uint64_t rx_csum_bad_ethtype;
uint64_t rx_csum_bad_ipproto;
uint64_t rx_csum_bad_offset;
uint64_t rx_csum_bad_proto;
uint64_t tx_csum_bad_ethtype;
uint64_t tx_tso_bad_ethtype;
uint64_t tx_tso_not_tcp;
unsigned long tx_csum_offloaded;
unsigned long tx_tso_offloaded;
unsigned long tx_csum_bad_ethtype;
unsigned long tx_tso_bad_ethtype;
unsigned long tx_task_rescheduled;
/*
* These are accumulated from each Rx/Tx queue.
*/
uint64_t rx_csum_failed;
uint64_t rx_csum_offloaded;
uint64_t rx_task_rescheduled;
uint64_t tx_csum_offloaded;
uint64_t tx_tso_offloaded;
uint64_t tx_task_rescheduled;
};
struct vtnet_rxq_stats {
uint64_t vrxs_ipackets; /* if_ipackets */
uint64_t vrxs_ibytes; /* if_ibytes */
uint64_t vrxs_iqdrops; /* if_iqdrops */
uint64_t vrxs_ierrors; /* if_ierrors */
uint64_t vrxs_csum;
uint64_t vrxs_csum_failed;
uint64_t vrxs_rescheduled;
};
struct vtnet_rxq {
struct mtx vtnrx_mtx;
struct vtnet_softc *vtnrx_sc;
struct virtqueue *vtnrx_vq;
int vtnrx_id;
int vtnrx_process_limit;
struct vtnet_rxq_stats vtnrx_stats;
struct taskqueue *vtnrx_tq;
struct task vtnrx_intrtask;
char vtnrx_name[16];
} __aligned(CACHE_LINE_SIZE);
#define VTNET_RXQ_LOCK(_rxq) mtx_lock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_UNLOCK(_rxq) mtx_unlock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_LOCK_ASSERT(_rxq) \
mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED)
#define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq) \
mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED)
struct vtnet_txq_stats {
uint64_t vtxs_opackets; /* if_opackets */
uint64_t vtxs_obytes; /* if_obytes */
uint64_t vtxs_omcasts; /* if_omcasts */
uint64_t vtxs_csum;
uint64_t vtxs_tso;
uint64_t vtxs_collapsed;
uint64_t vtxs_rescheduled;
};
struct vtnet_txq {
struct mtx vtntx_mtx;
struct vtnet_softc *vtntx_sc;
struct virtqueue *vtntx_vq;
#ifndef VTNET_LEGACY_TX
struct buf_ring *vtntx_br;
#endif
int vtntx_id;
int vtntx_watchdog;
struct vtnet_txq_stats vtntx_stats;
struct taskqueue *vtntx_tq;
struct task vtntx_intrtask;
#ifndef VTNET_LEGACY_TX
struct task vtntx_defrtask;
#endif
char vtntx_name[16];
} __aligned(CACHE_LINE_SIZE);
#define VTNET_TXQ_LOCK(_txq) mtx_lock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_TRYLOCK(_txq) mtx_trylock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_UNLOCK(_txq) mtx_unlock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_LOCK_ASSERT(_txq) \
mtx_assert(&(_txq)->vtntx_mtx, MA_OWNED)
#define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq) \
mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED)
struct vtnet_softc {
device_t vtnet_dev;
struct ifnet *vtnet_ifp;
struct mtx vtnet_mtx;
struct vtnet_rxq *vtnet_rxqs;
struct vtnet_txq *vtnet_txqs;
uint32_t vtnet_flags;
#define VTNET_FLAG_LINK 0x0001
#define VTNET_FLAG_SUSPENDED 0x0002
#define VTNET_FLAG_SUSPENDED 0x0001
#define VTNET_FLAG_MAC 0x0002
#define VTNET_FLAG_CTRL_VQ 0x0004
#define VTNET_FLAG_CTRL_RX 0x0008
#define VTNET_FLAG_VLAN_FILTER 0x0010
#define VTNET_FLAG_TSO_ECN 0x0020
#define VTNET_FLAG_MRG_RXBUFS 0x0040
#define VTNET_FLAG_LRO_NOMRG 0x0080
struct virtqueue *vtnet_rx_vq;
struct virtqueue *vtnet_tx_vq;
struct virtqueue *vtnet_ctrl_vq;
#define VTNET_FLAG_CTRL_MAC 0x0010
#define VTNET_FLAG_VLAN_FILTER 0x0020
#define VTNET_FLAG_TSO_ECN 0x0040
#define VTNET_FLAG_MRG_RXBUFS 0x0080
#define VTNET_FLAG_LRO_NOMRG 0x0100
#define VTNET_FLAG_MULTIQ 0x0200
int vtnet_link_active;
int vtnet_hdr_size;
int vtnet_tx_size;
int vtnet_rx_size;
int vtnet_rx_process_limit;
int vtnet_rx_mbuf_size;
int vtnet_rx_mbuf_count;
int vtnet_rx_nmbufs;
int vtnet_rx_clsize;
int vtnet_rx_new_clsize;
int vtnet_if_flags;
int vtnet_watchdog_timer;
int vtnet_act_vq_pairs;
int vtnet_max_vq_pairs;
struct virtqueue *vtnet_ctrl_vq;
struct vtnet_mac_filter *vtnet_mac_filter;
uint32_t *vtnet_vlan_filter;
uint64_t vtnet_features;
struct vtnet_statistics vtnet_stats;
struct callout vtnet_tick_ch;
struct ifmedia vtnet_media;
eventhandler_tag vtnet_vlan_attach;
eventhandler_tag vtnet_vlan_detach;
struct ifmedia vtnet_media;
/*
* Fake media type; the host does not provide us with
* any real media information.
*/
#define VTNET_MEDIATYPE (IFM_ETHER | IFM_1000_T | IFM_FDX)
char vtnet_hwaddr[ETHER_ADDR_LEN];
struct vtnet_mac_filter *vtnet_mac_filter;
/*
* During reset, the host's VLAN filtering table is lost. The
* array below is used to restore all the VLANs configured on
* this interface after a reset.
*/
#define VTNET_VLAN_SHADOW_SIZE (4096 / 32)
int vtnet_nvlans;
uint32_t vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE];
struct mtx vtnet_mtx;
char vtnet_mtx_name[16];
char vtnet_hwaddr[ETHER_ADDR_LEN];
};
/*
* Maximum number of queue pairs we will autoconfigure to.
*/
#define VTNET_MAX_QUEUE_PAIRS 8
/*
* Additional completed entries can appear in a virtqueue before we can
* reenable interrupts. Number of times to retry before scheduling the
* taskqueue to process the completed entries.
*/
#define VTNET_INTR_DISABLE_RETRIES 4
/*
* Fake the media type. The host does not provide us with any real media
* information.
*/
#define VTNET_MEDIATYPE (IFM_ETHER | IFM_10G_T | IFM_FDX)
/*
* Number of words to allocate for the VLAN shadow table. There is one
* bit for each VLAN.
*/
#define VTNET_VLAN_FILTER_NWORDS (4096 / 32)
/*
* When mergeable buffers are not negotiated, the vtnet_rx_header structure
* below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
@ -161,8 +243,12 @@ struct vtnet_mac_filter {
*/
CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
#define VTNET_WATCHDOG_TIMEOUT 5
#define VTNET_TX_TIMEOUT 5
#define VTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define VTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6)
#define VTNET_CSUM_ALL_OFFLOAD \
(VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
/* Features desired/implemented by this driver. */
#define VTNET_FEATURES \
@ -170,8 +256,10 @@ CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
VIRTIO_NET_F_STATUS | \
VIRTIO_NET_F_CTRL_VQ | \
VIRTIO_NET_F_CTRL_RX | \
VIRTIO_NET_F_CTRL_MAC_ADDR | \
VIRTIO_NET_F_CTRL_VLAN | \
VIRTIO_NET_F_CSUM | \
VIRTIO_NET_F_GSO | \
VIRTIO_NET_F_HOST_TSO4 | \
VIRTIO_NET_F_HOST_TSO6 | \
VIRTIO_NET_F_HOST_ECN | \
@ -180,8 +268,17 @@ CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
VIRTIO_NET_F_GUEST_TSO6 | \
VIRTIO_NET_F_GUEST_ECN | \
VIRTIO_NET_F_MRG_RXBUF | \
VIRTIO_NET_F_MQ | \
VIRTIO_RING_F_EVENT_IDX | \
VIRTIO_RING_F_INDIRECT_DESC)
/*
* The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
* frames larger than 1514 bytes.
*/
#define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \
VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN)
/*
* The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
* frames larger than 1514 bytes. We do not yet support software LRO
@ -208,28 +305,35 @@ CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
/*
* Number of slots in the Tx bufrings. This value matches most other
* multiqueue drivers.
*/
#define VTNET_DEFAULT_BUFRING_SIZE 4096
/*
* Determine how many mbufs are in each receive buffer. For LRO without
* mergeable descriptors, we must allocate an mbuf chain large enough to
* hold both the vtnet_rx_header and the maximum receivable data.
*/
#define VTNET_NEEDED_RX_MBUFS(_sc) \
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize) \
((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 : \
howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, \
(_sc)->vtnet_rx_mbuf_size)
(_clsize))
#define VTNET_MTX(_sc) &(_sc)->vtnet_mtx
#define VTNET_LOCK(_sc) mtx_lock(VTNET_MTX((_sc)))
#define VTNET_UNLOCK(_sc) mtx_unlock(VTNET_MTX((_sc)))
#define VTNET_LOCK_DESTROY(_sc) mtx_destroy(VTNET_MTX((_sc)))
#define VTNET_LOCK_ASSERT(_sc) mtx_assert(VTNET_MTX((_sc)), MA_OWNED)
#define VTNET_LOCK_ASSERT_NOTOWNED(_sc) \
mtx_assert(VTNET_MTX((_sc)), MA_NOTOWNED)
#define VTNET_CORE_MTX(_sc) &(_sc)->vtnet_mtx
#define VTNET_CORE_LOCK(_sc) mtx_lock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_UNLOCK(_sc) mtx_unlock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_DESTROY(_sc) mtx_destroy(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_ASSERT(_sc) \
mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED)
#define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc) \
mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED)
#define VTNET_LOCK_INIT(_sc) do { \
#define VTNET_CORE_LOCK_INIT(_sc) do { \
snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name), \
"%s", device_get_nameunit((_sc)->vtnet_dev)); \
mtx_init(VTNET_MTX((_sc)), (_sc)->vtnet_mtx_name, \
mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name, \
"VTNET Core Lock", MTX_DEF); \
} while (0)

View File

@ -50,14 +50,22 @@
#define VIRTIO_NET_F_CTRL_RX 0x40000 /* Control channel RX mode support */
#define VIRTIO_NET_F_CTRL_VLAN 0x80000 /* Control channel VLAN filtering */
#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
#define VIRTIO_NET_F_GUEST_ANNOUNCE 0x200000 /* Announce device on network */
#define VIRTIO_NET_F_MQ 0x400000 /* Device supports RFS */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 0x800000 /* Set MAC address */
#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
uint8_t mac[ETHER_ADDR_LEN];
uint8_t mac[ETHER_ADDR_LEN];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
uint16_t status;
/* Maximum number of each of transmit and receive queues;
* see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ.
* Legal values are between 1 and 0x8000.
*/
uint16_t max_virtqueue_pairs;
} __packed;
/*
@ -66,6 +74,7 @@ struct virtio_net_config {
*/
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start,csum_offset*/
#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
@ -100,8 +109,6 @@ struct virtio_net_ctrl_hdr {
uint8_t cmd;
} __packed;
typedef uint8_t virtio_net_ctrl_ack;
#define VIRTIO_NET_OK 0
#define VIRTIO_NET_ERR 1
@ -134,6 +141,10 @@ typedef uint8_t virtio_net_ctrl_ack;
* first sg list contains unicast addresses, the second is for multicast.
* This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
* is available.
*
* The ADDR_SET command requests one out scatterlist containing a
* 6-byte MAC address. This functionality is present if the
* VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
*/
struct virtio_net_ctrl_mac {
uint32_t entries;
@ -142,6 +153,7 @@ struct virtio_net_ctrl_mac {
#define VIRTIO_NET_CTRL_MAC 1
#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
/*
* Control VLAN filtering
@ -156,4 +168,35 @@ struct virtio_net_ctrl_mac {
#define VIRTIO_NET_CTRL_VLAN_ADD 0
#define VIRTIO_NET_CTRL_VLAN_DEL 1
/*
* Control link announce acknowledgement
*
* The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
* the driver has received the notification; the device will clear the
* VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
* this command.
*/
#define VIRTIO_NET_CTRL_ANNOUNCE 3
#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
/*
* Control Receive Flow Steering
*
* The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET enables Receive Flow
* Steering, specifying the number of the transmit and receive queues
* that will be used. After the command is consumed and acked by the
* device, the device will not steer new packets on receive virtqueues
* other than specified nor read from transmit virtqueues other than
* specified. Accordingly, the driver should not transmit new packets on
* virtqueues other than specified.
*/
struct virtio_net_ctrl_mq {
uint16_t virtqueue_pairs;
} __packed;
#define VIRTIO_NET_CTRL_MQ 4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
#endif /* _VIRTIO_NET_H */

View File

@ -757,8 +757,10 @@ vtpci_probe_and_attach_child(struct vtpci_softc *sc)
vtpci_release_child_resources(sc);
/* Reset status for future attempt. */
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
} else
} else {
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
VIRTIO_ATTACH_COMPLETED(child);
}
}
static int

View File

@ -29,6 +29,18 @@
INTERFACE virtio;
CODE {
static int
virtio_default_attach_completed(device_t dev)
{
return (0);
}
};
METHOD int attach_completed {
device_t dev;
} DEFAULT virtio_default_attach_completed;
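A driver opts in by listing the method in its method table; a hypothetical sketch (the foo_* names are illustrative):
static int
foo_attach_completed(device_t dev)
{
	/* The device has reached DRIVER_OK; finish any deferred setup. */
	return (0);
}
static device_method_t foo_methods[] = {
	DEVMETHOD(virtio_attach_completed, foo_attach_completed),
	DEVMETHOD_END
};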
CODE {
static int
virtio_default_config_change(device_t dev)

View File

@ -127,7 +127,7 @@ static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
static int vq_ring_use_indirect(struct virtqueue *, int);
static void vq_ring_enqueue_indirect(struct virtqueue *, void *,
struct sglist *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int vq_ring_must_notify_host(struct virtqueue *);
static void vq_ring_notify_host(struct virtqueue *);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);
@ -440,28 +440,38 @@ virtqueue_enable_intr(struct virtqueue *vq)
}
int
virtqueue_postpone_intr(struct virtqueue *vq)
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
uint16_t ndesc, avail_idx;
/*
* Request the next interrupt be postponed until at least half
* of the available descriptors have been consumed.
*/
avail_idx = vq->vq_ring.avail->idx;
ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx) / 2;
ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);
switch (hint) {
case VQ_POSTPONE_SHORT:
ndesc /= 4;
break;
case VQ_POSTPONE_LONG:
ndesc = (ndesc * 3) / 4;	/* 3 / 4 alone truncates to 0 */
break;
case VQ_POSTPONE_EMPTIED:
break;
}
return (vq_ring_enable_interrupt(vq, ndesc));
}
/*
* Note this is only considered a hint to the host.
*/
void
virtqueue_disable_intr(struct virtqueue *vq)
{
/*
* Note this is only considered a hint to the host.
*/
if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0)
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
vq->vq_nentries - 1;
} else
vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
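The EVENT_IDX branch above parks the used-event index one full ring behind the consumer, a position the device cannot cross before the guest consumes entries and re-arms; a sketch of the arithmetic (assumed 256-entry ring; 16-bit indices wrap naturally):
uint16_t used_cons_idx = 1000;
uint16_t event = used_cons_idx - 256 - 1;	/* 743 */
/* The device interrupts only when used->idx steps from event to
   event + 1, which cannot happen within the next full ring's worth
   of completions. */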

View File

@ -41,6 +41,16 @@ struct sglist;
/* Device callback for a virtqueue interrupt. */
typedef void virtqueue_intr_t(void *);
/*
* Hint on how long the next interrupt should be postponed. This is
* only used when the EVENT_IDX feature is negotiated.
*/
typedef enum {
VQ_POSTPONE_SHORT,
VQ_POSTPONE_LONG,
VQ_POSTPONE_EMPTIED /* Until all available desc are used. */
} vq_postpone_t;
#define VIRTQUEUE_MAX_NAME_SZ 32
/* One for each virtqueue the device wishes to allocate. */
@ -73,7 +83,7 @@ int virtqueue_reinit(struct virtqueue *vq, uint16_t size);
int virtqueue_intr_filter(struct virtqueue *vq);
void virtqueue_intr(struct virtqueue *vq);
int virtqueue_enable_intr(struct virtqueue *vq);
int virtqueue_postpone_intr(struct virtqueue *vq);
int virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint);
void virtqueue_disable_intr(struct virtqueue *vq);
/* Get physical address of the virtqueue ring. */

View File

@ -2914,11 +2914,92 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
}
/*
* This function is advisory.
* Apply the given advice to the specified range of addresses within the
* given pmap. Depending on the advice, clear the referenced and/or
* modified flags in each mapping and set the mapped page's dirty field.
*/
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
pd_entry_t *pde, *pdpe;
pt_entry_t *pte;
vm_offset_t va, va_next;
vm_paddr_t pa;
vm_page_t m;
if (advice != MADV_DONTNEED && advice != MADV_FREE)
return;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
for (; sva < eva; sva = va_next) {
pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
if (*pdpe == 0) {
va_next = (sva + NBSEG) & ~SEGMASK;
if (va_next < sva)
va_next = eva;
continue;
}
#endif
va_next = (sva + NBPDR) & ~PDRMASK;
if (va_next < sva)
va_next = eva;
pde = pmap_pdpe_to_pde(pdpe, sva);
if (*pde == NULL)
continue;
/*
* Limit our scan to either the end of the va represented
* by the current page table page, or to the end of the
* range being advised.
*/
if (va_next > eva)
va_next = eva;
va = va_next;
for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
sva += PAGE_SIZE) {
if (!pte_test(pte, PTE_MANAGED | PTE_V)) {
if (va != va_next) {
pmap_invalidate_range(pmap, va, sva);
va = va_next;
}
continue;
}
pa = TLBLO_PTE_TO_PA(*pte);
m = PHYS_TO_VM_PAGE(pa);
m->md.pv_flags &= ~PV_TABLE_REF;
if (pte_test(pte, PTE_D)) {
if (advice == MADV_DONTNEED) {
/*
* Future calls to pmap_is_modified()
* can be avoided by making the page
* dirty now.
*/
vm_page_dirty(m);
} else {
pte_clear(pte, PTE_D);
if (va == va_next)
va = sva;
}
} else {
/*
* Unless PTE_D is set, any TLB entries
* mapping "sva" don't allow write access, so
* they needn't be invalidated.
*/
if (va != va_next) {
pmap_invalidate_range(pmap, va, sva);
va = va_next;
}
}
}
if (va != va_next)
pmap_invalidate_range(pmap, va, sva);
}
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
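This path is driven from madvise(2); a userland sketch that exercises it (illustrative only):
#include <sys/mman.h>
int
main(void)
{
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	/* MADV_FREE eventually reaches pmap_advise(), which clears PTE_D
	   so the pages can be reclaimed without being laundered. */
	madvise(p, len, MADV_FREE);
	munmap(p, len);
	return (0);
}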
/*

View File

@ -23,14 +23,29 @@
# SUCH DAMAGE.
#
.include <bsd.own.mk>
.PATH: ${.CURDIR}/../../../dev/virtio/network
KMOD= if_vtnet
SRCS= if_vtnet.c
SRCS+= virtio_bus_if.h virtio_if.h
SRCS+= bus_if.h device_if.h
SRCS+= opt_inet.h opt_inet6.h
MFILES= kern/bus_if.m kern/device_if.m \
dev/virtio/virtio_bus_if.m dev/virtio/virtio_if.m
.if !defined(KERNBUILDDIR)
.if ${MK_INET_SUPPORT} != "no"
opt_inet.h:
@echo "#define INET 1" > ${.TARGET}
.endif
.if ${MK_INET6_SUPPORT} != "no"
opt_inet6.h:
@echo "#define INET6 1" > ${.TARGET}
.endif
.endif
.include <bsd.kmod.mk>

View File

@ -86,6 +86,8 @@ DRIVER_MODULE(atibl, vgapci, atibl_driver, atibl_devclass, 0, 0);
static void
atibl_identify(driver_t *driver, device_t parent)
{
if (OF_finddevice("mac-io/backlight") == -1)
return;
if (device_find_child(parent, "backlight", -1) == NULL)
device_add_child(parent, "backlight", -1);
}

View File

@ -82,6 +82,8 @@ DRIVER_MODULE(nvbl, vgapci, nvbl_driver, nvbl_devclass, 0, 0);
static void
nvbl_identify(driver_t *driver, device_t parent)
{
if (OF_finddevice("mac-io/backlight") == -1)
return;
if (device_find_child(parent, "backlight", -1) == NULL)
device_add_child(parent, "backlight", -1);
}

View File

@ -780,7 +780,7 @@ finished:
while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
keg_free_slab(keg, slab, 0);
keg_free_slab(keg, slab, keg->uk_ipers);
}
}