Extend the meaning of the CTLFLAG_TUN flag so that the kernel
automatically checks, during early boot, whether a kernel environment
variable with the same name exists and, if so, uses it to initialize
the SYSCTL. This works for all SYSCTL types, both statically and
dynamically created ones, except for the SYSCTL NODE type and SYSCTLs
which belong to VNETs. A new flag, CTLFLAG_NOFETCH, has been added for
the case where a tunable sysctl has a custom initialization function,
so that the sysctl can still be marked as a tunable (a sketch of this
follows the change list below). The kernel SYSCTL API is mostly
unchanged, with a few exceptions for special operations like iterating
the children of a static/extern SYSCTL node; because several device
drivers do this, the operation should probably be factored out into a
common macro. The reason for changing the SYSCTL API was the need for
a pointer to the SYSCTL parent OID itself, and not only to the SYSCTL
parent OID list, in order to quickly generate the sysctl path. The
motivation behind this patch is to avoid parameter-loading kludges
inside the OFED driver subsystem: instead of adding special code there
to post-load tunables into dynamically created sysctls, we generalize
this in the kernel.
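
As an illustration of the resulting pattern (a minimal sketch using a
hypothetical hw.foo.enable knob; the hunks below make the same
conversion for real OIDs), the old two-step declaration:

	static int foo_enable = 1;
	TUNABLE_INT("hw.foo.enable", &foo_enable);
	SYSCTL_INT(_hw_foo, OID_AUTO, enable, CTLFLAG_RW,
	    &foo_enable, 0, "Enable the foo feature");

collapses into a single declaration whose value is fetched
automatically from the kernel environment at early boot:

	static int foo_enable = 1;
	SYSCTL_INT(_hw_foo, OID_AUTO, enable, CTLFLAG_RWTUN,
	    &foo_enable, 0, "Enable the foo feature");

Either way the knob can still be preset from loader.conf(5), e.g.
hw.foo.enable="0".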

Other changes:
- Corrected a possibly incorrect sysctl name from "hw.cbb.intr_mask"
to "hw.pcic.intr_mask".
- Removed redundant TUNABLE statements throughout the kernel.
- Some minor code rewrites in connection with removing unneeded
TUNABLE statements.
- Added a missing SYSCTL_DECL().
- Wrapped two very long lines.
- Avoid malloc()/free() inside sysctl string handling when it is
called to initialize a sysctl from a tunable, because malloc()/free()
are not ready at the time sysctls from the sysctl dataset are
registered.
- Bumped FreeBSD version to indicate SYSCTL API change.
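
Where a driver must post-process the fetched value itself,
CTLFLAG_NOFETCH suppresses only the automatic fetch while keeping the
OID marked as a tunable. A minimal sketch, again using a hypothetical
hw.foo.maxunits knob and init function (the imx6 and sa(4) hunks below
use this flag), clamping the value in a custom initialization routine:

	static int foo_maxunits = 8;
	SYSCTL_INT(_hw_foo, OID_AUTO, maxunits,
	    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
	    &foo_maxunits, 0, "Maximum number of foo units");

	static void
	foo_init(void *arg __unused)
	{
		/* Fetch the tunable by hand, then sanity-check it. */
		TUNABLE_INT_FETCH("hw.foo.maxunits", &foo_maxunits);
		if (foo_maxunits < 1)
			foo_maxunits = 1;
	}
	SYSINIT(foo_init, SI_SUB_DRIVERS, SI_ORDER_ANY, foo_init, NULL);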

MFC after:	2 weeks
Sponsored by:	Mellanox Technologies
commit 3da1cf1e88 (parent 04006eabea)
Author:	Hans Petter Selasky
Date:	2014-06-27 16:33:43 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=267961
262 changed files with 799 additions and 1385 deletions


@ -45,9 +45,8 @@ __FBSDID("$FreeBSD$");
#include <machine/nexusvar.h>
int acpi_resume_beep;
TUNABLE_INT("debug.acpi.resume_beep", &acpi_resume_beep);
SYSCTL_INT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RW, &acpi_resume_beep,
0, "Beep the PC speaker when resuming");
SYSCTL_INT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RWTUN,
&acpi_resume_beep, 0, "Beep the PC speaker when resuming");
int acpi_reset_video;
TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video);


@ -69,7 +69,6 @@ static char *mem_owner_bios = "BIOS";
(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))
static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
&mtrrs_disabled, 0, "Disable amd64 MTRRs.");


@ -675,8 +675,7 @@ cpu_halt(void)
void (*cpu_idle_hook)(sbintime_t) = NULL; /* ACPI idle hook. */
static int cpu_ident_amdc1e = 0; /* AMD C1E supported. */
static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
0, "Use MONITOR/MWAIT for short idle");
#define STATE_RUNNING 0x0


@ -68,8 +68,7 @@ static int watchdog_dontfire = 1;
static int watchdog_timer = -1;
static int watchdog_nmi = 1;
TUNABLE_INT("debug.watchdog", &watchdog_cpu);
SYSCTL_INT(_debug, OID_AUTO, watchdog_nmi, CTLFLAG_RW, &watchdog_nmi, 0,
SYSCTL_INT(_debug, OID_AUTO, watchdog_nmi, CTLFLAG_RWTUN, &watchdog_nmi, 0,
"IPI the boot processor with an NMI to enter the debugger");
static struct callout watchdog_callout;


@ -332,8 +332,8 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
"Is page attribute table fully functional?");
static int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
"Are large page mappings enabled?");
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&pg_ps_enabled, 0, "Are large page mappings enabled?");
#define PAT_INDEX_SIZE 8
static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
@ -368,8 +368,8 @@ static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
static struct unrhdr pcid_unr;
static struct mtx pcid_mtx;
int pmap_pcid_enabled = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN, &pmap_pcid_enabled,
0, "Is TLB Context ID enabled ?");
SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?");
int invpcid_works = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
"Is the invpcid instruction available ?");


@ -73,7 +73,6 @@ static void
max_ldt_segment_init(void *arg __unused)
{
TUNABLE_INT_FETCH("machdep.max_ldt_segment", &max_ldt_segment);
if (max_ldt_segment <= 0)
max_ldt_segment = 1;
if (max_ldt_segment > MAX_LD)


@ -143,20 +143,18 @@ static char *trap_msg[] = {
#ifdef KDB
static int kdb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RW,
SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RWTUN,
&kdb_on_nmi, 0, "Go to KDB on NMI");
TUNABLE_INT("machdep.kdb_on_nmi", &kdb_on_nmi);
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
&panic_on_nmi, 0, "Panic on NMI");
TUNABLE_INT("machdep.panic_on_nmi", &panic_on_nmi);
static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RW,
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
&prot_fault_translation, 0,
"Select signal to deliver on protection fault");
static int uprintf_signal;
SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RW,
SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RWTUN,
&uprintf_signal, 0,
"Print debugging information on trap signal to ctty");


@ -65,7 +65,6 @@ static int pcie_minbus, pcie_maxbus;
static uint32_t pcie_badslots;
static struct mtx pcicfg_mtx;
static int mcfg_enable = 1;
TUNABLE_INT("hw.pci.mcfg", &mcfg_enable);
SYSCTL_INT(_hw_pci, OID_AUTO, mcfg, CTLFLAG_RDTUN, &mcfg_enable, 0,
"Enable support for PCI-e memory mapped config access");


@ -199,7 +199,6 @@ SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
* interrupts disabled.
*/
static int halt_detection_enabled = 1;
TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
&halt_detection_enabled, 0,
"Halt VM if all vcpus execute HLT with interrupts disabled");


@ -224,10 +224,10 @@ busdma_init(void *dummy)
/*
* This init historically used SI_SUB_VM, but now the init code requires
* malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
* SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
* SI_SUB_KMEM and SI_ORDER_THIRD.
* SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using
* SI_SUB_KMEM and SI_ORDER_FOURTH.
*/
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)


@ -276,10 +276,10 @@ busdma_init(void *dummy)
/*
* This init historically used SI_SUB_VM, but now the init code requires
* malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
* SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
* SI_SUB_KMEM and SI_ORDER_THIRD.
* SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using
* SI_SUB_KMEM and SI_ORDER_FOURTH.
*/
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
/*
* End block of code useful to transplant to other implementations.


@ -50,8 +50,7 @@ __FBSDID("$FreeBSD$");
CTASSERT(sizeof(struct kerneldumpheader) == 512);
int do_minidump = 1;
TUNABLE_INT("debug.minidump", &do_minidump);
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RW, &do_minidump, 0,
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RWTUN, &do_minidump, 0,
"Enable mini crash dumps");
/*


@ -65,8 +65,8 @@ static platform_t plat_obj;
static struct kobj_ops plat_kernel_kops;
static struct platform_kobj plat_kernel_obj;
static char plat_name[64] = "";
SYSCTL_STRING(_hw, OID_AUTO, platform, CTLFLAG_RDTUN, plat_name, 0,
static char plat_name[64];
SYSCTL_STRING(_hw, OID_AUTO, platform, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, plat_name, 0,
"Platform currently in use");
/*


@ -465,7 +465,7 @@ static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
/* Superpages utilization enabled = 1 / disabled = 0 */
static int sp_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN, &sp_enabled, 0,
SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &sp_enabled, 0,
"Are large page mappings enabled?");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,


@ -375,12 +375,12 @@ cpufreq_initialize(struct imx6_anatop_softc *sc)
"CPU frequency");
SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6),
OID_AUTO, "cpu_minmhz", CTLTYPE_INT | CTLFLAG_RWTUN, sc, 0,
cpufreq_sysctl_minmhz, "IU", "Minimum CPU frequency");
OID_AUTO, "cpu_minmhz", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
sc, 0, cpufreq_sysctl_minmhz, "IU", "Minimum CPU frequency");
SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6),
OID_AUTO, "cpu_maxmhz", CTLTYPE_INT | CTLFLAG_RWTUN, sc, 0,
cpufreq_sysctl_maxmhz, "IU", "Maximum CPU frequency");
OID_AUTO, "cpu_maxmhz", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
sc, 0, cpufreq_sysctl_maxmhz, "IU", "Maximum CPU frequency");
SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6),
OID_AUTO, "cpu_maxmhz_hw", CTLFLAG_RD, &sc->cpu_maxmhz_hw, 0,
@ -413,9 +413,6 @@ cpufreq_initialize(struct imx6_anatop_softc *sc)
sc->cpu_maxmhz_hw = imx6_ocotp_mhz_tab[cfg3speed];
sc->cpu_maxmhz = sc->cpu_maxmhz_hw;
TUNABLE_INT_FETCH("hw.imx6.cpu_overclock_enable",
&sc->cpu_overclock_enable);
TUNABLE_INT_FETCH("hw.imx6.cpu_minmhz", &sc->cpu_minmhz);
op = cpufreq_nearest_oppt(sc, sc->cpu_minmhz);
sc->cpu_minmhz = op->mhz;


@ -255,9 +255,8 @@ static SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0,
"IXP4XX NPE driver parameters");
static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RWTUN, &npe_debug,
0, "IXP4XX NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define DPRINTF(sc, fmt, ...) do { \
if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__); \
} while (0)
@ -265,18 +264,15 @@ TUNABLE_INT("hw.npe.debug", &npe_debug);
if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
static int npe_tickinterval = 3; /* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RDTUN, &npe_tickinterval,
0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);
static int npe_rxbuf = 64; /* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RDTUN, &npe_rxbuf,
0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static int npe_txbuf = 128; /* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RDTUN, &npe_txbuf,
0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);
static int
unit2npeid(int unit)


@ -181,9 +181,8 @@ typedef struct {
} IxNpeDlNpeMgrStateInfoBlock;
static int npe_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RW, &npe_debug,
SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RWTUN, &npe_debug,
0, "IXP4XX NPE debug msgs");
TUNABLE_INT("debug.ixp425npe", &npe_debug);
#define DPRINTF(dev, fmt, ...) do { \
if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \
} while (0)


@ -159,10 +159,9 @@ struct ixpqmgr_softc {
uint32_t aqmFreeSramAddress; /* SRAM free space */
};
static int qmgr_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RW, &qmgr_debug,
static int qmgr_debug;
SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RWTUN, &qmgr_debug,
0, "IXP4XX Q-Manager debug msgs");
TUNABLE_INT("debug.qmgr", &qmgr_debug);
#define DPRINTF(dev, fmt, ...) do { \
if (qmgr_debug) printf(fmt, __VA_ARGS__); \
} while (0)


@ -546,30 +546,22 @@ static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RWTUN,
&ada_legacy_aliases, 0, "Create legacy-like device aliases");
TUNABLE_INT("kern.cam.ada.legacy_aliases", &ada_legacy_aliases);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN,
&ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
&ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
&ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.send_ordered", &ada_send_ordered);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN,
&ada_spindown_shutdown, 0, "Spin down upon shutdown");
TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN,
&ada_spindown_suspend, 0, "Spin down upon suspend");
TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN,
&ada_read_ahead, 0, "Enable disk read-ahead");
TUNABLE_INT("kern.cam.ada.read_ahead", &ada_read_ahead);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN,
&ada_write_cache, 0, "Enable disk write cache");
TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);
/*
* ADA_ORDEREDTAG_INTERVAL determines how often, relative


@ -139,15 +139,12 @@ static int pmp_hide_special = PMP_DEFAULT_HIDE_SPECIAL;
static SYSCTL_NODE(_kern_cam, OID_AUTO, pmp, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, retry_count, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, retry_count, CTLFLAG_RWTUN,
&pmp_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.pmp.retry_count", &pmp_retry_count);
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, default_timeout, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
&pmp_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.pmp.default_timeout", &pmp_default_timeout);
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, hide_special, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, hide_special, CTLFLAG_RWTUN,
&pmp_hide_special, 0, "Hide extra ports");
TUNABLE_INT("kern.cam.pmp.hide_special", &pmp_hide_special);
static struct periph_driver pmpdriver =
{


@ -116,7 +116,6 @@ SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem");
#endif
int cam_sort_io_queues = CAM_DEFAULT_SORT_IO_QUEUES;
TUNABLE_INT("kern.cam.sort_io_queues", &cam_sort_io_queues);
SYSCTL_INT(_kern_cam, OID_AUTO, sort_io_queues, CTLFLAG_RWTUN,
&cam_sort_io_queues, 0, "Sort IO queues to try and optimise disk access patterns");
#endif


@ -149,7 +149,6 @@ typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
/* Transport layer configuration information */
static struct xpt_softc xsoftc;
TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
&xsoftc.boot_delay, 0, "Bus registration wait time");
@ -163,7 +162,6 @@ static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;
TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
&cam_num_doneqs, 0, "Number of completion queues/threads");
@ -197,12 +195,10 @@ static struct cdevsw xpt_cdevsw = {
/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
TUNABLE_INT("kern.cam.dflags", &cam_dflags);
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
&cam_debug_delay, 0, "Delay in us after each debug message");
/* Our boot-time initialization hook */


@ -308,11 +308,9 @@ static int index_to_aps_page;
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
&worker_threads, 1, "Number of worker threads");
static int verbose = 0;
TUNABLE_INT("kern.cam.ctl.verbose", &verbose);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN,
&verbose, 0, "Show SCSI errors returned to initiator");


@ -220,10 +220,9 @@ struct ctl_be_block_io {
};
static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
"CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN,
&cbb_num_threads, 0, "Number of threads per backing file");
static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);


@ -85,19 +85,15 @@ static uma_zone_t cfiscsi_data_wait_zone;
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD, 0,
"CAM Target Layer iSCSI Frontend");
static int debug = 3;
TUNABLE_INT("kern.cam.ctl.iscsi.debug", &debug);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN,
&debug, 1, "Enable debug messages");
static int ping_timeout = 5;
TUNABLE_INT("kern.cam.ctl.iscsi.ping_timeout", &ping_timeout);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN,
&ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds");
static int login_timeout = 60;
TUNABLE_INT("kern.cam.ctl.iscsi.login_timeout", &login_timeout);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN,
&login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds");
static int maxcmdsn_delta = 256;
TUNABLE_INT("kern.cam.ctl.iscsi.maxcmdsn_delta", &maxcmdsn_delta);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN,
&maxcmdsn_delta, 256, "Number of commands the initiator can send "
"without confirmation");


@ -277,15 +277,12 @@ static int cd_retry_count = CD_DEFAULT_RETRY;
static int cd_timeout = CD_DEFAULT_TIMEOUT;
static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver");
SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RWTUN,
&cd_poll_period, 0, "Media polling period in seconds");
TUNABLE_INT("kern.cam.cd.poll_period", &cd_poll_period);
SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RWTUN,
&cd_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.cd.retry_count", &cd_retry_count);
SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RWTUN,
&cd_timeout, 0, "Timeout, in us, for read operations");
TUNABLE_INT("kern.cam.cd.timeout", &cd_timeout);
static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers");


@ -1188,18 +1188,14 @@ static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
&da_poll_period, 0, "Media polling period in seconds");
TUNABLE_INT("kern.cam.da.poll_period", &da_poll_period);
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
&da_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
&da_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
&da_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.da.send_ordered", &da_send_ordered);
/*
* DA_ORDEREDTAG_INTERVAL determines how often, relative


@ -226,9 +226,8 @@ static char *safte_2little = "Too Little Data Returned (%d) at line %d\n";
int emulate_array_devices = 1;
SYSCTL_DECL(_kern_cam_enc);
SYSCTL_INT(_kern_cam_enc, OID_AUTO, emulate_array_devices, CTLFLAG_RW,
SYSCTL_INT(_kern_cam_enc, OID_AUTO, emulate_array_devices, CTLFLAG_RWTUN,
&emulate_array_devices, 0, "Emulate Array Devices for SAF-TE");
TUNABLE_INT("kern.cam.enc.emulate_array_devices", &emulate_array_devices);
static int
safte_fill_read_buf_io(enc_softc_t *enc, struct enc_fsm_state *state,


@ -445,9 +445,10 @@ static int sa_allow_io_split = SA_DEFAULT_IO_SPLIT;
* is bad behavior, because it hides the true tape block size from the
* application.
*/
TUNABLE_INT("kern.cam.sa.allow_io_split", &sa_allow_io_split);
static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0,
"CAM Sequential Access Tape Driver");
SYSCTL_INT(_kern_cam_sa, OID_AUTO, allow_io_split, CTLFLAG_RDTUN,
&sa_allow_io_split, 0, "Default I/O split value");
static struct periph_driver sadriver =
{
@ -1494,7 +1495,7 @@ sasysctlinit(void *context, int pending)
goto bailout;
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "allow_io_split", CTLTYPE_INT | CTLFLAG_RDTUN,
OID_AUTO, "allow_io_split", CTLTYPE_INT | CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&softc->allow_io_split, 0, "Allow Splitting I/O");
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "maxio", CTLTYPE_INT | CTLFLAG_RD,


@ -78,9 +78,8 @@ struct scsi_quirk_entry {
#define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk))
static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT | CTLFLAG_RWTUN, 0, 0,
sysctl_cam_search_luns, "I",
"allow search above LUN 7 for SCSI3 and greater devices");


@ -36,7 +36,7 @@ __FBSDID("$FreeBSD$");
static MALLOC_DEFINE(M_KSTAT, "kstat_data", "Kernel statistics");
SYSCTL_NODE(, OID_AUTO, kstat, CTLFLAG_RW, 0, "Kernel statistics");
SYSCTL_ROOT_NODE(OID_AUTO, kstat, CTLFLAG_RW, 0, "Kernel statistics");
kstat_t *
kstat_create(char *module, int instance, char *name, char *class, uchar_t type,


@ -204,8 +204,6 @@ int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;
TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,


@ -44,8 +44,7 @@ int zfs_dedup_prefetch = 1;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dedup, CTLFLAG_RW, 0, "ZFS DEDUP");
TUNABLE_INT("vfs.zfs.dedup.prefetch", &zfs_dedup_prefetch);
SYSCTL_INT(_vfs_zfs_dedup, OID_AUTO, prefetch, CTLFLAG_RW, &zfs_dedup_prefetch,
SYSCTL_INT(_vfs_zfs_dedup, OID_AUTO, prefetch, CTLFLAG_RWTUN, &zfs_dedup_prefetch,
0, "Enable/disable prefetching of dedup-ed blocks which are going to be freed");
static const ddt_ops_t *ddt_ops[DDT_TYPES] = {


@ -54,7 +54,6 @@
*/
int zfs_nopwrite_enabled = 1;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.nopwrite_enabled", &zfs_nopwrite_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, nopwrite_enabled, CTLFLAG_RDTUN,
&zfs_nopwrite_enabled, 0, "Enable nopwrite feature");
@ -1626,8 +1625,7 @@ dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
}
int zfs_mdcomp_disable = 0;
TUNABLE_INT("vfs.zfs.mdcomp_disable", &zfs_mdcomp_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RWTUN,
&zfs_mdcomp_disable, 0, "Disable metadata compression");
/*


@ -55,16 +55,12 @@ SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, prefetch_disable, CTLFLAG_RW,
&zfs_prefetch_disable, 0, "Disable prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH");
TUNABLE_INT("vfs.zfs.zfetch.max_streams", &zfetch_max_streams);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RWTUN,
&zfetch_max_streams, 0, "Max # of streams per zfetch");
TUNABLE_INT("vfs.zfs.zfetch.min_sec_reap", &zfetch_min_sec_reap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, min_sec_reap, CTLFLAG_RDTUN,
&zfetch_min_sec_reap, 0, "Min time before stream reclaim");
TUNABLE_INT("vfs.zfs.zfetch.block_cap", &zfetch_block_cap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, block_cap, CTLFLAG_RDTUN,
&zfetch_block_cap, 0, "Max number of blocks to fetch at a time");
TUNABLE_QUAD("vfs.zfs.zfetch.array_rd_sz", &zfetch_array_rd_sz);
SYSCTL_UQUAD(_vfs_zfs_zfetch, OID_AUTO, array_rd_sz, CTLFLAG_RDTUN,
&zfetch_array_rd_sz, 0,
"Number of bytes in a array_read at which we stop prefetching");


@ -141,23 +141,19 @@ extern int zfs_vdev_async_write_active_max_dirty_percent;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_QUAD("vfs.zfs.dirty_data_max", &zfs_dirty_data_max);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_max, CTLFLAG_RWTUN,
&zfs_dirty_data_max, 0,
"The maximum amount of dirty data in bytes after which new writes are "
"halted until space becomes available");
TUNABLE_QUAD("vfs.zfs.dirty_data_max_max", &zfs_dirty_data_max_max);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_max_max, CTLFLAG_RDTUN,
&zfs_dirty_data_max_max, 0,
"The absolute cap on dirty_data_max when auto calculating");
TUNABLE_INT("vfs.zfs.dirty_data_max_percent", &zfs_dirty_data_max_percent);
SYSCTL_INT(_vfs_zfs, OID_AUTO, dirty_data_max_percent, CTLFLAG_RDTUN,
&zfs_dirty_data_max_percent, 0,
"The percent of physical memory used to auto calculate dirty_data_max");
TUNABLE_QUAD("vfs.zfs.dirty_data_sync", &zfs_dirty_data_sync);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_sync, CTLFLAG_RWTUN,
&zfs_dirty_data_sync, 0,
"Force a txg if the number of dirty buffer bytes exceed this value");


@ -71,32 +71,23 @@ boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable srub prefetching */
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.top_maxinflight", &zfs_top_maxinflight);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RWTUN,
&zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
TUNABLE_INT("vfs.zfs.resilver_delay", &zfs_resilver_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RWTUN,
&zfs_resilver_delay, 0, "Number of ticks to delay resilver");
TUNABLE_INT("vfs.zfs.scrub_delay", &zfs_scrub_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RWTUN,
&zfs_scrub_delay, 0, "Number of ticks to delay scrub");
TUNABLE_INT("vfs.zfs.scan_idle", &zfs_scan_idle);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RWTUN,
&zfs_scan_idle, 0, "Idle scan window in clock ticks");
TUNABLE_INT("vfs.zfs.scan_min_time_ms", &zfs_scan_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RWTUN,
&zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg");
TUNABLE_INT("vfs.zfs.free_min_time_ms", &zfs_free_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RWTUN,
&zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
TUNABLE_INT("vfs.zfs.resilver_min_time_ms", &zfs_resilver_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RWTUN,
&zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
TUNABLE_INT("vfs.zfs.no_scrub_io", &zfs_no_scrub_io);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RWTUN,
&zfs_no_scrub_io, 0, "Disable scrub I/O");
TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RWTUN,
&zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;


@ -55,7 +55,6 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
&metaslab_gang_bang, 0,
"Force gang block allocation for blocks larger than or equal to this value");
@ -67,7 +66,6 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
* Values should be greater than or equal to 100.
*/
int zfs_condense_pct = 200;
TUNABLE_INT("vfs.zfs.condense_pct", &zfs_condense_pct);
SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
&zfs_condense_pct, 0,
"Condense on-disk spacemap when it is more than this many percents"
@ -87,7 +85,6 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
* no metaslab group will be excluded based on this criterion.
*/
int zfs_mg_noalloc_threshold = 0;
TUNABLE_INT("vfs.zfs.mg_noalloc_threshold", &zfs_mg_noalloc_threshold);
SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
&zfs_mg_noalloc_threshold, 0,
"Percentage of metaslab group size that should be free"
@ -97,7 +94,6 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
* When set will load all metaslabs when pool is first opened.
*/
int metaslab_debug_load = 0;
TUNABLE_INT("vfs.zfs.metaslab.debug_load", &metaslab_debug_load);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
&metaslab_debug_load, 0,
"Load all metaslabs when pool is first opened");
@ -106,7 +102,6 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
* When set will prevent metaslabs from being unloaded.
*/
int metaslab_debug_unload = 0;
TUNABLE_INT("vfs.zfs.metaslab.debug_unload", &metaslab_debug_unload);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
&metaslab_debug_unload, 0,
"Prevent metaslabs from being unloaded");
@ -118,8 +113,6 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
* aggressive strategy (i.e search by size rather than offset).
*/
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
TUNABLE_QUAD("vfs.zfs.metaslab.df_alloc_threshold",
&metaslab_df_alloc_threshold);
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
&metaslab_df_alloc_threshold, 0,
"Minimum size which forces the dynamic allocator to change it's allocation strategy");
@ -131,27 +124,25 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
* switch to using best-fit allocations.
*/
int metaslab_df_free_pct = 4;
TUNABLE_INT("vfs.zfs.metaslab.df_free_pct", &metaslab_df_free_pct);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
&metaslab_df_free_pct, 0,
"The minimum free space, in percent, which must be available in a space map to continue allocations in a first-fit fashion");
"The minimum free space, in percent, which must be available in a "
"space map to continue allocations in a first-fit fashion");
/*
* A metaslab is considered "free" if it contains a contiguous
* segment which is greater than metaslab_min_alloc_size.
*/
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
TUNABLE_QUAD("vfs.zfs.metaslab.min_alloc_size",
&metaslab_min_alloc_size);
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
&metaslab_min_alloc_size, 0,
"A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
"A metaslab is considered \"free\" if it contains a contiguous "
"segment which is greater than vfs.zfs.metaslab.min_alloc_size");
/*
* Percentage of all cpus that can be used by the metaslab taskq.
*/
int metaslab_load_pct = 50;
TUNABLE_INT("vfs.zfs.metaslab.load_pct", &metaslab_load_pct);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
&metaslab_load_pct, 0,
"Percentage of cpus that can be used by the metaslab taskq");
@ -162,7 +153,6 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
* keep it loaded.
*/
int metaslab_unload_delay = TXG_SIZE * 2;
TUNABLE_INT("vfs.zfs.metaslab.unload_delay", &metaslab_unload_delay);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
&metaslab_unload_delay, 0,
"Number of TXGs that an unused metaslab can be kept in memory");
@ -173,13 +163,11 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
boolean_t zfs_write_to_degraded = B_FALSE;
SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RWTUN,
&zfs_write_to_degraded, 0, "Allow writing data to degraded vdevs");
TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
/*
* Max number of metaslabs per group to preload.
*/
int metaslab_preload_limit = SPA_DVAS_PER_BP;
TUNABLE_INT("vfs.zfs.metaslab.preload_limit", &metaslab_preload_limit);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
&metaslab_preload_limit, 0,
"Max number of metaslabs per group to preload");
@ -188,7 +176,6 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
* Enable/disable preloading of metaslab.
*/
boolean_t metaslab_preload_enabled = B_TRUE;
TUNABLE_INT("vfs.zfs.metaslab.preload_enabled", &metaslab_preload_enabled);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
&metaslab_preload_enabled, 0,
"Max number of metaslabs per group to preload");
@ -197,8 +184,6 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
* Enable/disable additional weight factor for each metaslab.
*/
boolean_t metaslab_weight_factor_enable = B_FALSE;
TUNABLE_INT("vfs.zfs.metaslab.weight_factor_enable",
&metaslab_weight_factor_enable);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, weight_factor_enable, CTLFLAG_RWTUN,
&metaslab_weight_factor_enable, 0,
"Enable additional weight factor for each metaslab");


@ -84,8 +84,7 @@
static int check_hostid = 1;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.check_hostid", &check_hostid);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RW, &check_hostid, 0,
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RWTUN, &check_hostid, 0,
"Check hostid on import?");
/*


@ -244,7 +244,6 @@ int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
int zfs_flags = 0;
#endif
SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
"ZFS debug flags.");
@ -257,7 +256,6 @@ SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
*/
int zfs_recover = 0;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0,
"Try to recover from otherwise-fatal errors.");
@ -270,7 +268,6 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0,
* in a system panic.
*/
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
&zfs_deadman_synctime_ms, 0,
"Stalled ZFS I/O expiration time in milliseconds");
@ -280,7 +277,6 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
* for hung I/O.
*/
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
&zfs_deadman_checktime_ms, 0,
"Period of checks for stalled ZFS I/O in milliseconds");
@ -290,7 +286,6 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
* zfs_deadman_init()
*/
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
&zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");
@ -304,7 +299,6 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
*/
int spa_asize_inflation = 24;
TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
&spa_asize_inflation, 0, "Worst case inflation factor for single sector writes");


@ -85,31 +85,22 @@ static u_int trim_vdev_max_pending = 64;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RD, 0, "ZFS TRIM");
TUNABLE_INT("vfs.zfs.trim.txg_delay", &trim_txg_delay);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, txg_delay, CTLFLAG_RWTUN, &trim_txg_delay,
0, "Delay TRIMs by up to this many TXGs");
TUNABLE_INT("vfs.zfs.trim.timeout", &trim_timeout);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, timeout, CTLFLAG_RWTUN, &trim_timeout, 0,
"Delay TRIMs by up to this many seconds");
TUNABLE_INT("vfs.zfs.trim.max_interval", &trim_max_interval);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, max_interval, CTLFLAG_RWTUN,
&trim_max_interval, 0,
"Maximum interval between TRIM queue processing (seconds)");
SYSCTL_DECL(_vfs_zfs_vdev);
TUNABLE_QUAD("vfs.zfs.vdev.trim_max_bytes", &trim_vdev_max_bytes);
SYSCTL_QUAD(_vfs_zfs_vdev, OID_AUTO, trim_max_bytes, CTLFLAG_RWTUN,
&trim_vdev_max_bytes, 0,
"Maximum pending TRIM bytes for a vdev");
TUNABLE_INT("vfs.zfs.vdev.trim_max_pending", &trim_vdev_max_pending);
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, trim_max_pending, CTLFLAG_RWTUN,
&trim_vdev_max_pending, 0,
"Maximum pending TRIM segments for a vdev");
static void trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd);
static int


@ -112,8 +112,7 @@ int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS TXG");
TUNABLE_INT("vfs.zfs.txg.timeout", &zfs_txg_timeout);
SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, timeout, CTLFLAG_RW, &zfs_txg_timeout, 0,
SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, timeout, CTLFLAG_RWTUN, &zfs_txg_timeout, 0,
"Maximum seconds worth of delta per txg");
/*


@ -90,13 +90,10 @@ int zfs_vdev_cache_bshift = 16;
SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
TUNABLE_INT("vfs.zfs.vdev.cache.max", &zfs_vdev_cache_max);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, max, CTLFLAG_RDTUN,
&zfs_vdev_cache_max, 0, "Maximum I/O request size that increase read size");
TUNABLE_INT("vfs.zfs.vdev.cache.size", &zfs_vdev_cache_size);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, size, CTLFLAG_RDTUN,
&zfs_vdev_cache_size, 0, "Size of VDEV cache");
TUNABLE_INT("vfs.zfs.vdev.cache.bshift", &zfs_vdev_cache_bshift);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, bshift, CTLFLAG_RDTUN,
&zfs_vdev_cache_bshift, 0, "Turn too small requests into 1 << this value");


@ -53,14 +53,12 @@ DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW,
static int vdev_geom_bio_flush_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN,
&vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_delete_disable", &vdev_geom_bio_delete_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RW,
static int vdev_geom_bio_delete_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN,
&vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
static void


@ -74,32 +74,26 @@ static SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
/* Rotating media load calculation configuration. */
static int rotating_inc = 0;
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_inc", &rotating_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_inc, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_inc, CTLFLAG_RWTUN,
&rotating_inc, 0, "Rotating media load increment for non-seeking I/O's");
static int rotating_seek_inc = 5;
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_seek_inc", &rotating_seek_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_inc, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_inc, CTLFLAG_RWTUN,
&rotating_seek_inc, 0, "Rotating media load increment for seeking I/O's");
static int rotating_seek_offset = 1 * 1024 * 1024;
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_seek_offset", &rotating_seek_offset);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_offset, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_offset, CTLFLAG_RWTUN,
&rotating_seek_offset, 0, "Offset in bytes from the last I/O which "
"triggers a reduced rotating media seek increment");
/* Non-rotating media load calculation configuration. */
static int non_rotating_inc = 0;
TUNABLE_INT("vfs.zfs.vdev.mirror.non_rotating_inc", &non_rotating_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_inc, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_inc, CTLFLAG_RWTUN,
&non_rotating_inc, 0,
"Non-rotating media load increment for non-seeking I/O's");
static int non_rotating_seek_inc = 1;
TUNABLE_INT("vfs.zfs.vdev.mirror.non_rotating_seek_inc",
&non_rotating_seek_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_seek_inc, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_seek_inc, CTLFLAG_RWTUN,
&non_rotating_seek_inc, 0,
"Non-rotating media load increment for seeking I/O's");


@ -176,23 +176,18 @@ int zfs_vdev_write_gap_limit = 4 << 10;
#ifdef __FreeBSD__
SYSCTL_DECL(_vfs_zfs_vdev);
TUNABLE_INT("vfs.zfs.vdev.max_active", &zfs_vdev_max_active);
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, max_active, CTLFLAG_RW,
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, max_active, CTLFLAG_RWTUN,
&zfs_vdev_max_active, 0,
"The maximum number of I/Os of all types active for each device.");
#define ZFS_VDEV_QUEUE_KNOB_MIN(name) \
TUNABLE_INT("vfs.zfs.vdev." #name "_min_active", \
&zfs_vdev_ ## name ## _min_active); \
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _min_active, CTLFLAG_RW, \
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _min_active, CTLFLAG_RWTUN,\
&zfs_vdev_ ## name ## _min_active, 0, \
"Initial number of I/O requests of type " #name \
" active for each device");
#define ZFS_VDEV_QUEUE_KNOB_MAX(name) \
TUNABLE_INT("vfs.zfs.vdev." #name "_max_active", \
&zfs_vdev_ ## name ## _max_active); \
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _max_active, CTLFLAG_RW, \
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _max_active, CTLFLAG_RWTUN,\
&zfs_vdev_ ## name ## _max_active, 0, \
"Maximum number of I/O requests of type " #name \
" active for each device");
@ -212,16 +207,13 @@ ZFS_VDEV_QUEUE_KNOB_MAX(trim);
#undef ZFS_VDEV_QUEUE_KNOB
TUNABLE_INT("vfs.zfs.vdev.aggregation_limit", &zfs_vdev_aggregation_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, aggregation_limit, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, aggregation_limit, CTLFLAG_RWTUN,
&zfs_vdev_aggregation_limit, 0,
"I/O requests are aggregated up to this size");
TUNABLE_INT("vfs.zfs.vdev.read_gap_limit", &zfs_vdev_read_gap_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, read_gap_limit, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, read_gap_limit, CTLFLAG_RWTUN,
&zfs_vdev_read_gap_limit, 0,
"Acceptable gap between two reads being aggregated");
TUNABLE_INT("vfs.zfs.vdev.write_gap_limit", &zfs_vdev_write_gap_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, write_gap_limit, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, write_gap_limit, CTLFLAG_RWTUN,
&zfs_vdev_write_gap_limit, 0,
"Acceptable gap between two writes being aggregated");
#endif


@ -195,8 +195,7 @@ CTASSERT(sizeof(zfs_cmd_t) < IOCPARM_MAX);
static int snapshot_list_prefetch;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.snapshot_list_prefetch", &snapshot_list_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, snapshot_list_prefetch, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs, OID_AUTO, snapshot_list_prefetch, CTLFLAG_RWTUN,
&snapshot_list_prefetch, 0, "Prefetch data when listing snapshots");
static struct cdev *zfsdev;


@ -74,8 +74,7 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, super_owner, CTLFLAG_RW, &zfs_super_owner, 0,
"File system owner can perform privileged operation on his file systems");
int zfs_debug_level;
TUNABLE_INT("vfs.zfs.debug", &zfs_debug_level);
SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RW, &zfs_debug_level, 0,
SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RWTUN, &zfs_debug_level, 0,
"Debug level");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, version, CTLFLAG_RD, 0, "ZFS versions");


@ -70,8 +70,7 @@
*/
int zil_replay_disable = 0;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.zil_replay_disable", &zil_replay_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RW,
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RWTUN,
&zil_replay_disable, 0, "Disable intent logging replay");
/*
@ -80,12 +79,10 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RW,
* out-of-order write cache is enabled.
*/
boolean_t zfs_nocacheflush = B_FALSE;
TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
&zfs_nocacheflush, 0, "Disable cache flush");
boolean_t zfs_trim_enabled = B_TRUE;
SYSCTL_DECL(_vfs_zfs_trim);
TUNABLE_INT("vfs.zfs.trim.enabled", &zfs_trim_enabled);
SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0,
"Enable ZFS TRIM");


@ -46,11 +46,9 @@ static int zio_use_uma = 1;
#else
static int zio_use_uma = 0;
#endif
TUNABLE_INT("vfs.zfs.zio.use_uma", &zio_use_uma);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
"Use uma(9) for ZIO allocations");
static int zio_exclude_metadata = 0;
TUNABLE_INT("vfs.zfs.zio.exclude_metadata", &zio_exclude_metadata);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
"Exclude metadata buffers from dumps as well");
@ -104,15 +102,12 @@ extern vmem_t *zio_alloc_arena;
* regular blocks are not deferred.
*/
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_deferred_free", &zfs_sync_pass_deferred_free);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_deferred_free, CTLFLAG_RDTUN,
&zfs_sync_pass_deferred_free, 0, "defer frees starting in this pass");
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_dont_compress", &zfs_sync_pass_dont_compress);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_dont_compress, CTLFLAG_RDTUN,
&zfs_sync_pass_dont_compress, 0, "don't compress starting in this pass");
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_rewrite", &zfs_sync_pass_rewrite);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_rewrite, CTLFLAG_RDTUN,
&zfs_sync_pass_rewrite, 0, "rewrite new bps starting in this pass");


@ -119,7 +119,6 @@ static uint32_t zvol_minors;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
"Expose as GEOM providers (1), device files (2) or neither");


@ -25,8 +25,7 @@
SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace debug parameters");
int dtrace_debug = 0;
TUNABLE_INT("debug.dtrace.debug", &dtrace_debug);
SYSCTL_INT(_debug_dtrace, OID_AUTO, debug, CTLFLAG_RW, &dtrace_debug, 0, "");
SYSCTL_INT(_debug_dtrace, OID_AUTO, debug, CTLFLAG_RWTUN, &dtrace_debug, 0, "");
/* Report registered DTrace providers. */
static int


@ -89,14 +89,11 @@ extern const char *freebsd32_syscallnames[];
static SYSCTL_NODE(_compat, OID_AUTO, ia32, CTLFLAG_RW, 0, "ia32 mode");
static u_long ia32_maxdsiz = IA32_MAXDSIZ;
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxdsiz, CTLFLAG_RW, &ia32_maxdsiz, 0, "");
TUNABLE_ULONG("compat.ia32.maxdsiz", &ia32_maxdsiz);
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxdsiz, CTLFLAG_RWTUN, &ia32_maxdsiz, 0, "");
u_long ia32_maxssiz = IA32_MAXSSIZ;
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxssiz, CTLFLAG_RW, &ia32_maxssiz, 0, "");
TUNABLE_ULONG("compat.ia32.maxssiz", &ia32_maxssiz);
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxssiz, CTLFLAG_RWTUN, &ia32_maxssiz, 0, "");
static u_long ia32_maxvmem = IA32_MAXVMEM;
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxvmem, CTLFLAG_RW, &ia32_maxvmem, 0, "");
TUNABLE_ULONG("compat.ia32.maxvmem", &ia32_maxvmem);
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxvmem, CTLFLAG_RWTUN, &ia32_maxvmem, 0, "");
struct sysentvec ia32_freebsd_sysvec = {
.sv_size = FREEBSD32_SYS_MAXSYSCALL,


@ -70,12 +70,10 @@ static struct mtx x86bios_lock;
static SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL,
"x86bios debugging");
static int x86bios_trace_call;
TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RWTUN, &x86bios_trace_call, 0,
"Trace far function calls");
static int x86bios_trace_int;
TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RWTUN, &x86bios_trace_int, 0,
"Trace software interrupt handlers");
#ifdef X86BIOS_NATIVE_VM86


@ -61,7 +61,6 @@ static int aac_pci_probe(device_t dev);
static int aac_pci_attach(device_t dev);
static int aac_enable_msi = 1;
TUNABLE_INT("hw.aac.enable_msi", &aac_enable_msi);
SYSCTL_INT(_hw_aac, OID_AUTO, enable_msi, CTLFLAG_RDTUN, &aac_enable_msi, 0,
"Enable MSI interrupts");


@ -56,7 +56,6 @@ ACPI_MODULE_NAME("SCHEDULE")
* Allow the user to tune the maximum number of tasks we may enqueue.
*/
static int acpi_max_tasks = ACPI_MAX_TASKS;
TUNABLE_INT("debug.acpi.max_tasks", &acpi_max_tasks);
SYSCTL_INT(_debug_acpi, OID_AUTO, max_tasks, CTLFLAG_RDTUN, &acpi_max_tasks,
0, "Maximum acpi tasks");
@ -65,7 +64,6 @@ SYSCTL_INT(_debug_acpi, OID_AUTO, max_tasks, CTLFLAG_RDTUN, &acpi_max_tasks,
* some systems have problems with increased parallelism.
*/
static int acpi_max_threads = ACPI_MAX_THREADS;
TUNABLE_INT("debug.acpi.max_threads", &acpi_max_threads);
SYSCTL_INT(_debug_acpi, OID_AUTO, max_threads, CTLFLAG_RDTUN, &acpi_max_threads,
0, "Maximum acpi threads");


@ -129,7 +129,6 @@ struct acpi_cpu_device {
/* Allow users to ignore processor orders in MADT. */
static int cpu_unordered;
TUNABLE_INT("debug.acpi.cpu_unordered", &cpu_unordered);
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
&cpu_unordered, 0,
"Do not use the MADT to match ACPI Processor objects to CPUs.");


@ -181,16 +181,13 @@ ACPI_SERIAL_DECL(ec, "ACPI embedded controller");
static SYSCTL_NODE(_debug_acpi, OID_AUTO, ec, CTLFLAG_RD, NULL, "EC debugging");
static int ec_burst_mode;
TUNABLE_INT("debug.acpi.ec.burst", &ec_burst_mode);
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, burst, CTLFLAG_RW, &ec_burst_mode, 0,
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, burst, CTLFLAG_RWTUN, &ec_burst_mode, 0,
"Enable use of burst mode (faster for nearly all systems)");
static int ec_polled_mode;
TUNABLE_INT("debug.acpi.ec.polled", &ec_polled_mode);
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, polled, CTLFLAG_RW, &ec_polled_mode, 0,
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, polled, CTLFLAG_RWTUN, &ec_polled_mode, 0,
"Force use of polled mode (only if interrupt mode doesn't work)");
static int ec_timeout = EC_TIMEOUT;
TUNABLE_INT("debug.acpi.ec.timeout", &ec_timeout);
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, timeout, CTLFLAG_RW, &ec_timeout,
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, timeout, CTLFLAG_RWTUN, &ec_timeout,
EC_TIMEOUT, "Total time spent waiting for a response (poll+sleep)");
static ACPI_STATUS


@ -92,7 +92,6 @@ static int amr_setup_mbox(struct amr_softc *sc);
static int amr_ccb_map(struct amr_softc *sc);
static u_int amr_force_sg32 = 0;
TUNABLE_INT("hw.amr.force_sg32", &amr_force_sg32);
SYSCTL_DECL(_hw_amr);
SYSCTL_UINT(_hw_amr, OID_AUTO, force_sg32, CTLFLAG_RDTUN, &amr_force_sg32, 0,
"Force the AMR driver to use 32bit scatter gather");


@ -80,9 +80,8 @@ int ata_dma_check_80pin = 1;
/* sysctl vars */
static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
CTLFLAG_RW, &ata_dma_check_80pin, 1,
CTLFLAG_RWTUN, &ata_dma_check_80pin, 0,
"Check for 80pin cable before setting ATA DMA mode");
FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");


@ -96,9 +96,8 @@ static SYSCTL_NODE(_hw_ath, OID_AUTO, hal, CTLFLAG_RD, 0,
#ifdef AH_DEBUG
int ath_hal_debug = 0;
SYSCTL_INT(_hw_ath_hal, OID_AUTO, debug, CTLFLAG_RW, &ath_hal_debug,
SYSCTL_INT(_hw_ath_hal, OID_AUTO, debug, CTLFLAG_RWTUN, &ath_hal_debug,
0, "Atheros HAL debugging printfs");
TUNABLE_INT("hw.ath.hal.debug", &ath_hal_debug);
#endif /* AH_DEBUG */
static MALLOC_DEFINE(M_ATH_HAL, "ath_hal", "ath hal data");


@ -240,17 +240,14 @@ SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
0, "ANI calibration (msecs)");
int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf,
0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf,
0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt,
0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);
int ath_bstuck_threshold = 4; /* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,


@ -92,9 +92,8 @@ __FBSDID("$FreeBSD$");
uint64_t ath_debug = 0;
SYSCTL_DECL(_hw_ath);
SYSCTL_QUAD(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
SYSCTL_QUAD(_hw_ath, OID_AUTO, debug, CTLFLAG_RWTUN, &ath_debug,
0, "control debugging printfs");
TUNABLE_QUAD("hw.ath.debug", &ath_debug);
void
ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,

View File

@ -535,44 +535,37 @@ static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
/* Allowable values are TRUE or FALSE */
static int bce_verbose = TRUE;
TUNABLE_INT("hw.bce.verbose", &bce_verbose);
SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0,
"Verbose output enable/disable");
/* Allowable values are TRUE or FALSE */
static int bce_tso_enable = TRUE;
TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
"TSO Enable/Disable");
/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
/* ToDo: Add MSI-X support. */
static int bce_msi_enable = 1;
TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
"MSI-X|MSI|INTx selector");
/* Allowable values are 1, 2, 4, 8. */
static int bce_rx_pages = DEFAULT_RX_PAGES;
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0,
"Receive buffer descriptor pages (1 page = 255 buffer descriptors)");
/* Allowable values are 1, 2, 4, 8. */
static int bce_tx_pages = DEFAULT_TX_PAGES;
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0,
"Transmit buffer descriptor pages (1 page = 255 buffer descriptors)");
/* Allowable values are TRUE or FALSE. */
static int bce_hdr_split = TRUE;
TUNABLE_INT("hw.bce.hdr_split", &bce_hdr_split);
SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0,
"Frame header/payload splitting Enable/Disable");
/* Allowable values are TRUE or FALSE. */
static int bce_strict_rx_mtu = FALSE;
TUNABLE_INT("hw.bce.strict_rx_mtu", &bce_strict_rx_mtu);
SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN,
&bce_strict_rx_mtu, 0,
"Enable/Disable strict RX frame size checking");
@ -585,7 +578,6 @@ static int bce_tx_quick_cons_trip_int = 1;
/* Generate 1 interrupt for every 20 transmit completions. */
static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT;
#endif
TUNABLE_INT("hw.bce.tx_quick_cons_trip_int", &bce_tx_quick_cons_trip_int);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN,
&bce_tx_quick_cons_trip_int, 0,
"Transmit BD trip point during interrupts");
@ -598,7 +590,6 @@ static int bce_tx_quick_cons_trip = 1;
/* Generate 1 interrupt for every 20 transmit completions. */
static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
#endif
TUNABLE_INT("hw.bce.tx_quick_cons_trip", &bce_tx_quick_cons_trip);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN,
&bce_tx_quick_cons_trip, 0,
"Transmit BD trip point");
@ -611,7 +602,6 @@ static int bce_tx_ticks_int = 0;
/* Generate an interrupt if 80us have elapsed since the last TX completion. */
static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT;
#endif
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN,
&bce_tx_ticks_int, 0, "Transmit ticks count during interrupt");
@ -623,7 +613,6 @@ static int bce_tx_ticks = 0;
/* Generate an interrupt if 80us have elapsed since the last TX completion. */
static int bce_tx_ticks = DEFAULT_TX_TICKS;
#endif
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN,
&bce_tx_ticks, 0, "Transmit ticks count");
@ -635,7 +624,6 @@ static int bce_rx_quick_cons_trip_int = 1;
/* Generate 1 interrupt for every 6 received frames. */
static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT;
#endif
TUNABLE_INT("hw.bce.rx_quick_cons_trip_int", &bce_rx_quick_cons_trip_int);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN,
&bce_rx_quick_cons_trip_int, 0,
"Receive BD trip point duirng interrupts");
@ -648,7 +636,6 @@ static int bce_rx_quick_cons_trip = 1;
/* Generate 1 interrupt for every 6 received frames. */
static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
#endif
TUNABLE_INT("hw.bce.rx_quick_cons_trip", &bce_rx_quick_cons_trip);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN,
&bce_rx_quick_cons_trip, 0,
"Receive BD trip point");
@ -661,7 +648,6 @@ static int bce_rx_ticks_int = 0;
/* Generate an int. if 18us have elapsed since the last received frame. */
static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
#endif
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN,
&bce_rx_ticks_int, 0, "Receive ticks count during interrupt");
@ -673,7 +659,6 @@ static int bce_rx_ticks = 0;
/* Generate an int. if 18us have elapsed since the last received frame. */
static int bce_rx_ticks = DEFAULT_RX_TICKS;
#endif
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN,
&bce_rx_ticks, 0, "Receive ticks count");

View File

@ -542,10 +542,8 @@ DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
static int bge_allow_asf = 1;
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0,
"Allow ASF mode if available");
#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
@ -6237,7 +6235,6 @@ bge_add_sysctls(struct bge_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *children;
char tn[32];
int unit;
ctx = device_get_sysctl_ctx(sc->bge_dev);
@ -6276,18 +6273,14 @@ bge_add_sysctls(struct bge_softc *sc)
* consumes a lot of CPU cycles, so leave it off by default.
*/
sc->bge_forced_collapse = 0;
snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
CTLFLAG_RW, &sc->bge_forced_collapse, 0,
CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0,
"Number of fragmented TX buffers of a frame allowed before "
"forced collapsing");
sc->bge_msi = 1;
snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
TUNABLE_INT_FETCH(tn, &sc->bge_msi);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI");
/*
* It seems all Broadcom controllers have a bug that can generate UDP
@ -6300,10 +6293,8 @@ bge_add_sysctls(struct bge_softc *sc)
* dev.bge.0.forced_udpcsum.
*/
sc->bge_forced_udpcsum = 0;
snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0,
"Enable UDP checksum offloading even if controller can "
"generate UDP checksum value 0");

View File

@ -83,9 +83,8 @@ static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0,
#ifdef BWN_DEBUG
static int bwn_debug = 0;
SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RW, &bwn_debug, 0,
SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0,
"Broadcom debugging printfs");
TUNABLE_INT("hw.bwn.debug", &bwn_debug);
enum {
BWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
BWN_DEBUG_RECV = 0x00000002, /* basic recv operation */

View File

@ -297,67 +297,56 @@ SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
/* Debug */
unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
&bxe_debug, 0, "Debug logging mode");
/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
&bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
&bxe_queue_count, 0, "Multi-Queue queue count");
/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
&bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
&bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
&bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
&bxe_rx_budget, 0, "Rx processing budget");
/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
&bxe_max_aggregation_size, 0, "max aggregation size");
/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
&bxe_mrrs, 0, "PCIe maximum read request size");
/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
&bxe_autogreeen, 0, "AutoGrEEEn support");
/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
&bxe_udp_rss, 0, "UDP RSS support");

View File

@ -57,16 +57,12 @@ __FBSDID("$FreeBSD$");
static SYSCTL_NODE(_hw, OID_AUTO, cardbus, CTLFLAG_RD, 0, "CardBus parameters");
int cardbus_debug = 0;
TUNABLE_INT("hw.cardbus.debug", &cardbus_debug);
SYSCTL_INT(_hw_cardbus, OID_AUTO, debug, CTLFLAG_RW,
&cardbus_debug, 0,
"CardBus debug");
SYSCTL_INT(_hw_cardbus, OID_AUTO, debug, CTLFLAG_RWTUN,
&cardbus_debug, 0, "CardBus debug");
int cardbus_cis_debug = 0;
TUNABLE_INT("hw.cardbus.cis_debug", &cardbus_cis_debug);
SYSCTL_INT(_hw_cardbus, OID_AUTO, cis_debug, CTLFLAG_RW,
&cardbus_cis_debug, 0,
"CardBus CIS debug");
SYSCTL_INT(_hw_cardbus, OID_AUTO, cis_debug, CTLFLAG_RWTUN,
&cardbus_cis_debug, 0, "CardBus CIS debug");
#define DPRINTF(a) if (cardbus_debug) printf a
#define DEVPRINTF(x) if (cardbus_debug) device_printf x

View File

@ -107,14 +107,12 @@ driver_intr_t csintr;
static SYSCTL_NODE(_hw, OID_AUTO, cs, CTLFLAG_RD, 0, "cs device parameters");
int cs_ignore_cksum_failure = 0;
TUNABLE_INT("hw.cs.ignore_checksum_failure", &cs_ignore_cksum_failure);
SYSCTL_INT(_hw_cs, OID_AUTO, ignore_checksum_failure, CTLFLAG_RW,
SYSCTL_INT(_hw_cs, OID_AUTO, ignore_checksum_failure, CTLFLAG_RWTUN,
&cs_ignore_cksum_failure, 0,
"ignore checksum errors in cs card EEPROM");
static int cs_recv_delay = 570;
TUNABLE_INT("hw.cs.recv_delay", &cs_recv_delay);
SYSCTL_INT(_hw_cs, OID_AUTO, recv_delay, CTLFLAG_RW, &cs_recv_delay, 570, "");
SYSCTL_INT(_hw_cs, OID_AUTO, recv_delay, CTLFLAG_RWTUN, &cs_recv_delay, 570, "");
static int cs8900_eeint2irq[16] = {
10, 11, 12, 5, 255, 255, 255, 255,

View File

@ -5,6 +5,7 @@
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <cxgb_osdep.h>
#include <common/cxgb_common.h>
@ -16,3 +17,5 @@
#include <common/cxgb_sge_defs.h>
#include <common/cxgb_firmware_exports.h>
#include <common/jhash.h>
SYSCTL_DECL(_hw_cxgb);
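
Pulling the SYSCTL_DECL() into this shared header lets the other cxgb files
below attach children to hw.cxgb without each re-declaring the node. Under
the reworked API the macro declares the extern node object itself rather
than its child list; roughly (the knob and variable names here are
hypothetical):

SYSCTL_DECL(_hw_cxgb);          /* extern declaration of the hw.cxgb node */
SYSCTL_INT(_hw_cxgb, OID_AUTO, some_knob, CTLFLAG_RDTUN,
    &some_var, 0, "illustrative child hanging off the declared node");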

View File

@ -208,7 +208,6 @@ static SLIST_HEAD(, uld_info) t3_uld_list;
*/
static int msi_allowed = 2;
TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
"MSI-X, MSI, INTx selector");
@ -218,7 +217,6 @@ SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
* To disable it and force a single queue-set per port, use multiq = 0
*/
static int multiq = 1;
TUNABLE_INT("hw.cxgb.multiq", &multiq);
SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
"use min(ncpus/ports, 8) queue-sets per port");
@ -228,17 +226,14 @@ SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
*
*/
static int force_fw_update = 0;
TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
"update firmware even if up to date");
int cxgb_use_16k_clusters = -1;
TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
&cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
static int nfilters = -1;
TUNABLE_INT("hw.cxgb.nfilters", &nfilters);
SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
&nfilters, 0, "max number of entries in the filter table");

View File

@ -86,13 +86,11 @@ CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
extern struct sysctl_oid_list sysctl__hw_cxgb_children;
int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
TUNABLE_INT("hw.cxgb.txq_mr_size", &cxgb_txq_buf_ring_size);
SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
"size of per-queue mbuf ring");
static int cxgb_tx_coalesce_force = 0;
TUNABLE_INT("hw.cxgb.tx_coalesce_force", &cxgb_tx_coalesce_force);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RW,
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
&cxgb_tx_coalesce_force, 0,
"coalesce small packets into a single work request regardless of ring state");
@ -106,19 +104,15 @@ SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RW,
static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_coalesce_enable_start",
&cxgb_tx_coalesce_enable_start);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RW,
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
&cxgb_tx_coalesce_enable_start, 0,
"coalesce enable threshold");
static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_coalesce_enable_stop", &cxgb_tx_coalesce_enable_stop);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RW,
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
&cxgb_tx_coalesce_enable_stop, 0,
"coalesce disable threshold");
static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_reclaim_threshold", &cxgb_tx_reclaim_threshold);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RW,
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
&cxgb_tx_reclaim_threshold, 0,
"tx cleaning minimum threshold");

View File

@ -106,43 +106,35 @@ static char *states[] = {
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgb, CTLFLAG_RD, 0, "iw_cxgb driver parameters");
static int ep_timeout_secs = 60;
TUNABLE_INT("hw.iw_cxgb.ep_timeout_secs", &ep_timeout_secs);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0,
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
"CM Endpoint operation timeout in seconds (default=60)");
static int mpa_rev = 1;
TUNABLE_INT("hw.iw_cxgb.mpa_rev", &mpa_rev);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
"MPA Revision, 0 supports amso1100, 1 is spec compliant. (default=1)");
static int markers_enabled = 0;
TUNABLE_INT("hw.iw_cxgb.markers_enabled", &markers_enabled);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0,
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
"Enable MPA MARKERS (default(0)=disabled)");
static int crc_enabled = 1;
TUNABLE_INT("hw.iw_cxgb.crc_enabled", &crc_enabled);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0,
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
"Enable MPA CRC (default(1)=enabled)");
static int rcv_win = 256 * 1024;
TUNABLE_INT("hw.iw_cxgb.rcv_win", &rcv_win);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0,
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
"TCP receive window in bytes (default=256KB)");
static int snd_win = 32 * 1024;
TUNABLE_INT("hw.iw_cxgb.snd_win", &snd_win);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0,
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
"TCP send window in bytes (default=32KB)");
static unsigned int nocong = 0;
TUNABLE_INT("hw.iw_cxgb.nocong", &nocong);
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, nocong, CTLFLAG_RW, &nocong, 0,
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, nocong, CTLFLAG_RWTUN, &nocong, 0,
"Turn off congestion control (default=0)");
static unsigned int cong_flavor = 1;
TUNABLE_INT("hw.iw_cxgb.cong_flavor", &cong_flavor);
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, cong_flavor, CTLFLAG_RW, &cong_flavor, 0,
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, cong_flavor, CTLFLAG_RWTUN, &cong_flavor, 0,
"TCP Congestion control flavor (default=1)");
static void ep_timeout(void *arg);

View File

@ -769,88 +769,72 @@ process_socket_event(struct c4iw_ep *ep)
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");
int db_delay_usecs = 1;
TUNABLE_INT("hw.iw_cxgbe.db_delay_usecs", &db_delay_usecs);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RW, &db_delay_usecs, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RWTUN, &db_delay_usecs, 0,
"Usecs to delay awaiting db fifo to drain");
static int dack_mode = 1;
TUNABLE_INT("hw.iw_cxgbe.dack_mode", &dack_mode);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RW, &dack_mode, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
"Delayed ack mode (default = 1)");
int c4iw_max_read_depth = 8;
TUNABLE_INT("hw.iw_cxgbe.c4iw_max_read_depth", &c4iw_max_read_depth);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RW, &c4iw_max_read_depth, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
"Per-connection max ORD/IRD (default = 8)");
static int enable_tcp_timestamps;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_timestamps", &enable_tcp_timestamps);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RW, &enable_tcp_timestamps, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
"Enable tcp timestamps (default = 0)");
static int enable_tcp_sack;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_sack", &enable_tcp_sack);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RW, &enable_tcp_sack, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
"Enable tcp SACK (default = 0)");
static int enable_tcp_window_scaling = 1;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_window_scaling", &enable_tcp_window_scaling);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RW, &enable_tcp_window_scaling, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
"Enable tcp window scaling (default = 1)");
int c4iw_debug = 1;
TUNABLE_INT("hw.iw_cxgbe.c4iw_debug", &c4iw_debug);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RW, &c4iw_debug, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
"Enable debug logging (default = 0)");
static int peer2peer;
TUNABLE_INT("hw.iw_cxgbe.peer2peer", &peer2peer);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RW, &peer2peer, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
"Support peer2peer ULPs (default = 0)");
static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
TUNABLE_INT("hw.iw_cxgbe.p2p_type", &p2p_type);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RW, &p2p_type, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");
static int ep_timeout_secs = 60;
TUNABLE_INT("hw.iw_cxgbe.ep_timeout_secs", &ep_timeout_secs);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
"CM Endpoint operation timeout in seconds (default = 60)");
static int mpa_rev = 1;
TUNABLE_INT("hw.iw_cxgbe.mpa_rev", &mpa_rev);
#ifdef IW_CM_MPAV2
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
"MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
#else
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
"MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant (default = 1)");
#endif
static int markers_enabled;
TUNABLE_INT("hw.iw_cxgbe.markers_enabled", &markers_enabled);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
"Enable MPA MARKERS (default(0) = disabled)");
static int crc_enabled = 1;
TUNABLE_INT("hw.iw_cxgbe.crc_enabled", &crc_enabled);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
"Enable MPA CRC (default(1) = enabled)");
static int rcv_win = 256 * 1024;
TUNABLE_INT("hw.iw_cxgbe.rcv_win", &rcv_win);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
"TCP receive window in bytes (default = 256KB)");
static int snd_win = 128 * 1024;
TUNABLE_INT("hw.iw_cxgbe.snd_win", &snd_win);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
"TCP send window in bytes (default = 128KB)");
int db_fc_threshold = 2000;
TUNABLE_INT("hw.iw_cxgbe.db_fc_threshold", &db_fc_threshold);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RW, &db_fc_threshold, 0,
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RWTUN, &db_fc_threshold, 0,
"QP count/threshold that triggers automatic");
static void

View File

@ -132,7 +132,6 @@ static struct cdevsw drm_cdevsw = {
};
static int drm_msi = 1; /* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
"Enable MSI interrupts for drm devices");

View File

@ -70,7 +70,7 @@ int drm_sysctl_init(struct drm_device *dev)
dev->sysctl = info;
/* Add the sysctl node for DRI if it doesn't already exist */
drioid = SYSCTL_ADD_NODE( &info->ctx, &sysctl__hw_children, OID_AUTO, "dri", CTLFLAG_RW, NULL, "DRI Graphics");
drioid = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(&sysctl___hw), OID_AUTO, "dri", CTLFLAG_RW, NULL, "DRI Graphics");
if (!drioid)
return 1;
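
This is one of the few call sites where the API change is visible rather
than just the flag rename: a static node is now referenced through its
struct sysctl_oid, and the child list is obtained via the accessor macro.
A sketch of the two spellings, assuming a static hw node:

/* Old: name the children list symbol directly. */
oid = SYSCTL_ADD_NODE(&info->ctx, &sysctl__hw_children, OID_AUTO,
    "dri", CTLFLAG_RW, NULL, "DRI Graphics");

/* New: go through the node's OID; SYSCTL_CHILDREN() returns its child
 * list, and keeping the parent OID pointer is what lets the kernel
 * rebuild the full sysctl path when fetching tunables. */
oid = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(&sysctl___hw), OID_AUTO,
    "dri", CTLFLAG_RW, NULL, "DRI Graphics");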

View File

@ -203,7 +203,6 @@ static struct cdevsw drm_cdevsw = {
};
static int drm_msi = 1; /* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
"Enable MSI interrupts for drm devices");

View File

@ -68,7 +68,7 @@ int drm_sysctl_init(struct drm_device *dev)
dev->sysctl = info;
/* Add the sysctl node for DRI if it doesn't already exist */
drioid = SYSCTL_ADD_NODE(&info->ctx, &sysctl__hw_children, OID_AUTO,
drioid = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(&sysctl___hw), OID_AUTO,
"dri", CTLFLAG_RW, NULL, "DRI Graphics");
if (!drioid)
return 1;

View File

@ -353,8 +353,6 @@ static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt,
0, "Default transmit interrupt delay in usecs");
SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
@ -362,8 +360,6 @@ SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
&em_tx_abs_int_delay_dflt, 0,
"Default transmit interrupt delay limit in usecs");
@ -373,32 +369,26 @@ SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
SYSCTL_INT(_hw_em, OID_AUTO, rxd, CTLFLAG_RDTUN, &em_rxd, 0,
"Number of receive descriptors per queue");
SYSCTL_INT(_hw_em, OID_AUTO, txd, CTLFLAG_RDTUN, &em_txd, 0,
"Number of transmit descriptors per queue");
static int em_smart_pwr_down = FALSE;
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
0, "Set to true to leave smart power down enabled on newer adapters");
/* Controls whether promiscuous also shows bad packets */
static int em_debug_sbp = FALSE;
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0,
"Show bad packets in promiscuous mode");
static int em_enable_msix = TRUE;
TUNABLE_INT("hw.em.enable_msix", &em_enable_msix);
SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0,
"Enable MSI-X interrupts");
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&em_rx_process_limit, 0,
"Maximum number of received packets to process "
@ -406,7 +396,6 @@ SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
/* Energy efficient ethernet - default to OFF */
static int eee_setting = 1;
TUNABLE_INT("hw.em.eee_setting", &eee_setting);
SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
"Enable Energy Efficient Ethernet");

View File

@ -327,8 +327,6 @@ static SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters");
/* Descriptor defaults */
static int igb_rxd = IGB_DEFAULT_RXD;
static int igb_txd = IGB_DEFAULT_TXD;
TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
SYSCTL_INT(_hw_igb, OID_AUTO, rxd, CTLFLAG_RDTUN, &igb_rxd, 0,
"Number of receive descriptors per queue");
SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0,
@ -341,8 +339,7 @@ SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0,
** traffic for that interrupt vector
*/
static int igb_enable_aim = TRUE;
TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim);
SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RW, &igb_enable_aim, 0,
SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igb_enable_aim, 0,
"Enable adaptive interrupt moderation");
/*
@ -350,7 +347,6 @@ SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RW, &igb_enable_aim, 0,
* but this allows it to be forced off for testing.
*/
static int igb_enable_msix = 1;
TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0,
"Enable MSI-X interrupts");
@ -358,7 +354,6 @@ SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0,
** Tuneable Interrupt rate
*/
static int igb_max_interrupt_rate = 8000;
TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate);
SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
&igb_max_interrupt_rate, 0, "Maximum interrupts per second");
@ -367,7 +362,6 @@ SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
** Tuneable number of buffers in the buf-ring (drbr_xxx)
*/
static int igb_buf_ring_size = IGB_BR_SIZE;
TUNABLE_INT("hw.igb.buf_ring_size", &igb_buf_ring_size);
SYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN,
&igb_buf_ring_size, 0, "Size of the bufring");
#endif
@ -381,7 +375,6 @@ SYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN,
** a very workload dependent type feature.
*/
static int igb_header_split = FALSE;
TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0,
"Enable receive mbuf header split");
@ -391,7 +384,6 @@ SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0,
** MSIX messages if left at 0.
*/
static int igb_num_queues = 0;
TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
SYSCTL_INT(_hw_igb, OID_AUTO, num_queues, CTLFLAG_RDTUN, &igb_num_queues, 0,
"Number of queues to configure, 0 indicates autoconfigure");
@ -404,7 +396,6 @@ static int igb_last_bind_cpu = -1;
/* How many packets rxeof tries to clean at a time */
static int igb_rx_process_limit = 100;
TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
SYSCTL_INT(_hw_igb, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&igb_rx_process_limit, 0,
"Maximum number of received packets to process at a time, -1 means unlimited");

View File

@ -101,7 +101,6 @@ static video_adapter_t *vesa_adp;
static SYSCTL_NODE(_debug, OID_AUTO, vesa, CTLFLAG_RD, NULL, "VESA debugging");
static int vesa_shadow_rom;
TUNABLE_INT("debug.vesa.shadow_rom", &vesa_shadow_rom);
SYSCTL_INT(_debug_vesa, OID_AUTO, shadow_rom, CTLFLAG_RDTUN, &vesa_shadow_rom,
0, "Enable video BIOS shadow");

View File

@ -78,14 +78,13 @@
#undef OHCI_DEBUG
static int nocyclemaster = 0;
static int nocyclemaster;
int firewire_phydma_enable = 1;
SYSCTL_DECL(_hw_firewire);
SYSCTL_INT(_hw_firewire, OID_AUTO, nocyclemaster, CTLFLAG_RW, &nocyclemaster, 0,
"Do not send cycle start packets");
SYSCTL_INT(_hw_firewire, OID_AUTO, phydma_enable, CTLFLAG_RW,
&firewire_phydma_enable, 1, "Allow physical request DMA from firewire");
TUNABLE_INT("hw.firewire.phydma_enable", &firewire_phydma_enable);
SYSCTL_INT(_hw_firewire, OID_AUTO, nocyclemaster, CTLFLAG_RWTUN,
&nocyclemaster, 0, "Do not send cycle start packets");
SYSCTL_INT(_hw_firewire, OID_AUTO, phydma_enable, CTLFLAG_RWTUN,
&firewire_phydma_enable, 0, "Allow physical request DMA from firewire");
static char dbcode[16][0x10]={"OUTM", "OUTL","INPM","INPL",
"STOR","LOAD","NOP ","STOP",};

View File

@ -88,21 +88,17 @@ static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;
static MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RW, &fwedebug, 0, "");
SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RWTUN, &fwedebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD, 0,
"Ethernet emulation subsystem");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RW, &stream_ch, 0,
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RWTUN, &stream_ch, 0,
"Stream channel to use");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, tx_speed, CTLFLAG_RW, &tx_speed, 0,
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, tx_speed, CTLFLAG_RWTUN, &tx_speed, 0,
"Transmission speed");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
0, "Length of the receive queue");
TUNABLE_INT("hw.firewire.fwe.stream_ch", &stream_ch);
TUNABLE_INT("hw.firewire.fwe.tx_speed", &tx_speed);
TUNABLE_INT("hw.firewire.fwe.rx_queue_len", &rx_queue_len);
#ifdef DEVICE_POLLING
static poll_handler_t fwe_poll;

View File

@ -105,11 +105,9 @@ SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0,
"Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
0, "Length of the receive queue");
TUNABLE_INT("hw.firewire.fwip.rx_queue_len", &rx_queue_len);
#ifdef DEVICE_POLLING
static poll_handler_t fwip_poll;

View File

@ -134,31 +134,23 @@ static int sbp_tags = 0;
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0,
"SBP-II Subsystem");
SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RW, &debug, 0,
SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RWTUN, &debug, 0,
"SBP debug flag");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RW, &auto_login, 0,
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RWTUN, &auto_login, 0,
"SBP perform login automatically");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RW, &max_speed, 0,
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RWTUN, &max_speed, 0,
"SBP transfer max speed");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RW,
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RWTUN,
&ex_login, 0, "SBP enable exclusive login");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RW,
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RWTUN,
&login_delay, 0, "SBP login delay in msec");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RW,
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RWTUN,
&scan_delay, 0, "SBP scan delay in msec");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RW,
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RWTUN,
&use_doorbell, 0, "SBP use doorbell request");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RW, &sbp_tags, 0,
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RWTUN, &sbp_tags, 0,
"SBP tagged queuing support");
TUNABLE_INT("hw.firewire.sbp.auto_login", &auto_login);
TUNABLE_INT("hw.firewire.sbp.max_speed", &max_speed);
TUNABLE_INT("hw.firewire.sbp.exclusive_login", &ex_login);
TUNABLE_INT("hw.firewire.sbp.login_delay", &login_delay);
TUNABLE_INT("hw.firewire.sbp.scan_delay", &scan_delay);
TUNABLE_INT("hw.firewire.sbp.use_doorbell", &use_doorbell);
TUNABLE_INT("hw.firewire.sbp.tags", &sbp_tags);
#define NEED_RESPONSE 0
#define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE)

View File

@ -314,7 +314,6 @@ glxiic_attach(device_t dev)
struct sysctl_oid *tree;
int error, irq, unit;
uint32_t irq_map;
char tn[32];
sc = device_get_softc(dev);
sc->dev = dev;
@ -402,10 +401,8 @@ glxiic_attach(device_t dev)
tree = device_get_sysctl_tree(dev);
sc->timeout = GLXIIC_DEFAULT_TIMEOUT;
snprintf(tn, sizeof(tn), "dev.glxiic.%d.timeout", unit);
TUNABLE_INT_FETCH(tn, &sc->timeout);
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"timeout", CTLFLAG_RW | CTLFLAG_TUN, &sc->timeout, 0,
"timeout", CTLFLAG_RWTUN, &sc->timeout, 0,
"activity timeout in ms");
glxiic_gpio_enable(sc);

View File

@ -617,7 +617,7 @@ hpt_status(FORMAL_HANDLER_ARGS)
NULL, 0, hpt_status, "A", "Get/Set " #name " state")
#else
#define hptregister_node(name) \
SYSCTL_NODE(, OID_AUTO, name, CTLFLAG_RW, 0, "Get/Set " #name " state root node"); \
SYSCTL_ROOT_NODE(OID_AUTO, name, CTLFLAG_RW, 0, "Get/Set " #name " state root node"); \
SYSCTL_OID(_ ## name, OID_AUTO, status, CTLTYPE_STRING|CTLFLAG_RW, \
NULL, 0, hpt_status, "A", "Get/Set " #name " state");
#endif
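
Creating a node directly under the sysctl root used to be spelled as
SYSCTL_NODE() with an empty parent argument, an idiom the new
parent-OID-pointer scheme cannot express, hence the dedicated macro. The
two spellings side by side ("mynode" is illustrative):

/* Old: empty first argument means "attach at the root". */
SYSCTL_NODE(, OID_AUTO, mynode, CTLFLAG_RW, 0, "example root node");

/* New: explicit macro for root-level nodes. */
SYSCTL_ROOT_NODE(OID_AUTO, mynode, CTLFLAG_RW, 0, "example root node");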

View File

@ -66,8 +66,7 @@ SYSCTL_DECL(_kern_hwpmc);
*/
static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_TUN|CTLFLAG_RD,
SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_RDTUN,
&pmclog_buffer_size, 0, "size of log buffers in kilobytes");
/*
@ -75,8 +74,7 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_TUN|CTLFLAG_RD,
*/
static int pmc_nlogbuffers = PMC_NLOGBUFFERS;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_TUN|CTLFLAG_RD,
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_RDTUN,
&pmc_nlogbuffers, 0, "number of global log buffers");
/*

View File

@ -234,8 +234,7 @@ static void pmc_generic_cpu_finalize(struct pmc_mdep *md);
SYSCTL_DECL(_kern_hwpmc);
static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_TUN|CTLFLAG_RD,
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN,
&pmc_callchaindepth, 0, "depth of call chain records");
#ifdef DEBUG
@ -244,7 +243,7 @@ char pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
#endif
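
This is the one spot in the diff where the new CTLFLAG_NOFETCH flag appears.
The debugflags tunable is consumed manually (TUNABLE_STR() into
pmc_debugstr, parsed during module initialization), and a CTLTYPE_STRING
procedure OID has no backing buffer the automatic fetch could safely fill
at registration time, so the auto-fetch is suppressed while the OID is
still advertised as a tunable:

TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
    sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
/* CTLFLAG_NOFETCH: skip the early-boot kenv lookup; the TUNABLE_STR()
 * above already consumes the value. */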
@ -254,8 +253,7 @@ SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
*/
static int pmc_hashsize = PMC_HASH_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN,
&pmc_hashsize, 0, "rows in hash tables");
/*
@ -263,8 +261,7 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
*/
static int pmc_nsamples = PMC_NSAMPLES;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN,
&pmc_nsamples, 0, "number of PC samples per CPU");
@ -273,8 +270,7 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
*/
static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN,
&pmc_mtxpool_size, 0, "size of spin mutex pool");
@ -288,8 +284,7 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
*/
static int pmc_unprivileged_syspmcs = 0;
TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN,
&pmc_unprivileged_syspmcs, 0,
"allow unprivileged process to allocate system PMCs");

View File

@ -61,24 +61,19 @@
SYSCTL_NODE(_kern, OID_AUTO, icl, CTLFLAG_RD, 0, "iSCSI Common Layer");
static int debug = 1;
TUNABLE_INT("kern.icl.debug", &debug);
SYSCTL_INT(_kern_icl, OID_AUTO, debug, CTLFLAG_RWTUN,
&debug, 0, "Enable debug messages");
static int coalesce = 1;
TUNABLE_INT("kern.icl.coalesce", &coalesce);
SYSCTL_INT(_kern_icl, OID_AUTO, coalesce, CTLFLAG_RWTUN,
&coalesce, 0, "Try to coalesce PDUs before sending");
static int partial_receive_len = 128 * 1024;
TUNABLE_INT("kern.icl.partial_receive_len", &partial_receive_len);
SYSCTL_INT(_kern_icl, OID_AUTO, partial_receive_len, CTLFLAG_RWTUN,
&partial_receive_len, 0, "Minimum read size for partially received "
"data segment");
static int sendspace = 1048576;
TUNABLE_INT("kern.icl.sendspace", &sendspace);
SYSCTL_INT(_kern_icl, OID_AUTO, sendspace, CTLFLAG_RWTUN,
&sendspace, 0, "Default send socket buffer size");
static int recvspace = 1048576;
TUNABLE_INT("kern.icl.recvspace", &recvspace);
SYSCTL_INT(_kern_icl, OID_AUTO, recvspace, CTLFLAG_RWTUN,
&recvspace, 0, "Default receive socket buffer size");

View File

@ -78,27 +78,21 @@ static struct iscsi_softc *sc;
SYSCTL_NODE(_kern, OID_AUTO, iscsi, CTLFLAG_RD, 0, "iSCSI initiator");
static int debug = 1;
TUNABLE_INT("kern.iscsi.debug", &debug);
SYSCTL_INT(_kern_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN,
&debug, 0, "Enable debug messages");
static int ping_timeout = 5;
TUNABLE_INT("kern.iscsi.ping_timeout", &ping_timeout);
SYSCTL_INT(_kern_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout,
0, "Timeout for ping (NOP-Out) requests, in seconds");
static int iscsid_timeout = 60;
TUNABLE_INT("kern.iscsi.iscsid_timeout", &iscsid_timeout);
SYSCTL_INT(_kern_iscsi, OID_AUTO, iscsid_timeout, CTLFLAG_RWTUN, &iscsid_timeout,
0, "Time to wait for iscsid(8) to handle reconnection, in seconds");
static int login_timeout = 60;
TUNABLE_INT("kern.iscsi.login_timeout", &login_timeout);
SYSCTL_INT(_kern_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout,
0, "Time to wait for iscsid(8) to finish Login Phase, in seconds");
static int maxtags = 255;
TUNABLE_INT("kern.iscsi.maxtags", &maxtags);
SYSCTL_INT(_kern_iscsi, OID_AUTO, maxtags, CTLFLAG_RWTUN, &maxtags,
0, "Max number of IO requests queued");
static int fail_on_disconnection = 0;
TUNABLE_INT("kern.iscsi.fail_on_disconnection", &fail_on_disconnection);
SYSCTL_INT(_kern_iscsi, OID_AUTO, fail_on_disconnection, CTLFLAG_RWTUN,
&fail_on_disconnection, 0, "Destroy CAM SIM on connection failure");

View File

@ -77,11 +77,11 @@ struct mtx iscsi_dbg_mtx;
#endif
static int max_sessions = MAX_SESSIONS;
SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_sessions, CTLFLAG_RDTUN, &max_sessions, MAX_SESSIONS,
"Max sessions allowed");
SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_sessions, CTLFLAG_RDTUN,
&max_sessions, 0, "Max sessions allowed");
static int max_pdus = MAX_PDUS;
SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_pdus, CTLFLAG_RDTUN, &max_pdus, MAX_PDUS,
"Max pdu pool");
SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_pdus, CTLFLAG_RDTUN,
&max_pdus, 0, "Max PDU pool");
static char isid[6+1] = {
0x80,
@ -711,9 +711,6 @@ iscsi_start(void)
{
debug_called(8);
TUNABLE_INT_FETCH("net.iscsi_initiator.max_sessions", &max_sessions);
TUNABLE_INT_FETCH("net.iscsi_initiator.max_pdus", &max_pdus);
isc = malloc(sizeof(struct isc_softc), M_ISCSI, M_ZERO|M_WAITOK);
mtx_init(&isc->isc_mtx, "iscsi-isc", NULL, MTX_DEF);
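
The deleted TUNABLE_INT_FETCH() calls illustrate the run-time half of the
cleanup: once the OIDs above are registered with CTLFLAG_RDTUN, the
net.iscsi_initiator.* environment variables are applied when the sysctls
are registered, so fetching them again in iscsi_start() was redundant.
In sketch form:

static int max_sessions = MAX_SESSIONS;
SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_sessions, CTLFLAG_RDTUN,
    &max_sessions, 0, "Max sessions allowed");
/* No TUNABLE_INT_FETCH("net.iscsi_initiator.max_sessions", ...) needed
 * later; the RDTUN registration already pulled the kenv value. */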

View File

@ -244,18 +244,15 @@ static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
** traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RW, &ixgbe_enable_aim, 0,
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
"Enable adaptive interrupt moderation");
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
&ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&ixgbe_rx_process_limit, 0,
"Maximum number of received packets to process at a time,"
@ -263,7 +260,6 @@ SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
&ixgbe_tx_process_limit, 0,
"Maximum number of sent packets to process at a time,"
@ -283,7 +279,6 @@ static int ixgbe_smart_speed = ixgbe_smart_speed_on;
* but this allows it to be forced off for testing.
*/
static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
"Enable MSI-X interrupts");
@ -294,7 +289,6 @@ SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
* can be overriden manually here.
*/
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
"Number of queues to configure, 0 indicates autoconfigure");
@ -304,13 +298,11 @@ SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
"Number of receive descriptors per queue");
/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
"Number of receive descriptors per queue");

View File

@ -65,27 +65,22 @@ SYSCTL_NODE(_hw, OID_AUTO, malo, CTLFLAG_RD, 0,
"Marvell 88w8335 driver parameters");
static int malo_txcoalesce = 8; /* # tx pkts to q before poking f/w*/
SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RW, &malo_txcoalesce,
SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &malo_txcoalesce,
0, "tx buffers to send at once");
TUNABLE_INT("hw.malo.txcoalesce", &malo_txcoalesce);
static int malo_rxbuf = MALO_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RW, &malo_rxbuf,
SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &malo_rxbuf,
0, "rx buffers allocated");
TUNABLE_INT("hw.malo.rxbuf", &malo_rxbuf);
static int malo_rxquota = MALO_RXBUF; /* # max buffers to process */
SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RW, &malo_rxquota,
SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RWTUN, &malo_rxquota,
0, "max rx buffers to process per interrupt");
TUNABLE_INT("hw.malo.rxquota", &malo_rxquota);
static int malo_txbuf = MALO_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RW, &malo_txbuf,
SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RWTUN, &malo_txbuf,
0, "tx buffers allocated");
TUNABLE_INT("hw.malo.txbuf", &malo_txbuf);
#ifdef MALO_DEBUG
static int malo_debug = 0;
SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RW, &malo_debug,
SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RWTUN, &malo_debug,
0, "control debugging printfs");
TUNABLE_INT("hw.malo.debug", &malo_debug);
enum {
MALO_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
MALO_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */

View File

@ -86,9 +86,8 @@ static SYSCTL_NODE(_hw_malo, OID_AUTO, pci, CTLFLAG_RD, 0,
"Marvell 88W8335 driver PCI parameters");
static int msi_disable = 0; /* MSI disabled */
SYSCTL_INT(_hw_malo_pci, OID_AUTO, msi_disable, CTLFLAG_RW, &msi_disable,
SYSCTL_INT(_hw_malo_pci, OID_AUTO, msi_disable, CTLFLAG_RWTUN, &msi_disable,
0, "MSI disabled");
TUNABLE_INT("hw.malo.pci.msi_disable", &msi_disable);
/*
* Devices supported by this driver.

View File

@ -132,33 +132,27 @@ static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
0, "event message locale");
static int mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
0, "event message class");
static int mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
0, "Max commands limit (-1 = controller limit)");
static int mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
&mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
&mfi_polled_cmd_timeout, 0,
"Polled command timeout - used for firmware flash etc (in seconds)");
static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
0, "Command timeout (in seconds)");

View File

@ -90,8 +90,7 @@ static struct mfi_command * mfip_start(void *);
static void mfip_done(struct mfi_command *cm);
static int mfi_allow_disks = 0;
TUNABLE_INT("hw.mfi.allow_cam_disk_passthrough", &mfi_allow_disks);
SYSCTL_INT(_hw_mfi, OID_AUTO, allow_cam_disk_passthrough, CTLFLAG_RD,
SYSCTL_INT(_hw_mfi, OID_AUTO, allow_cam_disk_passthrough, CTLFLAG_RDTUN,
&mfi_allow_disks, 0, "event message locale");
static devclass_t mfip_devclass;

View File

@ -108,12 +108,10 @@ DRIVER_MODULE(mfi, pci, mfi_pci_driver, mfi_devclass, 0, 0);
MODULE_VERSION(mfi, 1);
static int mfi_msi = 1;
TUNABLE_INT("hw.mfi.msi", &mfi_msi);
SYSCTL_INT(_hw_mfi, OID_AUTO, msi, CTLFLAG_RDTUN, &mfi_msi, 0,
"Enable use of MSI interrupts");
static int mfi_mrsas_enable = 0;
TUNABLE_INT("hw.mfi.mrsas_enable", &mfi_mrsas_enable);
static int mfi_mrsas_enable;
SYSCTL_INT(_hw_mfi, OID_AUTO, mrsas_enable, CTLFLAG_RDTUN, &mfi_mrsas_enable,
0, "Allow mrasas to take newer cards");
@ -186,7 +184,6 @@ mfi_pci_probe(device_t dev)
device_set_desc(dev, id->desc);
/* give priority to mrsas if tunable set */
TUNABLE_INT_FETCH("hw.mfi.mrsas_enable", &mfi_mrsas_enable);
if ((id->flags & MFI_FLAGS_MRSAS) && mfi_mrsas_enable)
return (BUS_PROBE_LOW_PRIORITY);
else

View File

@ -86,7 +86,6 @@ static void mfi_queue_map_sync(struct mfi_softc *sc);
extern int mfi_polled_cmd_timeout;
static int mfi_fw_reset_test = 0;
#ifdef MFI_DEBUG
TUNABLE_INT("hw.mfi.fw_reset_test", &mfi_fw_reset_test);
SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
0, "Force a firmware reset condition");
#endif

View File

@ -188,31 +188,25 @@ static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
0, "rx descriptors allocated");
static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
0, "rx buffers allocated");
TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
0, "tx buffers allocated");
TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
0, "tx buffers to send at once");
TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
0, "max rx buffers to process per interrupt");
TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
static int mwl_rxdmalow = 3; /* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
0, "min free rx buffers before restarting traffic");
TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
#ifdef MWL_DEBUG
static int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
0, "control debugging printfs");
TUNABLE_INT("hw.mwl.debug", &mwl_debug);
enum {
MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
