Revert r267961, r267973:

These changes prevent sysctl(8) from returning proper output,
such as:

 1) no output from sysctl(8)
 2) erroneously returning ENOMEM with tools like truss(1)
    or uname(1), e.g.:
 truss: can not get etype: Cannot allocate memory
This commit is contained in:
Glen Barber 2014-06-27 22:05:21 +00:00
parent d2f1b8f4d2
commit 37a107a407
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=267985
263 changed files with 1388 additions and 802 deletions

View File

@ -45,8 +45,9 @@ __FBSDID("$FreeBSD$");
#include <machine/nexusvar.h> #include <machine/nexusvar.h>
int acpi_resume_beep; int acpi_resume_beep;
SYSCTL_INT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RWTUN, TUNABLE_INT("debug.acpi.resume_beep", &acpi_resume_beep);
&acpi_resume_beep, 0, "Beep the PC speaker when resuming"); SYSCTL_INT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RW, &acpi_resume_beep,
0, "Beep the PC speaker when resuming");
int acpi_reset_video; int acpi_reset_video;
TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video); TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video);

View File

@ -69,6 +69,7 @@ static char *mem_owner_bios = "BIOS";
(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK)) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))
static int mtrrs_disabled; static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN, SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
&mtrrs_disabled, 0, "Disable amd64 MTRRs."); &mtrrs_disabled, 0, "Disable amd64 MTRRs.");

View File

@ -675,7 +675,8 @@ cpu_halt(void)
void (*cpu_idle_hook)(sbintime_t) = NULL; /* ACPI idle hook. */ void (*cpu_idle_hook)(sbintime_t) = NULL; /* ACPI idle hook. */
static int cpu_ident_amdc1e = 0; /* AMD C1E supported. */ static int cpu_ident_amdc1e = 0; /* AMD C1E supported. */
static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */ static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait, TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
0, "Use MONITOR/MWAIT for short idle"); 0, "Use MONITOR/MWAIT for short idle");
#define STATE_RUNNING 0x0 #define STATE_RUNNING 0x0

View File

@ -68,7 +68,8 @@ static int watchdog_dontfire = 1;
static int watchdog_timer = -1; static int watchdog_timer = -1;
static int watchdog_nmi = 1; static int watchdog_nmi = 1;
SYSCTL_INT(_debug, OID_AUTO, watchdog_nmi, CTLFLAG_RWTUN, &watchdog_nmi, 0, TUNABLE_INT("debug.watchdog", &watchdog_cpu);
SYSCTL_INT(_debug, OID_AUTO, watchdog_nmi, CTLFLAG_RW, &watchdog_nmi, 0,
"IPI the boot processor with an NMI to enter the debugger"); "IPI the boot processor with an NMI to enter the debugger");
static struct callout watchdog_callout; static struct callout watchdog_callout;

View File

@ -332,8 +332,8 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
"Is page attribute table fully functional?"); "Is page attribute table fully functional?");
static int pg_ps_enabled = 1; static int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
&pg_ps_enabled, 0, "Are large page mappings enabled?"); "Are large page mappings enabled?");
#define PAT_INDEX_SIZE 8 #define PAT_INDEX_SIZE 8
static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */ static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
@ -368,8 +368,8 @@ static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
static struct unrhdr pcid_unr; static struct unrhdr pcid_unr;
static struct mtx pcid_mtx; static struct mtx pcid_mtx;
int pmap_pcid_enabled = 0; int pmap_pcid_enabled = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN, &pmap_pcid_enabled,
&pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?"); 0, "Is TLB Context ID enabled ?");
int invpcid_works = 0; int invpcid_works = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0, SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
"Is the invpcid instruction available ?"); "Is the invpcid instruction available ?");

View File

@ -73,6 +73,7 @@ static void
max_ldt_segment_init(void *arg __unused) max_ldt_segment_init(void *arg __unused)
{ {
TUNABLE_INT_FETCH("machdep.max_ldt_segment", &max_ldt_segment);
if (max_ldt_segment <= 0) if (max_ldt_segment <= 0)
max_ldt_segment = 1; max_ldt_segment = 1;
if (max_ldt_segment > MAX_LD) if (max_ldt_segment > MAX_LD)

View File

@ -143,18 +143,20 @@ static char *trap_msg[] = {
#ifdef KDB #ifdef KDB
static int kdb_on_nmi = 1; static int kdb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RWTUN, SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RW,
&kdb_on_nmi, 0, "Go to KDB on NMI"); &kdb_on_nmi, 0, "Go to KDB on NMI");
TUNABLE_INT("machdep.kdb_on_nmi", &kdb_on_nmi);
#endif #endif
static int panic_on_nmi = 1; static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN, SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
&panic_on_nmi, 0, "Panic on NMI"); &panic_on_nmi, 0, "Panic on NMI");
TUNABLE_INT("machdep.panic_on_nmi", &panic_on_nmi);
static int prot_fault_translation; static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN, SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RW,
&prot_fault_translation, 0, &prot_fault_translation, 0,
"Select signal to deliver on protection fault"); "Select signal to deliver on protection fault");
static int uprintf_signal; static int uprintf_signal;
SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RWTUN, SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RW,
&uprintf_signal, 0, &uprintf_signal, 0,
"Print debugging information on trap signal to ctty"); "Print debugging information on trap signal to ctty");

View File

@ -65,6 +65,7 @@ static int pcie_minbus, pcie_maxbus;
static uint32_t pcie_badslots; static uint32_t pcie_badslots;
static struct mtx pcicfg_mtx; static struct mtx pcicfg_mtx;
static int mcfg_enable = 1; static int mcfg_enable = 1;
TUNABLE_INT("hw.pci.mcfg", &mcfg_enable);
SYSCTL_INT(_hw_pci, OID_AUTO, mcfg, CTLFLAG_RDTUN, &mcfg_enable, 0, SYSCTL_INT(_hw_pci, OID_AUTO, mcfg, CTLFLAG_RDTUN, &mcfg_enable, 0,
"Enable support for PCI-e memory mapped config access"); "Enable support for PCI-e memory mapped config access");

View File

@ -199,6 +199,7 @@ SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
* interrupts disabled. * interrupts disabled.
*/ */
static int halt_detection_enabled = 1; static int halt_detection_enabled = 1;
TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN, SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
&halt_detection_enabled, 0, &halt_detection_enabled, 0,
"Halt VM if all vcpus execute HLT with interrupts disabled"); "Halt VM if all vcpus execute HLT with interrupts disabled");

View File

@ -224,10 +224,10 @@ busdma_init(void *dummy)
/* /*
* This init historically used SI_SUB_VM, but now the init code requires * This init historically used SI_SUB_VM, but now the init code requires
* malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
* SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
* SI_SUB_KMEM and SI_ORDER_FOURTH. * SI_SUB_KMEM and SI_ORDER_THIRD.
*/ */
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL); SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);
static __inline int static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) _bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)

View File

@ -276,10 +276,10 @@ busdma_init(void *dummy)
/* /*
* This init historically used SI_SUB_VM, but now the init code requires * This init historically used SI_SUB_VM, but now the init code requires
* malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
* SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
* SI_SUB_KMEM and SI_ORDER_FOURTH. * SI_SUB_KMEM and SI_ORDER_THIRD.
*/ */
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL); SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);
/* /*
* End block of code useful to transplant to other implementations. * End block of code useful to transplant to other implementations.

View File

@ -50,7 +50,8 @@ __FBSDID("$FreeBSD$");
CTASSERT(sizeof(struct kerneldumpheader) == 512); CTASSERT(sizeof(struct kerneldumpheader) == 512);
int do_minidump = 1; int do_minidump = 1;
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RWTUN, &do_minidump, 0, TUNABLE_INT("debug.minidump", &do_minidump);
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RW, &do_minidump, 0,
"Enable mini crash dumps"); "Enable mini crash dumps");
/* /*

View File

@ -65,8 +65,8 @@ static platform_t plat_obj;
static struct kobj_ops plat_kernel_kops; static struct kobj_ops plat_kernel_kops;
static struct platform_kobj plat_kernel_obj; static struct platform_kobj plat_kernel_obj;
static char plat_name[64]; static char plat_name[64] = "";
SYSCTL_STRING(_hw, OID_AUTO, platform, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, plat_name, 0, SYSCTL_STRING(_hw, OID_AUTO, platform, CTLFLAG_RDTUN, plat_name, 0,
"Platform currently in use"); "Platform currently in use");
/* /*

View File

@ -465,7 +465,7 @@ static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
/* Superpages utilization enabled = 1 / disabled = 0 */ /* Superpages utilization enabled = 1 / disabled = 0 */
static int sp_enabled = 1; static int sp_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &sp_enabled, 0, SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN, &sp_enabled, 0,
"Are large page mappings enabled?"); "Are large page mappings enabled?");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,

View File

@ -375,12 +375,12 @@ cpufreq_initialize(struct imx6_anatop_softc *sc)
"CPU frequency"); "CPU frequency");
SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6), SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6),
OID_AUTO, "cpu_minmhz", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH, OID_AUTO, "cpu_minmhz", CTLTYPE_INT | CTLFLAG_RWTUN, sc, 0,
sc, 0, cpufreq_sysctl_minmhz, "IU", "Minimum CPU frequency"); cpufreq_sysctl_minmhz, "IU", "Minimum CPU frequency");
SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6), SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6),
OID_AUTO, "cpu_maxmhz", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH, OID_AUTO, "cpu_maxmhz", CTLTYPE_INT | CTLFLAG_RWTUN, sc, 0,
sc, 0, cpufreq_sysctl_maxmhz, "IU", "Maximum CPU frequency"); cpufreq_sysctl_maxmhz, "IU", "Maximum CPU frequency");
SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6), SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_imx6),
OID_AUTO, "cpu_maxmhz_hw", CTLFLAG_RD, &sc->cpu_maxmhz_hw, 0, OID_AUTO, "cpu_maxmhz_hw", CTLFLAG_RD, &sc->cpu_maxmhz_hw, 0,
@ -413,6 +413,9 @@ cpufreq_initialize(struct imx6_anatop_softc *sc)
sc->cpu_maxmhz_hw = imx6_ocotp_mhz_tab[cfg3speed]; sc->cpu_maxmhz_hw = imx6_ocotp_mhz_tab[cfg3speed];
sc->cpu_maxmhz = sc->cpu_maxmhz_hw; sc->cpu_maxmhz = sc->cpu_maxmhz_hw;
TUNABLE_INT_FETCH("hw.imx6.cpu_overclock_enable",
&sc->cpu_overclock_enable);
TUNABLE_INT_FETCH("hw.imx6.cpu_minmhz", &sc->cpu_minmhz); TUNABLE_INT_FETCH("hw.imx6.cpu_minmhz", &sc->cpu_minmhz);
op = cpufreq_nearest_oppt(sc, sc->cpu_minmhz); op = cpufreq_nearest_oppt(sc, sc->cpu_minmhz);
sc->cpu_minmhz = op->mhz; sc->cpu_minmhz = op->mhz;

View File

@ -255,8 +255,9 @@ static SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0,
"IXP4XX NPE driver parameters"); "IXP4XX NPE driver parameters");
static int npe_debug = 0; static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RWTUN, &npe_debug, SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
0, "IXP4XX NPE network interface debug msgs"); 0, "IXP4XX NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define DPRINTF(sc, fmt, ...) do { \ #define DPRINTF(sc, fmt, ...) do { \
if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__); \ if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__); \
} while (0) } while (0)
@ -264,15 +265,18 @@ SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RWTUN, &npe_debug,
if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\ if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0) } while (0)
static int npe_tickinterval = 3; /* npe_tick frequency (secs) */ static int npe_tickinterval = 3; /* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RDTUN, &npe_tickinterval, SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
0, "periodic work interval (secs)"); 0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);
static int npe_rxbuf = 64; /* # rx buffers to allocate */ static int npe_rxbuf = 64; /* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RDTUN, &npe_rxbuf, SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
0, "rx buffers allocated"); 0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static int npe_txbuf = 128; /* # tx buffers to allocate */ static int npe_txbuf = 128; /* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RDTUN, &npe_txbuf, SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
0, "tx buffers allocated"); 0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);
static int static int
unit2npeid(int unit) unit2npeid(int unit)

View File

@ -181,8 +181,9 @@ typedef struct {
} IxNpeDlNpeMgrStateInfoBlock; } IxNpeDlNpeMgrStateInfoBlock;
static int npe_debug = 0; static int npe_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RWTUN, &npe_debug, SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RW, &npe_debug,
0, "IXP4XX NPE debug msgs"); 0, "IXP4XX NPE debug msgs");
TUNABLE_INT("debug.ixp425npe", &npe_debug);
#define DPRINTF(dev, fmt, ...) do { \ #define DPRINTF(dev, fmt, ...) do { \
if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \ if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \
} while (0) } while (0)

View File

@ -159,9 +159,10 @@ struct ixpqmgr_softc {
uint32_t aqmFreeSramAddress; /* SRAM free space */ uint32_t aqmFreeSramAddress; /* SRAM free space */
}; };
static int qmgr_debug; static int qmgr_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RWTUN, &qmgr_debug, SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RW, &qmgr_debug,
0, "IXP4XX Q-Manager debug msgs"); 0, "IXP4XX Q-Manager debug msgs");
TUNABLE_INT("debug.qmgr", &qmgr_debug);
#define DPRINTF(dev, fmt, ...) do { \ #define DPRINTF(dev, fmt, ...) do { \
if (qmgr_debug) printf(fmt, __VA_ARGS__); \ if (qmgr_debug) printf(fmt, __VA_ARGS__); \
} while (0) } while (0)

View File

@ -546,22 +546,30 @@ static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0, static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver"); "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RW,
&ada_legacy_aliases, 0, "Create legacy-like device aliases"); &ada_legacy_aliases, 0, "Create legacy-like device aliases");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.ada.legacy_aliases", &ada_legacy_aliases);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
&ada_retry_count, 0, "Normal I/O retry count"); &ada_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
&ada_default_timeout, 0, "Normal I/O timeout (in seconds)"); &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RW,
&ada_send_ordered, 0, "Send Ordered Tags"); &ada_send_ordered, 0, "Send Ordered Tags");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.ada.send_ordered", &ada_send_ordered);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
&ada_spindown_shutdown, 0, "Spin down upon shutdown"); &ada_spindown_shutdown, 0, "Spin down upon shutdown");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
&ada_spindown_suspend, 0, "Spin down upon suspend"); &ada_spindown_suspend, 0, "Spin down upon suspend");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RW,
&ada_read_ahead, 0, "Enable disk read-ahead"); &ada_read_ahead, 0, "Enable disk read-ahead");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.ada.read_ahead", &ada_read_ahead);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
&ada_write_cache, 0, "Enable disk write cache"); &ada_write_cache, 0, "Enable disk write cache");
TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);
/* /*
* ADA_ORDEREDTAG_INTERVAL determines how often, relative * ADA_ORDEREDTAG_INTERVAL determines how often, relative

View File

@ -139,12 +139,15 @@ static int pmp_hide_special = PMP_DEFAULT_HIDE_SPECIAL;
static SYSCTL_NODE(_kern_cam, OID_AUTO, pmp, CTLFLAG_RD, 0, static SYSCTL_NODE(_kern_cam, OID_AUTO, pmp, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver"); "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, retry_count, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_pmp, OID_AUTO, retry_count, CTLFLAG_RW,
&pmp_retry_count, 0, "Normal I/O retry count"); &pmp_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, default_timeout, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.pmp.retry_count", &pmp_retry_count);
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, default_timeout, CTLFLAG_RW,
&pmp_default_timeout, 0, "Normal I/O timeout (in seconds)"); &pmp_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, hide_special, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.pmp.default_timeout", &pmp_default_timeout);
SYSCTL_INT(_kern_cam_pmp, OID_AUTO, hide_special, CTLFLAG_RW,
&pmp_hide_special, 0, "Hide extra ports"); &pmp_hide_special, 0, "Hide extra ports");
TUNABLE_INT("kern.cam.pmp.hide_special", &pmp_hide_special);
static struct periph_driver pmpdriver = static struct periph_driver pmpdriver =
{ {

View File

@ -116,6 +116,7 @@ SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem");
#endif #endif
int cam_sort_io_queues = CAM_DEFAULT_SORT_IO_QUEUES; int cam_sort_io_queues = CAM_DEFAULT_SORT_IO_QUEUES;
TUNABLE_INT("kern.cam.sort_io_queues", &cam_sort_io_queues);
SYSCTL_INT(_kern_cam, OID_AUTO, sort_io_queues, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam, OID_AUTO, sort_io_queues, CTLFLAG_RWTUN,
&cam_sort_io_queues, 0, "Sort IO queues to try and optimise disk access patterns"); &cam_sort_io_queues, 0, "Sort IO queues to try and optimise disk access patterns");
#endif #endif

View File

@ -149,6 +149,7 @@ typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
/* Transport layer configuration information */ /* Transport layer configuration information */
static struct xpt_softc xsoftc; static struct xpt_softc xsoftc;
TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
&xsoftc.boot_delay, 0, "Bus registration wait time"); &xsoftc.boot_delay, 0, "Bus registration wait time");
@ -162,6 +163,7 @@ static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs; static int cam_num_doneqs;
static struct proc *cam_proc; static struct proc *cam_proc;
TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
&cam_num_doneqs, 0, "Number of completion queues/threads"); &cam_num_doneqs, 0, "Number of completion queues/threads");
@ -195,10 +197,12 @@ static struct cdevsw xpt_cdevsw = {
/* Storage for debugging datastructures */ /* Storage for debugging datastructures */
struct cam_path *cam_dpath; struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS; u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.dflags", &cam_dflags);
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
&cam_dflags, 0, "Enabled debug flags"); &cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
&cam_debug_delay, 0, "Delay in us after each debug message"); &cam_debug_delay, 0, "Delay in us after each debug message");
/* Our boot-time initialization hook */ /* Our boot-time initialization hook */

View File

@ -308,9 +308,11 @@ static int index_to_aps_page;
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1; static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
&worker_threads, 1, "Number of worker threads"); &worker_threads, 1, "Number of worker threads");
static int verbose = 0; static int verbose = 0;
TUNABLE_INT("kern.cam.ctl.verbose", &verbose);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN,
&verbose, 0, "Show SCSI errors returned to initiator"); &verbose, 0, "Show SCSI errors returned to initiator");

View File

@ -220,9 +220,10 @@ struct ctl_be_block_io {
}; };
static int cbb_num_threads = 14; static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
"CAM Target Layer Block Backend"); "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
&cbb_num_threads, 0, "Number of threads per backing file"); &cbb_num_threads, 0, "Number of threads per backing file");
static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);

View File

@ -85,15 +85,19 @@ static uma_zone_t cfiscsi_data_wait_zone;
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD, 0, SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD, 0,
"CAM Target Layer iSCSI Frontend"); "CAM Target Layer iSCSI Frontend");
static int debug = 3; static int debug = 3;
TUNABLE_INT("kern.cam.ctl.iscsi.debug", &debug);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN,
&debug, 1, "Enable debug messages"); &debug, 1, "Enable debug messages");
static int ping_timeout = 5; static int ping_timeout = 5;
TUNABLE_INT("kern.cam.ctl.iscsi.ping_timeout", &ping_timeout);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN,
&ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds"); &ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds");
static int login_timeout = 60; static int login_timeout = 60;
TUNABLE_INT("kern.cam.ctl.iscsi.login_timeout", &login_timeout);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN,
&login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds"); &login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds");
static int maxcmdsn_delta = 256; static int maxcmdsn_delta = 256;
TUNABLE_INT("kern.cam.ctl.iscsi.maxcmdsn_delta", &maxcmdsn_delta);
SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN,
&maxcmdsn_delta, 256, "Number of commands the initiator can send " &maxcmdsn_delta, 256, "Number of commands the initiator can send "
"without confirmation"); "without confirmation");

View File

@ -277,12 +277,15 @@ static int cd_retry_count = CD_DEFAULT_RETRY;
static int cd_timeout = CD_DEFAULT_TIMEOUT; static int cd_timeout = CD_DEFAULT_TIMEOUT;
static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver"); static SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver");
SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_cd, OID_AUTO, poll_period, CTLFLAG_RW,
&cd_poll_period, 0, "Media polling period in seconds"); &cd_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.cd.poll_period", &cd_poll_period);
SYSCTL_INT(_kern_cam_cd, OID_AUTO, retry_count, CTLFLAG_RW,
&cd_retry_count, 0, "Normal I/O retry count"); &cd_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.cd.retry_count", &cd_retry_count);
SYSCTL_INT(_kern_cam_cd, OID_AUTO, timeout, CTLFLAG_RW,
&cd_timeout, 0, "Timeout, in us, for read operations"); &cd_timeout, 0, "Timeout, in us, for read operations");
TUNABLE_INT("kern.cam.cd.timeout", &cd_timeout);
static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers"); static MALLOC_DEFINE(M_SCSICD, "scsi_cd", "scsi_cd buffers");

View File

@ -1188,14 +1188,18 @@ static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0, static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
"CAM Direct Access Disk driver"); "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RW,
&da_poll_period, 0, "Media polling period in seconds"); &da_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.da.poll_period", &da_poll_period);
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
&da_retry_count, 0, "Normal I/O retry count"); &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
&da_default_timeout, 0, "Normal I/O timeout (in seconds)"); &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RW,
&da_send_ordered, 0, "Send Ordered Tags"); &da_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.da.send_ordered", &da_send_ordered);
/* /*
* DA_ORDEREDTAG_INTERVAL determines how often, relative * DA_ORDEREDTAG_INTERVAL determines how often, relative

View File

@ -226,8 +226,9 @@ static char *safte_2little = "Too Little Data Returned (%d) at line %d\n";
int emulate_array_devices = 1; int emulate_array_devices = 1;
SYSCTL_DECL(_kern_cam_enc); SYSCTL_DECL(_kern_cam_enc);
SYSCTL_INT(_kern_cam_enc, OID_AUTO, emulate_array_devices, CTLFLAG_RWTUN, SYSCTL_INT(_kern_cam_enc, OID_AUTO, emulate_array_devices, CTLFLAG_RW,
&emulate_array_devices, 0, "Emulate Array Devices for SAF-TE"); &emulate_array_devices, 0, "Emulate Array Devices for SAF-TE");
TUNABLE_INT("kern.cam.enc.emulate_array_devices", &emulate_array_devices);
static int static int
safte_fill_read_buf_io(enc_softc_t *enc, struct enc_fsm_state *state, safte_fill_read_buf_io(enc_softc_t *enc, struct enc_fsm_state *state,

View File

@ -445,10 +445,9 @@ static int sa_allow_io_split = SA_DEFAULT_IO_SPLIT;
* is bad behavior, because it hides the true tape block size from the * is bad behavior, because it hides the true tape block size from the
* application. * application.
*/ */
TUNABLE_INT("kern.cam.sa.allow_io_split", &sa_allow_io_split);
static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0, static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0,
"CAM Sequential Access Tape Driver"); "CAM Sequential Access Tape Driver");
SYSCTL_INT(_kern_cam_sa, OID_AUTO, allow_io_split, CTLFLAG_RDTUN,
&sa_allow_io_split, 0, "Default I/O split value");
static struct periph_driver sadriver = static struct periph_driver sadriver =
{ {
@ -1495,7 +1494,7 @@ sasysctlinit(void *context, int pending)
goto bailout; goto bailout;
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "allow_io_split", CTLTYPE_INT | CTLFLAG_RDTUN | CTLFLAG_NOFETCH, OID_AUTO, "allow_io_split", CTLTYPE_INT | CTLFLAG_RDTUN,
&softc->allow_io_split, 0, "Allow Splitting I/O"); &softc->allow_io_split, 0, "Allow Splitting I/O");
SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
OID_AUTO, "maxio", CTLTYPE_INT | CTLFLAG_RD, OID_AUTO, "maxio", CTLTYPE_INT | CTLFLAG_RD,

View File

@ -78,8 +78,9 @@ struct scsi_quirk_entry {
#define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk)) #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk))
static int cam_srch_hi = 0; static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS); static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT | CTLFLAG_RWTUN, 0, 0, SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
sysctl_cam_search_luns, "I", sysctl_cam_search_luns, "I",
"allow search above LUN 7 for SCSI3 and greater devices"); "allow search above LUN 7 for SCSI3 and greater devices");

View File

@ -36,7 +36,7 @@ __FBSDID("$FreeBSD$");
static MALLOC_DEFINE(M_KSTAT, "kstat_data", "Kernel statistics"); static MALLOC_DEFINE(M_KSTAT, "kstat_data", "Kernel statistics");
SYSCTL_ROOT_NODE(OID_AUTO, kstat, CTLFLAG_RW, 0, "Kernel statistics"); SYSCTL_NODE(, OID_AUTO, kstat, CTLFLAG_RW, 0, "Kernel statistics");
kstat_t * kstat_t *
kstat_create(char *module, int instance, char *name, char *class, uchar_t type, kstat_create(char *module, int instance, char *name, char *class, uchar_t type,

View File

@ -204,6 +204,8 @@ int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0; int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0; int zfs_disable_dup_eviction = 0;
TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit); TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0, SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,

View File

@ -44,7 +44,8 @@ int zfs_dedup_prefetch = 1;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dedup, CTLFLAG_RW, 0, "ZFS DEDUP"); SYSCTL_NODE(_vfs_zfs, OID_AUTO, dedup, CTLFLAG_RW, 0, "ZFS DEDUP");
SYSCTL_INT(_vfs_zfs_dedup, OID_AUTO, prefetch, CTLFLAG_RWTUN, &zfs_dedup_prefetch, TUNABLE_INT("vfs.zfs.dedup.prefetch", &zfs_dedup_prefetch);
SYSCTL_INT(_vfs_zfs_dedup, OID_AUTO, prefetch, CTLFLAG_RW, &zfs_dedup_prefetch,
0, "Enable/disable prefetching of dedup-ed blocks which are going to be freed"); 0, "Enable/disable prefetching of dedup-ed blocks which are going to be freed");
static const ddt_ops_t *ddt_ops[DDT_TYPES] = { static const ddt_ops_t *ddt_ops[DDT_TYPES] = {

View File

@ -54,6 +54,7 @@
*/ */
int zfs_nopwrite_enabled = 1; int zfs_nopwrite_enabled = 1;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.nopwrite_enabled", &zfs_nopwrite_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, nopwrite_enabled, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, nopwrite_enabled, CTLFLAG_RDTUN,
&zfs_nopwrite_enabled, 0, "Enable nopwrite feature"); &zfs_nopwrite_enabled, 0, "Enable nopwrite feature");
@ -1625,7 +1626,8 @@ dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
} }
int zfs_mdcomp_disable = 0; int zfs_mdcomp_disable = 0;
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.mdcomp_disable", &zfs_mdcomp_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RW,
&zfs_mdcomp_disable, 0, "Disable metadata compression"); &zfs_mdcomp_disable, 0, "Disable metadata compression");
/* /*

View File

@ -55,12 +55,16 @@ SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, prefetch_disable, CTLFLAG_RW, SYSCTL_INT(_vfs_zfs, OID_AUTO, prefetch_disable, CTLFLAG_RW,
&zfs_prefetch_disable, 0, "Disable prefetch"); &zfs_prefetch_disable, 0, "Disable prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH"); SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH");
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.zfetch.max_streams", &zfetch_max_streams);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RW,
&zfetch_max_streams, 0, "Max # of streams per zfetch"); &zfetch_max_streams, 0, "Max # of streams per zfetch");
TUNABLE_INT("vfs.zfs.zfetch.min_sec_reap", &zfetch_min_sec_reap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, min_sec_reap, CTLFLAG_RDTUN, SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, min_sec_reap, CTLFLAG_RDTUN,
&zfetch_min_sec_reap, 0, "Min time before stream reclaim"); &zfetch_min_sec_reap, 0, "Min time before stream reclaim");
TUNABLE_INT("vfs.zfs.zfetch.block_cap", &zfetch_block_cap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, block_cap, CTLFLAG_RDTUN, SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, block_cap, CTLFLAG_RDTUN,
&zfetch_block_cap, 0, "Max number of blocks to fetch at a time"); &zfetch_block_cap, 0, "Max number of blocks to fetch at a time");
TUNABLE_QUAD("vfs.zfs.zfetch.array_rd_sz", &zfetch_array_rd_sz);
SYSCTL_UQUAD(_vfs_zfs_zfetch, OID_AUTO, array_rd_sz, CTLFLAG_RDTUN, SYSCTL_UQUAD(_vfs_zfs_zfetch, OID_AUTO, array_rd_sz, CTLFLAG_RDTUN,
&zfetch_array_rd_sz, 0, &zfetch_array_rd_sz, 0,
"Number of bytes in a array_read at which we stop prefetching"); "Number of bytes in a array_read at which we stop prefetching");

View File

@ -141,19 +141,23 @@ extern int zfs_vdev_async_write_active_max_dirty_percent;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
TUNABLE_QUAD("vfs.zfs.dirty_data_max", &zfs_dirty_data_max);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_max, CTLFLAG_RWTUN, SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_max, CTLFLAG_RWTUN,
&zfs_dirty_data_max, 0, &zfs_dirty_data_max, 0,
"The maximum amount of dirty data in bytes after which new writes are " "The maximum amount of dirty data in bytes after which new writes are "
"halted until space becomes available"); "halted until space becomes available");
TUNABLE_QUAD("vfs.zfs.dirty_data_max_max", &zfs_dirty_data_max_max);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_max_max, CTLFLAG_RDTUN, SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_max_max, CTLFLAG_RDTUN,
&zfs_dirty_data_max_max, 0, &zfs_dirty_data_max_max, 0,
"The absolute cap on dirty_data_max when auto calculating"); "The absolute cap on dirty_data_max when auto calculating");
TUNABLE_INT("vfs.zfs.dirty_data_max_percent", &zfs_dirty_data_max_percent);
SYSCTL_INT(_vfs_zfs, OID_AUTO, dirty_data_max_percent, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, dirty_data_max_percent, CTLFLAG_RDTUN,
&zfs_dirty_data_max_percent, 0, &zfs_dirty_data_max_percent, 0,
"The percent of physical memory used to auto calculate dirty_data_max"); "The percent of physical memory used to auto calculate dirty_data_max");
TUNABLE_QUAD("vfs.zfs.dirty_data_sync", &zfs_dirty_data_sync);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_sync, CTLFLAG_RWTUN, SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, dirty_data_sync, CTLFLAG_RWTUN,
&zfs_dirty_data_sync, 0, &zfs_dirty_data_sync, 0,
"Force a txg if the number of dirty buffer bytes exceed this value"); "Force a txg if the number of dirty buffer bytes exceed this value");

View File

@ -71,23 +71,32 @@ boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable srub prefetching */ boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable srub prefetching */
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.top_maxinflight", &zfs_top_maxinflight);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, CTLFLAG_RW,
&zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev"); &zfs_top_maxinflight, 0, "Maximum I/Os per top-level vdev");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.resilver_delay", &zfs_resilver_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_delay, CTLFLAG_RW,
&zfs_resilver_delay, 0, "Number of ticks to delay resilver"); &zfs_resilver_delay, 0, "Number of ticks to delay resilver");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.scrub_delay", &zfs_scrub_delay);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scrub_delay, CTLFLAG_RW,
&zfs_scrub_delay, 0, "Number of ticks to delay scrub"); &zfs_scrub_delay, 0, "Number of ticks to delay scrub");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.scan_idle", &zfs_scan_idle);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_idle, CTLFLAG_RW,
&zfs_scan_idle, 0, "Idle scan window in clock ticks"); &zfs_scan_idle, 0, "Idle scan window in clock ticks");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.scan_min_time_ms", &zfs_scan_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, scan_min_time_ms, CTLFLAG_RW,
&zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg"); &zfs_scan_min_time_ms, 0, "Min millisecs to scrub per txg");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.free_min_time_ms", &zfs_free_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, free_min_time_ms, CTLFLAG_RW,
&zfs_free_min_time_ms, 0, "Min millisecs to free per txg"); &zfs_free_min_time_ms, 0, "Min millisecs to free per txg");
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.resilver_min_time_ms", &zfs_resilver_min_time_ms);
SYSCTL_UINT(_vfs_zfs, OID_AUTO, resilver_min_time_ms, CTLFLAG_RW,
&zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg"); &zfs_resilver_min_time_ms, 0, "Min millisecs to resilver per txg");
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.no_scrub_io", &zfs_no_scrub_io);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_io, CTLFLAG_RW,
&zfs_no_scrub_io, 0, "Disable scrub I/O"); &zfs_no_scrub_io, 0, "Disable scrub I/O");
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW,
&zfs_no_scrub_prefetch, 0, "Disable scrub prefetching"); &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching");
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE; enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;

View File

@ -55,6 +55,7 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
uint64_t metaslab_aliquot = 512ULL << 10; uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */ uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN, SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
&metaslab_gang_bang, 0, &metaslab_gang_bang, 0,
"Force gang block allocation for blocks larger than or equal to this value"); "Force gang block allocation for blocks larger than or equal to this value");
@ -66,6 +67,7 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
* Values should be greater than or equal to 100. * Values should be greater than or equal to 100.
*/ */
int zfs_condense_pct = 200; int zfs_condense_pct = 200;
TUNABLE_INT("vfs.zfs.condense_pct", &zfs_condense_pct);
SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
&zfs_condense_pct, 0, &zfs_condense_pct, 0,
"Condense on-disk spacemap when it is more than this many percents" "Condense on-disk spacemap when it is more than this many percents"
@ -85,6 +87,7 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
* no metaslab group will be excluded based on this criterion. * no metaslab group will be excluded based on this criterion.
*/ */
int zfs_mg_noalloc_threshold = 0; int zfs_mg_noalloc_threshold = 0;
TUNABLE_INT("vfs.zfs.mg_noalloc_threshold", &zfs_mg_noalloc_threshold);
SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
&zfs_mg_noalloc_threshold, 0, &zfs_mg_noalloc_threshold, 0,
"Percentage of metaslab group size that should be free" "Percentage of metaslab group size that should be free"
@ -94,6 +97,7 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
* When set will load all metaslabs when pool is first opened. * When set will load all metaslabs when pool is first opened.
*/ */
int metaslab_debug_load = 0; int metaslab_debug_load = 0;
TUNABLE_INT("vfs.zfs.metaslab.debug_load", &metaslab_debug_load);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
&metaslab_debug_load, 0, &metaslab_debug_load, 0,
"Load all metaslabs when pool is first opened"); "Load all metaslabs when pool is first opened");
@ -102,6 +106,7 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
* When set will prevent metaslabs from being unloaded. * When set will prevent metaslabs from being unloaded.
*/ */
int metaslab_debug_unload = 0; int metaslab_debug_unload = 0;
TUNABLE_INT("vfs.zfs.metaslab.debug_unload", &metaslab_debug_unload);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
&metaslab_debug_unload, 0, &metaslab_debug_unload, 0,
"Prevent metaslabs from being unloaded"); "Prevent metaslabs from being unloaded");
@ -113,6 +118,8 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
* aggressive strategy (i.e search by size rather than offset). * aggressive strategy (i.e search by size rather than offset).
*/ */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE; uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
TUNABLE_QUAD("vfs.zfs.metaslab.df_alloc_threshold",
&metaslab_df_alloc_threshold);
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN, SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
&metaslab_df_alloc_threshold, 0, &metaslab_df_alloc_threshold, 0,
"Minimum size which forces the dynamic allocator to change it's allocation strategy"); "Minimum size which forces the dynamic allocator to change it's allocation strategy");
@ -124,25 +131,27 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
* switch to using best-fit allocations. * switch to using best-fit allocations.
*/ */
int metaslab_df_free_pct = 4; int metaslab_df_free_pct = 4;
TUNABLE_INT("vfs.zfs.metaslab.df_free_pct", &metaslab_df_free_pct);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
&metaslab_df_free_pct, 0, &metaslab_df_free_pct, 0,
"The minimum free space, in percent, which must be available in a " "The minimum free space, in percent, which must be available in a space map to continue allocations in a first-fit fashion");
"space map to continue allocations in a first-fit fashion");
/* /*
* A metaslab is considered "free" if it contains a contiguous * A metaslab is considered "free" if it contains a contiguous
* segment which is greater than metaslab_min_alloc_size. * segment which is greater than metaslab_min_alloc_size.
*/ */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS; uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
TUNABLE_QUAD("vfs.zfs.metaslab.min_alloc_size",
&metaslab_min_alloc_size);
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN, SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
&metaslab_min_alloc_size, 0, &metaslab_min_alloc_size, 0,
"A metaslab is considered \"free\" if it contains a contiguous " "A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
"segment which is greater than vfs.zfs.metaslab.min_alloc_size");
/* /*
* Percentage of all cpus that can be used by the metaslab taskq. * Percentage of all cpus that can be used by the metaslab taskq.
*/ */
int metaslab_load_pct = 50; int metaslab_load_pct = 50;
TUNABLE_INT("vfs.zfs.metaslab.load_pct", &metaslab_load_pct);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
&metaslab_load_pct, 0, &metaslab_load_pct, 0,
"Percentage of cpus that can be used by the metaslab taskq"); "Percentage of cpus that can be used by the metaslab taskq");
@ -153,6 +162,7 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
* keep it loaded. * keep it loaded.
*/ */
int metaslab_unload_delay = TXG_SIZE * 2; int metaslab_unload_delay = TXG_SIZE * 2;
TUNABLE_INT("vfs.zfs.metaslab.unload_delay", &metaslab_unload_delay);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
&metaslab_unload_delay, 0, &metaslab_unload_delay, 0,
"Number of TXGs that an unused metaslab can be kept in memory"); "Number of TXGs that an unused metaslab can be kept in memory");
@ -163,11 +173,13 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
boolean_t zfs_write_to_degraded = B_FALSE; boolean_t zfs_write_to_degraded = B_FALSE;
SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RWTUN,
&zfs_write_to_degraded, 0, "Allow writing data to degraded vdevs"); &zfs_write_to_degraded, 0, "Allow writing data to degraded vdevs");
TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
/* /*
* Max number of metaslabs per group to preload. * Max number of metaslabs per group to preload.
*/ */
int metaslab_preload_limit = SPA_DVAS_PER_BP; int metaslab_preload_limit = SPA_DVAS_PER_BP;
TUNABLE_INT("vfs.zfs.metaslab.preload_limit", &metaslab_preload_limit);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
&metaslab_preload_limit, 0, &metaslab_preload_limit, 0,
"Max number of metaslabs per group to preload"); "Max number of metaslabs per group to preload");
@ -176,6 +188,7 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
* Enable/disable preloading of metaslab. * Enable/disable preloading of metaslab.
*/ */
boolean_t metaslab_preload_enabled = B_TRUE; boolean_t metaslab_preload_enabled = B_TRUE;
TUNABLE_INT("vfs.zfs.metaslab.preload_enabled", &metaslab_preload_enabled);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
&metaslab_preload_enabled, 0, &metaslab_preload_enabled, 0,
"Max number of metaslabs per group to preload"); "Max number of metaslabs per group to preload");
@ -184,6 +197,8 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
* Enable/disable additional weight factor for each metaslab. * Enable/disable additional weight factor for each metaslab.
*/ */
boolean_t metaslab_weight_factor_enable = B_FALSE; boolean_t metaslab_weight_factor_enable = B_FALSE;
TUNABLE_INT("vfs.zfs.metaslab.weight_factor_enable",
&metaslab_weight_factor_enable);
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, weight_factor_enable, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, weight_factor_enable, CTLFLAG_RWTUN,
&metaslab_weight_factor_enable, 0, &metaslab_weight_factor_enable, 0,
"Enable additional weight factor for each metaslab"); "Enable additional weight factor for each metaslab");

View File

@ -84,7 +84,8 @@
static int check_hostid = 1; static int check_hostid = 1;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RWTUN, &check_hostid, 0, TUNABLE_INT("vfs.zfs.check_hostid", &check_hostid);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RW, &check_hostid, 0,
"Check hostid on import?"); "Check hostid on import?");
/* /*

View File

@ -244,6 +244,7 @@ int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
int zfs_flags = 0; int zfs_flags = 0;
#endif #endif
SYSCTL_DECL(_debug); SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0, SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
"ZFS debug flags."); "ZFS debug flags.");
@ -256,6 +257,7 @@ SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
*/ */
int zfs_recover = 0; int zfs_recover = 0;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0, SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0,
"Try to recover from otherwise-fatal errors."); "Try to recover from otherwise-fatal errors.");
@ -268,6 +270,7 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0,
* in a system panic. * in a system panic.
*/ */
uint64_t zfs_deadman_synctime_ms = 1000000ULL; uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN, SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
&zfs_deadman_synctime_ms, 0, &zfs_deadman_synctime_ms, 0,
"Stalled ZFS I/O expiration time in milliseconds"); "Stalled ZFS I/O expiration time in milliseconds");
@ -277,6 +280,7 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
* for hung I/O. * for hung I/O.
*/ */
uint64_t zfs_deadman_checktime_ms = 5000ULL; uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN, SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
&zfs_deadman_checktime_ms, 0, &zfs_deadman_checktime_ms, 0,
"Period of checks for stalled ZFS I/O in milliseconds"); "Period of checks for stalled ZFS I/O in milliseconds");
@ -286,6 +290,7 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
* zfs_deadman_init() * zfs_deadman_init()
*/ */
int zfs_deadman_enabled = -1; int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
&zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O"); &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");
@ -299,6 +304,7 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
*/ */
int spa_asize_inflation = 24; int spa_asize_inflation = 24;
TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
&spa_asize_inflation, 0, "Worst case inflation factor for single sector writes"); &spa_asize_inflation, 0, "Worst case inflation factor for single sector writes");

View File

@ -85,22 +85,31 @@ static u_int trim_vdev_max_pending = 64;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RD, 0, "ZFS TRIM"); SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RD, 0, "ZFS TRIM");
TUNABLE_INT("vfs.zfs.trim.txg_delay", &trim_txg_delay);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, txg_delay, CTLFLAG_RWTUN, &trim_txg_delay, SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, txg_delay, CTLFLAG_RWTUN, &trim_txg_delay,
0, "Delay TRIMs by up to this many TXGs"); 0, "Delay TRIMs by up to this many TXGs");
TUNABLE_INT("vfs.zfs.trim.timeout", &trim_timeout);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, timeout, CTLFLAG_RWTUN, &trim_timeout, 0, SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, timeout, CTLFLAG_RWTUN, &trim_timeout, 0,
"Delay TRIMs by up to this many seconds"); "Delay TRIMs by up to this many seconds");
TUNABLE_INT("vfs.zfs.trim.max_interval", &trim_max_interval);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, max_interval, CTLFLAG_RWTUN, SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, max_interval, CTLFLAG_RWTUN,
&trim_max_interval, 0, &trim_max_interval, 0,
"Maximum interval between TRIM queue processing (seconds)"); "Maximum interval between TRIM queue processing (seconds)");
SYSCTL_DECL(_vfs_zfs_vdev); SYSCTL_DECL(_vfs_zfs_vdev);
TUNABLE_QUAD("vfs.zfs.vdev.trim_max_bytes", &trim_vdev_max_bytes);
SYSCTL_QUAD(_vfs_zfs_vdev, OID_AUTO, trim_max_bytes, CTLFLAG_RWTUN, SYSCTL_QUAD(_vfs_zfs_vdev, OID_AUTO, trim_max_bytes, CTLFLAG_RWTUN,
&trim_vdev_max_bytes, 0, &trim_vdev_max_bytes, 0,
"Maximum pending TRIM bytes for a vdev"); "Maximum pending TRIM bytes for a vdev");
TUNABLE_INT("vfs.zfs.vdev.trim_max_pending", &trim_vdev_max_pending);
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, trim_max_pending, CTLFLAG_RWTUN, SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, trim_max_pending, CTLFLAG_RWTUN,
&trim_vdev_max_pending, 0, &trim_vdev_max_pending, 0,
"Maximum pending TRIM segments for a vdev"); "Maximum pending TRIM segments for a vdev");
static void trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd); static void trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd);
static int static int

View File

@ -112,7 +112,8 @@ int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS TXG"); SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS TXG");
SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, timeout, CTLFLAG_RWTUN, &zfs_txg_timeout, 0, TUNABLE_INT("vfs.zfs.txg.timeout", &zfs_txg_timeout);
SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, timeout, CTLFLAG_RW, &zfs_txg_timeout, 0,
"Maximum seconds worth of delta per txg"); "Maximum seconds worth of delta per txg");
/* /*

View File

@ -90,10 +90,13 @@ int zfs_vdev_cache_bshift = 16;
SYSCTL_DECL(_vfs_zfs_vdev); SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache"); SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
TUNABLE_INT("vfs.zfs.vdev.cache.max", &zfs_vdev_cache_max);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, max, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, max, CTLFLAG_RDTUN,
&zfs_vdev_cache_max, 0, "Maximum I/O request size that increase read size"); &zfs_vdev_cache_max, 0, "Maximum I/O request size that increase read size");
TUNABLE_INT("vfs.zfs.vdev.cache.size", &zfs_vdev_cache_size);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, size, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, size, CTLFLAG_RDTUN,
&zfs_vdev_cache_size, 0, "Size of VDEV cache"); &zfs_vdev_cache_size, 0, "Size of VDEV cache");
TUNABLE_INT("vfs.zfs.vdev.cache.bshift", &zfs_vdev_cache_bshift);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, bshift, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, bshift, CTLFLAG_RDTUN,
&zfs_vdev_cache_bshift, 0, "Turn too small requests into 1 << this value"); &zfs_vdev_cache_bshift, 0, "Turn too small requests into 1 << this value");

View File

@ -53,12 +53,14 @@ DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
SYSCTL_DECL(_vfs_zfs_vdev); SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */ /* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable; static int vdev_geom_bio_flush_disable = 0;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW,
&vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH"); &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */ /* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable; static int vdev_geom_bio_delete_disable = 0;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.bio_delete_disable", &vdev_geom_bio_delete_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RW,
&vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE"); &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
static void static void

View File

@ -74,26 +74,32 @@ static SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
/* Rotating media load calculation configuration. */ /* Rotating media load calculation configuration. */
static int rotating_inc = 0; static int rotating_inc = 0;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_inc, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_inc", &rotating_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_inc, CTLFLAG_RW,
&rotating_inc, 0, "Rotating media load increment for non-seeking I/O's"); &rotating_inc, 0, "Rotating media load increment for non-seeking I/O's");
static int rotating_seek_inc = 5; static int rotating_seek_inc = 5;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_inc, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_seek_inc", &rotating_seek_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_inc, CTLFLAG_RW,
&rotating_seek_inc, 0, "Rotating media load increment for seeking I/O's"); &rotating_seek_inc, 0, "Rotating media load increment for seeking I/O's");
static int rotating_seek_offset = 1 * 1024 * 1024; static int rotating_seek_offset = 1 * 1024 * 1024;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_offset, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_seek_offset", &rotating_seek_offset);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_offset, CTLFLAG_RW,
&rotating_seek_offset, 0, "Offset in bytes from the last I/O which " &rotating_seek_offset, 0, "Offset in bytes from the last I/O which "
"triggers a reduced rotating media seek increment"); "triggers a reduced rotating media seek increment");
/* Non-rotating media load calculation configuration. */ /* Non-rotating media load calculation configuration. */
static int non_rotating_inc = 0; static int non_rotating_inc = 0;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_inc, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.mirror.non_rotating_inc", &non_rotating_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_inc, CTLFLAG_RW,
&non_rotating_inc, 0, &non_rotating_inc, 0,
"Non-rotating media load increment for non-seeking I/O's"); "Non-rotating media load increment for non-seeking I/O's");
static int non_rotating_seek_inc = 1; static int non_rotating_seek_inc = 1;
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_seek_inc, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.mirror.non_rotating_seek_inc",
&non_rotating_seek_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_seek_inc, CTLFLAG_RW,
&non_rotating_seek_inc, 0, &non_rotating_seek_inc, 0,
"Non-rotating media load increment for seeking I/O's"); "Non-rotating media load increment for seeking I/O's");

View File

@ -176,18 +176,23 @@ int zfs_vdev_write_gap_limit = 4 << 10;
#ifdef __FreeBSD__ #ifdef __FreeBSD__
SYSCTL_DECL(_vfs_zfs_vdev); SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, max_active, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.max_active", &zfs_vdev_max_active);
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, max_active, CTLFLAG_RW,
&zfs_vdev_max_active, 0, &zfs_vdev_max_active, 0,
"The maximum number of I/Os of all types active for each device."); "The maximum number of I/Os of all types active for each device.");
#define ZFS_VDEV_QUEUE_KNOB_MIN(name) \ #define ZFS_VDEV_QUEUE_KNOB_MIN(name) \
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _min_active, CTLFLAG_RWTUN,\ TUNABLE_INT("vfs.zfs.vdev." #name "_min_active", \
&zfs_vdev_ ## name ## _min_active); \
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _min_active, CTLFLAG_RW, \
&zfs_vdev_ ## name ## _min_active, 0, \ &zfs_vdev_ ## name ## _min_active, 0, \
"Initial number of I/O requests of type " #name \ "Initial number of I/O requests of type " #name \
" active for each device"); " active for each device");
#define ZFS_VDEV_QUEUE_KNOB_MAX(name) \ #define ZFS_VDEV_QUEUE_KNOB_MAX(name) \
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _max_active, CTLFLAG_RWTUN,\ TUNABLE_INT("vfs.zfs.vdev." #name "_max_active", \
&zfs_vdev_ ## name ## _max_active); \
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _max_active, CTLFLAG_RW, \
&zfs_vdev_ ## name ## _max_active, 0, \ &zfs_vdev_ ## name ## _max_active, 0, \
"Maximum number of I/O requests of type " #name \ "Maximum number of I/O requests of type " #name \
" active for each device"); " active for each device");
@ -207,13 +212,16 @@ ZFS_VDEV_QUEUE_KNOB_MAX(trim);
#undef ZFS_VDEV_QUEUE_KNOB #undef ZFS_VDEV_QUEUE_KNOB
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, aggregation_limit, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.aggregation_limit", &zfs_vdev_aggregation_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, aggregation_limit, CTLFLAG_RW,
&zfs_vdev_aggregation_limit, 0, &zfs_vdev_aggregation_limit, 0,
"I/O requests are aggregated up to this size"); "I/O requests are aggregated up to this size");
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, read_gap_limit, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.read_gap_limit", &zfs_vdev_read_gap_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, read_gap_limit, CTLFLAG_RW,
&zfs_vdev_read_gap_limit, 0, &zfs_vdev_read_gap_limit, 0,
"Acceptable gap between two reads being aggregated"); "Acceptable gap between two reads being aggregated");
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, write_gap_limit, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.vdev.write_gap_limit", &zfs_vdev_write_gap_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, write_gap_limit, CTLFLAG_RW,
&zfs_vdev_write_gap_limit, 0, &zfs_vdev_write_gap_limit, 0,
"Acceptable gap between two writes being aggregated"); "Acceptable gap between two writes being aggregated");
#endif #endif

View File

@ -195,7 +195,8 @@ CTASSERT(sizeof(zfs_cmd_t) < IOCPARM_MAX);
static int snapshot_list_prefetch; static int snapshot_list_prefetch;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, snapshot_list_prefetch, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.snapshot_list_prefetch", &snapshot_list_prefetch);
SYSCTL_INT(_vfs_zfs, OID_AUTO, snapshot_list_prefetch, CTLFLAG_RW,
&snapshot_list_prefetch, 0, "Prefetch data when listing snapshots"); &snapshot_list_prefetch, 0, "Prefetch data when listing snapshots");
static struct cdev *zfsdev; static struct cdev *zfsdev;

View File

@ -74,7 +74,8 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, super_owner, CTLFLAG_RW, &zfs_super_owner, 0,
"File system owner can perform privileged operation on his file systems"); "File system owner can perform privileged operation on his file systems");
int zfs_debug_level; int zfs_debug_level;
SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RWTUN, &zfs_debug_level, 0, TUNABLE_INT("vfs.zfs.debug", &zfs_debug_level);
SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RW, &zfs_debug_level, 0,
"Debug level"); "Debug level");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, version, CTLFLAG_RD, 0, "ZFS versions"); SYSCTL_NODE(_vfs_zfs, OID_AUTO, version, CTLFLAG_RD, 0, "ZFS versions");

View File

@ -70,7 +70,8 @@
*/ */
int zil_replay_disable = 0; int zil_replay_disable = 0;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RWTUN, TUNABLE_INT("vfs.zfs.zil_replay_disable", &zil_replay_disable);
SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RW,
&zil_replay_disable, 0, "Disable intent logging replay"); &zil_replay_disable, 0, "Disable intent logging replay");
/* /*
@ -79,10 +80,12 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RWTUN,
* out-of-order write cache is enabled. * out-of-order write cache is enabled.
*/ */
boolean_t zfs_nocacheflush = B_FALSE; boolean_t zfs_nocacheflush = B_FALSE;
TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
&zfs_nocacheflush, 0, "Disable cache flush"); &zfs_nocacheflush, 0, "Disable cache flush");
boolean_t zfs_trim_enabled = B_TRUE; boolean_t zfs_trim_enabled = B_TRUE;
SYSCTL_DECL(_vfs_zfs_trim); SYSCTL_DECL(_vfs_zfs_trim);
TUNABLE_INT("vfs.zfs.trim.enabled", &zfs_trim_enabled);
SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0, SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0,
"Enable ZFS TRIM"); "Enable ZFS TRIM");

View File

@ -46,9 +46,11 @@ static int zio_use_uma = 1;
#else #else
static int zio_use_uma = 0; static int zio_use_uma = 0;
#endif #endif
TUNABLE_INT("vfs.zfs.zio.use_uma", &zio_use_uma);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0, SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, use_uma, CTLFLAG_RDTUN, &zio_use_uma, 0,
"Use uma(9) for ZIO allocations"); "Use uma(9) for ZIO allocations");
static int zio_exclude_metadata = 0; static int zio_exclude_metadata = 0;
TUNABLE_INT("vfs.zfs.zio.exclude_metadata", &zio_exclude_metadata);
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN, &zio_exclude_metadata, 0, SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata, CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
"Exclude metadata buffers from dumps as well"); "Exclude metadata buffers from dumps as well");
@ -102,12 +104,15 @@ extern vmem_t *zio_alloc_arena;
* regular blocks are not deferred. * regular blocks are not deferred.
*/ */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */ int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_deferred_free", &zfs_sync_pass_deferred_free);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_deferred_free, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_deferred_free, CTLFLAG_RDTUN,
&zfs_sync_pass_deferred_free, 0, "defer frees starting in this pass"); &zfs_sync_pass_deferred_free, 0, "defer frees starting in this pass");
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */ int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_dont_compress", &zfs_sync_pass_dont_compress);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_dont_compress, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_dont_compress, CTLFLAG_RDTUN,
&zfs_sync_pass_dont_compress, 0, "don't compress starting in this pass"); &zfs_sync_pass_dont_compress, 0, "don't compress starting in this pass");
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */ int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
TUNABLE_INT("vfs.zfs.sync_pass_rewrite", &zfs_sync_pass_rewrite);
SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_rewrite, CTLFLAG_RDTUN, SYSCTL_INT(_vfs_zfs, OID_AUTO, sync_pass_rewrite, CTLFLAG_RDTUN,
&zfs_sync_pass_rewrite, 0, "rewrite new bps starting in this pass"); &zfs_sync_pass_rewrite, 0, "rewrite new bps starting in this pass");

View File

@ -119,6 +119,7 @@ static uint32_t zvol_minors;
SYSCTL_DECL(_vfs_zfs); SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME"); SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int volmode = ZFS_VOLMODE_GEOM; static int volmode = ZFS_VOLMODE_GEOM;
TUNABLE_INT("vfs.zfs.vol.mode", &volmode);
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0, SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
"Expose as GEOM providers (1), device files (2) or neither"); "Expose as GEOM providers (1), device files (2) or neither");

View File

@ -25,7 +25,8 @@
SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace debug parameters"); SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace debug parameters");
int dtrace_debug = 0; int dtrace_debug = 0;
SYSCTL_INT(_debug_dtrace, OID_AUTO, debug, CTLFLAG_RWTUN, &dtrace_debug, 0, ""); TUNABLE_INT("debug.dtrace.debug", &dtrace_debug);
SYSCTL_INT(_debug_dtrace, OID_AUTO, debug, CTLFLAG_RW, &dtrace_debug, 0, "");
/* Report registered DTrace providers. */ /* Report registered DTrace providers. */
static int static int

View File

@ -89,11 +89,14 @@ extern const char *freebsd32_syscallnames[];
static SYSCTL_NODE(_compat, OID_AUTO, ia32, CTLFLAG_RW, 0, "ia32 mode"); static SYSCTL_NODE(_compat, OID_AUTO, ia32, CTLFLAG_RW, 0, "ia32 mode");
static u_long ia32_maxdsiz = IA32_MAXDSIZ; static u_long ia32_maxdsiz = IA32_MAXDSIZ;
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxdsiz, CTLFLAG_RWTUN, &ia32_maxdsiz, 0, ""); SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxdsiz, CTLFLAG_RW, &ia32_maxdsiz, 0, "");
TUNABLE_ULONG("compat.ia32.maxdsiz", &ia32_maxdsiz);
u_long ia32_maxssiz = IA32_MAXSSIZ; u_long ia32_maxssiz = IA32_MAXSSIZ;
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxssiz, CTLFLAG_RWTUN, &ia32_maxssiz, 0, ""); SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxssiz, CTLFLAG_RW, &ia32_maxssiz, 0, "");
TUNABLE_ULONG("compat.ia32.maxssiz", &ia32_maxssiz);
static u_long ia32_maxvmem = IA32_MAXVMEM; static u_long ia32_maxvmem = IA32_MAXVMEM;
SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxvmem, CTLFLAG_RWTUN, &ia32_maxvmem, 0, ""); SYSCTL_ULONG(_compat_ia32, OID_AUTO, maxvmem, CTLFLAG_RW, &ia32_maxvmem, 0, "");
TUNABLE_ULONG("compat.ia32.maxvmem", &ia32_maxvmem);
struct sysentvec ia32_freebsd_sysvec = { struct sysentvec ia32_freebsd_sysvec = {
.sv_size = FREEBSD32_SYS_MAXSYSCALL, .sv_size = FREEBSD32_SYS_MAXSYSCALL,

View File

@ -70,10 +70,12 @@ static struct mtx x86bios_lock;
static SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL, static SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL,
"x86bios debugging"); "x86bios debugging");
static int x86bios_trace_call; static int x86bios_trace_call;
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RWTUN, &x86bios_trace_call, 0, TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
"Trace far function calls"); "Trace far function calls");
static int x86bios_trace_int; static int x86bios_trace_int;
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RWTUN, &x86bios_trace_int, 0, TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
"Trace software interrupt handlers"); "Trace software interrupt handlers");
#ifdef X86BIOS_NATIVE_VM86 #ifdef X86BIOS_NATIVE_VM86

View File

@ -61,6 +61,7 @@ static int aac_pci_probe(device_t dev);
static int aac_pci_attach(device_t dev); static int aac_pci_attach(device_t dev);
static int aac_enable_msi = 1; static int aac_enable_msi = 1;
TUNABLE_INT("hw.aac.enable_msi", &aac_enable_msi);
SYSCTL_INT(_hw_aac, OID_AUTO, enable_msi, CTLFLAG_RDTUN, &aac_enable_msi, 0, SYSCTL_INT(_hw_aac, OID_AUTO, enable_msi, CTLFLAG_RDTUN, &aac_enable_msi, 0,
"Enable MSI interrupts"); "Enable MSI interrupts");

View File

@ -56,6 +56,7 @@ ACPI_MODULE_NAME("SCHEDULE")
* Allow the user to tune the maximum number of tasks we may enqueue. * Allow the user to tune the maximum number of tasks we may enqueue.
*/ */
static int acpi_max_tasks = ACPI_MAX_TASKS; static int acpi_max_tasks = ACPI_MAX_TASKS;
TUNABLE_INT("debug.acpi.max_tasks", &acpi_max_tasks);
SYSCTL_INT(_debug_acpi, OID_AUTO, max_tasks, CTLFLAG_RDTUN, &acpi_max_tasks, SYSCTL_INT(_debug_acpi, OID_AUTO, max_tasks, CTLFLAG_RDTUN, &acpi_max_tasks,
0, "Maximum acpi tasks"); 0, "Maximum acpi tasks");
@ -64,6 +65,7 @@ SYSCTL_INT(_debug_acpi, OID_AUTO, max_tasks, CTLFLAG_RDTUN, &acpi_max_tasks,
* some systems have problems with increased parallelism. * some systems have problems with increased parallelism.
*/ */
static int acpi_max_threads = ACPI_MAX_THREADS; static int acpi_max_threads = ACPI_MAX_THREADS;
TUNABLE_INT("debug.acpi.max_threads", &acpi_max_threads);
SYSCTL_INT(_debug_acpi, OID_AUTO, max_threads, CTLFLAG_RDTUN, &acpi_max_threads, SYSCTL_INT(_debug_acpi, OID_AUTO, max_threads, CTLFLAG_RDTUN, &acpi_max_threads,
0, "Maximum acpi threads"); 0, "Maximum acpi threads");

View File

@ -129,6 +129,7 @@ struct acpi_cpu_device {
/* Allow users to ignore processor orders in MADT. */ /* Allow users to ignore processor orders in MADT. */
static int cpu_unordered; static int cpu_unordered;
TUNABLE_INT("debug.acpi.cpu_unordered", &cpu_unordered);
SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN, SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN,
&cpu_unordered, 0, &cpu_unordered, 0,
"Do not use the MADT to match ACPI Processor objects to CPUs."); "Do not use the MADT to match ACPI Processor objects to CPUs.");

View File

@ -181,13 +181,16 @@ ACPI_SERIAL_DECL(ec, "ACPI embedded controller");
static SYSCTL_NODE(_debug_acpi, OID_AUTO, ec, CTLFLAG_RD, NULL, "EC debugging"); static SYSCTL_NODE(_debug_acpi, OID_AUTO, ec, CTLFLAG_RD, NULL, "EC debugging");
static int ec_burst_mode; static int ec_burst_mode;
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, burst, CTLFLAG_RWTUN, &ec_burst_mode, 0, TUNABLE_INT("debug.acpi.ec.burst", &ec_burst_mode);
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, burst, CTLFLAG_RW, &ec_burst_mode, 0,
"Enable use of burst mode (faster for nearly all systems)"); "Enable use of burst mode (faster for nearly all systems)");
static int ec_polled_mode; static int ec_polled_mode;
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, polled, CTLFLAG_RWTUN, &ec_polled_mode, 0, TUNABLE_INT("debug.acpi.ec.polled", &ec_polled_mode);
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, polled, CTLFLAG_RW, &ec_polled_mode, 0,
"Force use of polled mode (only if interrupt mode doesn't work)"); "Force use of polled mode (only if interrupt mode doesn't work)");
static int ec_timeout = EC_TIMEOUT; static int ec_timeout = EC_TIMEOUT;
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, timeout, CTLFLAG_RWTUN, &ec_timeout, TUNABLE_INT("debug.acpi.ec.timeout", &ec_timeout);
SYSCTL_INT(_debug_acpi_ec, OID_AUTO, timeout, CTLFLAG_RW, &ec_timeout,
EC_TIMEOUT, "Total time spent waiting for a response (poll+sleep)"); EC_TIMEOUT, "Total time spent waiting for a response (poll+sleep)");
static ACPI_STATUS static ACPI_STATUS

View File

@ -92,6 +92,7 @@ static int amr_setup_mbox(struct amr_softc *sc);
static int amr_ccb_map(struct amr_softc *sc); static int amr_ccb_map(struct amr_softc *sc);
static u_int amr_force_sg32 = 0; static u_int amr_force_sg32 = 0;
TUNABLE_INT("hw.amr.force_sg32", &amr_force_sg32);
SYSCTL_DECL(_hw_amr); SYSCTL_DECL(_hw_amr);
SYSCTL_UINT(_hw_amr, OID_AUTO, force_sg32, CTLFLAG_RDTUN, &amr_force_sg32, 0, SYSCTL_UINT(_hw_amr, OID_AUTO, force_sg32, CTLFLAG_RDTUN, &amr_force_sg32, 0,
"Force the AMR driver to use 32bit scatter gather"); "Force the AMR driver to use 32bit scatter gather");

View File

@ -80,8 +80,9 @@ int ata_dma_check_80pin = 1;
/* sysctl vars */ /* sysctl vars */
static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters"); static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin, SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
CTLFLAG_RWTUN, &ata_dma_check_80pin, 0, CTLFLAG_RW, &ata_dma_check_80pin, 1,
"Check for 80pin cable before setting ATA DMA mode"); "Check for 80pin cable before setting ATA DMA mode");
FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver"); FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");

View File

@ -96,8 +96,9 @@ static SYSCTL_NODE(_hw_ath, OID_AUTO, hal, CTLFLAG_RD, 0,
#ifdef AH_DEBUG #ifdef AH_DEBUG
int ath_hal_debug = 0; int ath_hal_debug = 0;
SYSCTL_INT(_hw_ath_hal, OID_AUTO, debug, CTLFLAG_RWTUN, &ath_hal_debug, SYSCTL_INT(_hw_ath_hal, OID_AUTO, debug, CTLFLAG_RW, &ath_hal_debug,
0, "Atheros HAL debugging printfs"); 0, "Atheros HAL debugging printfs");
TUNABLE_INT("hw.ath.hal.debug", &ath_hal_debug);
#endif /* AH_DEBUG */ #endif /* AH_DEBUG */
static MALLOC_DEFINE(M_ATH_HAL, "ath_hal", "ath hal data"); static MALLOC_DEFINE(M_ATH_HAL, "ath_hal", "ath hal data");

View File

@ -240,14 +240,17 @@ SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
0, "ANI calibration (msecs)"); 0, "ANI calibration (msecs)");
int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf, SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
0, "rx buffers allocated"); 0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf, SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
0, "tx buffers allocated"); 0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */ int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt, SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
0, "tx (mgmt) buffers allocated"); 0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);
int ath_bstuck_threshold = 4; /* max missed beacons */ int ath_bstuck_threshold = 4; /* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,

View File

@ -92,8 +92,9 @@ __FBSDID("$FreeBSD$");
uint64_t ath_debug = 0; uint64_t ath_debug = 0;
SYSCTL_DECL(_hw_ath); SYSCTL_DECL(_hw_ath);
SYSCTL_QUAD(_hw_ath, OID_AUTO, debug, CTLFLAG_RWTUN, &ath_debug, SYSCTL_QUAD(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
0, "control debugging printfs"); 0, "control debugging printfs");
TUNABLE_QUAD("hw.ath.debug", &ath_debug);
void void
ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf, ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,

View File

@ -535,37 +535,44 @@ static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
/* Allowable values are TRUE or FALSE */ /* Allowable values are TRUE or FALSE */
static int bce_verbose = TRUE; static int bce_verbose = TRUE;
TUNABLE_INT("hw.bce.verbose", &bce_verbose);
SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0, SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0,
"Verbose output enable/disable"); "Verbose output enable/disable");
/* Allowable values are TRUE or FALSE */ /* Allowable values are TRUE or FALSE */
static int bce_tso_enable = TRUE; static int bce_tso_enable = TRUE;
TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0, SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
"TSO Enable/Disable"); "TSO Enable/Disable");
/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
/* ToDo: Add MSI-X support. */ /* ToDo: Add MSI-X support. */
static int bce_msi_enable = 1; static int bce_msi_enable = 1;
TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0, SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
"MSI-X|MSI|INTx selector"); "MSI-X|MSI|INTx selector");
/* Allowable values are 1, 2, 4, 8. */ /* Allowable values are 1, 2, 4, 8. */
static int bce_rx_pages = DEFAULT_RX_PAGES; static int bce_rx_pages = DEFAULT_RX_PAGES;
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0, SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0,
"Receive buffer descriptor pages (1 page = 255 buffer descriptors)"); "Receive buffer descriptor pages (1 page = 255 buffer descriptors)");
/* Allowable values are 1, 2, 4, 8. */ /* Allowable values are 1, 2, 4, 8. */
static int bce_tx_pages = DEFAULT_TX_PAGES; static int bce_tx_pages = DEFAULT_TX_PAGES;
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0, SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0,
"Transmit buffer descriptor pages (1 page = 255 buffer descriptors)"); "Transmit buffer descriptor pages (1 page = 255 buffer descriptors)");
/* Allowable values are TRUE or FALSE. */ /* Allowable values are TRUE or FALSE. */
static int bce_hdr_split = TRUE; static int bce_hdr_split = TRUE;
TUNABLE_INT("hw.bce.hdr_split", &bce_hdr_split);
SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0, SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0,
"Frame header/payload splitting Enable/Disable"); "Frame header/payload splitting Enable/Disable");
/* Allowable values are TRUE or FALSE. */ /* Allowable values are TRUE or FALSE. */
static int bce_strict_rx_mtu = FALSE; static int bce_strict_rx_mtu = FALSE;
TUNABLE_INT("hw.bce.strict_rx_mtu", &bce_strict_rx_mtu);
SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN,
&bce_strict_rx_mtu, 0, &bce_strict_rx_mtu, 0,
"Enable/Disable strict RX frame size checking"); "Enable/Disable strict RX frame size checking");
@ -578,6 +585,7 @@ static int bce_tx_quick_cons_trip_int = 1;
/* Generate 1 interrupt for every 20 transmit completions. */ /* Generate 1 interrupt for every 20 transmit completions. */
static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT; static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT;
#endif #endif
TUNABLE_INT("hw.bce.tx_quick_cons_trip_int", &bce_tx_quick_cons_trip_int);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN,
&bce_tx_quick_cons_trip_int, 0, &bce_tx_quick_cons_trip_int, 0,
"Transmit BD trip point during interrupts"); "Transmit BD trip point during interrupts");
@ -590,6 +598,7 @@ static int bce_tx_quick_cons_trip = 1;
/* Generate 1 interrupt for every 20 transmit completions. */ /* Generate 1 interrupt for every 20 transmit completions. */
static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP; static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
#endif #endif
TUNABLE_INT("hw.bce.tx_quick_cons_trip", &bce_tx_quick_cons_trip);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN,
&bce_tx_quick_cons_trip, 0, &bce_tx_quick_cons_trip, 0,
"Transmit BD trip point"); "Transmit BD trip point");
@ -602,6 +611,7 @@ static int bce_tx_ticks_int = 0;
/* Generate an interrupt if 80us have elapsed since the last TX completion. */ /* Generate an interrupt if 80us have elapsed since the last TX completion. */
static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT; static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT;
#endif #endif
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN,
&bce_tx_ticks_int, 0, "Transmit ticks count during interrupt"); &bce_tx_ticks_int, 0, "Transmit ticks count during interrupt");
@ -613,6 +623,7 @@ static int bce_tx_ticks = 0;
/* Generate an interrupt if 80us have elapsed since the last TX completion. */ /* Generate an interrupt if 80us have elapsed since the last TX completion. */
static int bce_tx_ticks = DEFAULT_TX_TICKS; static int bce_tx_ticks = DEFAULT_TX_TICKS;
#endif #endif
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN,
&bce_tx_ticks, 0, "Transmit ticks count"); &bce_tx_ticks, 0, "Transmit ticks count");
@ -624,6 +635,7 @@ static int bce_rx_quick_cons_trip_int = 1;
/* Generate 1 interrupt for every 6 received frames. */ /* Generate 1 interrupt for every 6 received frames. */
static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT; static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT;
#endif #endif
TUNABLE_INT("hw.bce.rx_quick_cons_trip_int", &bce_rx_quick_cons_trip_int);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN,
&bce_rx_quick_cons_trip_int, 0, &bce_rx_quick_cons_trip_int, 0,
"Receive BD trip point duirng interrupts"); "Receive BD trip point duirng interrupts");
@ -636,6 +648,7 @@ static int bce_rx_quick_cons_trip = 1;
/* Generate 1 interrupt for every 6 received frames. */ /* Generate 1 interrupt for every 6 received frames. */
static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP; static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
#endif #endif
TUNABLE_INT("hw.bce.rx_quick_cons_trip", &bce_rx_quick_cons_trip);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN,
&bce_rx_quick_cons_trip, 0, &bce_rx_quick_cons_trip, 0,
"Receive BD trip point"); "Receive BD trip point");
@ -648,6 +661,7 @@ static int bce_rx_ticks_int = 0;
/* Generate an int. if 18us have elapsed since the last received frame. */ /* Generate an int. if 18us have elapsed since the last received frame. */
static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT; static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
#endif #endif
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN,
&bce_rx_ticks_int, 0, "Receive ticks count during interrupt"); &bce_rx_ticks_int, 0, "Receive ticks count during interrupt");
@ -659,6 +673,7 @@ static int bce_rx_ticks = 0;
/* Generate an int. if 18us have elapsed since the last received frame. */ /* Generate an int. if 18us have elapsed since the last received frame. */
static int bce_rx_ticks = DEFAULT_RX_TICKS; static int bce_rx_ticks = DEFAULT_RX_TICKS;
#endif #endif
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN, SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN,
&bce_rx_ticks, 0, "Receive ticks count"); &bce_rx_ticks, 0, "Receive ticks count");

View File

@ -542,8 +542,10 @@ DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
static int bge_allow_asf = 1; static int bge_allow_asf = 1;
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0, SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
"Allow ASF mode if available"); "Allow ASF mode if available");
#define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500" #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
@ -6235,6 +6237,7 @@ bge_add_sysctls(struct bge_softc *sc)
{ {
struct sysctl_ctx_list *ctx; struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *children; struct sysctl_oid_list *children;
char tn[32];
int unit; int unit;
ctx = device_get_sysctl_ctx(sc->bge_dev); ctx = device_get_sysctl_ctx(sc->bge_dev);
@ -6273,14 +6276,18 @@ bge_add_sysctls(struct bge_softc *sc)
* consumes a lot of CPU cycles, so leave it off by default. * consumes a lot of CPU cycles, so leave it off by default.
*/ */
sc->bge_forced_collapse = 0; sc->bge_forced_collapse = 0;
snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse", SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0, CTLFLAG_RW, &sc->bge_forced_collapse, 0,
"Number of fragmented TX buffers of a frame allowed before " "Number of fragmented TX buffers of a frame allowed before "
"forced collapsing"); "forced collapsing");
sc->bge_msi = 1; sc->bge_msi = 1;
snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
TUNABLE_INT_FETCH(tn, &sc->bge_msi);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi", SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI"); CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
/* /*
* It seems all Broadcom controllers have a bug that can generate UDP * It seems all Broadcom controllers have a bug that can generate UDP
@ -6293,8 +6300,10 @@ bge_add_sysctls(struct bge_softc *sc)
* dev.bge.0.forced_udpcsum. * dev.bge.0.forced_udpcsum.
*/ */
sc->bge_forced_udpcsum = 0; sc->bge_forced_udpcsum = 0;
snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum", SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0, CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
"Enable UDP checksum offloading even if controller can " "Enable UDP checksum offloading even if controller can "
"generate UDP checksum value 0"); "generate UDP checksum value 0");

View File

@ -83,8 +83,9 @@ static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0,
#ifdef BWN_DEBUG #ifdef BWN_DEBUG
static int bwn_debug = 0; static int bwn_debug = 0;
SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0, SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RW, &bwn_debug, 0,
"Broadcom debugging printfs"); "Broadcom debugging printfs");
TUNABLE_INT("hw.bwn.debug", &bwn_debug);
enum { enum {
BWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ BWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
BWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ BWN_DEBUG_RECV = 0x00000002, /* basic recv operation */

View File

@ -297,56 +297,67 @@ SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
/* Debug */ /* Debug */
unsigned long bxe_debug = 0; unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN, TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
&bxe_debug, 0, "Debug logging mode"); &bxe_debug, 0, "Debug logging mode");
/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX; static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN, SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
&bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode"); &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */ /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4; static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
&bxe_queue_count, 0, "Multi-Queue queue count"); &bxe_queue_count, 0, "Multi-Queue queue count");
/* max number of buffers per queue (default RX_BD_USABLE) */ /* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0; static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN, SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
&bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue"); &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
/* Host interrupt coalescing RX tick timer (usecs) */ /* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25; static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN, SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
&bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks"); &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
/* Host interrupt coalescing TX tick timer (usecs) */ /* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50; static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN, SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
&bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks"); &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
/* Maximum number of Rx packets to process at a time */ /* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff; static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN, SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
&bxe_rx_budget, 0, "Rx processing budget"); &bxe_rx_budget, 0, "Rx processing budget");
/* Maximum LRO aggregation size */ /* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0; static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN, SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
&bxe_max_aggregation_size, 0, "max aggregation size"); &bxe_max_aggregation_size, 0, "max aggregation size");
/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */ /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1; static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN, SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
&bxe_mrrs, 0, "PCIe maximum read request size"); &bxe_mrrs, 0, "PCIe maximum read request size");
/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */ /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0; static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN, SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
&bxe_autogreeen, 0, "AutoGrEEEn support"); &bxe_autogreeen, 0, "AutoGrEEEn support");
/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */ /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0; static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN, SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
&bxe_udp_rss, 0, "UDP RSS support"); &bxe_udp_rss, 0, "UDP RSS support");

View File

@ -57,12 +57,16 @@ __FBSDID("$FreeBSD$");
static SYSCTL_NODE(_hw, OID_AUTO, cardbus, CTLFLAG_RD, 0, "CardBus parameters"); static SYSCTL_NODE(_hw, OID_AUTO, cardbus, CTLFLAG_RD, 0, "CardBus parameters");
int cardbus_debug = 0; int cardbus_debug = 0;
SYSCTL_INT(_hw_cardbus, OID_AUTO, debug, CTLFLAG_RWTUN, TUNABLE_INT("hw.cardbus.debug", &cardbus_debug);
&cardbus_debug, 0, "CardBus debug"); SYSCTL_INT(_hw_cardbus, OID_AUTO, debug, CTLFLAG_RW,
&cardbus_debug, 0,
"CardBus debug");
int cardbus_cis_debug = 0; int cardbus_cis_debug = 0;
SYSCTL_INT(_hw_cardbus, OID_AUTO, cis_debug, CTLFLAG_RWTUN, TUNABLE_INT("hw.cardbus.cis_debug", &cardbus_cis_debug);
&cardbus_cis_debug, 0, "CardBus CIS debug"); SYSCTL_INT(_hw_cardbus, OID_AUTO, cis_debug, CTLFLAG_RW,
&cardbus_cis_debug, 0,
"CardBus CIS debug");
#define DPRINTF(a) if (cardbus_debug) printf a #define DPRINTF(a) if (cardbus_debug) printf a
#define DEVPRINTF(x) if (cardbus_debug) device_printf x #define DEVPRINTF(x) if (cardbus_debug) device_printf x

View File

@ -107,12 +107,14 @@ driver_intr_t csintr;
static SYSCTL_NODE(_hw, OID_AUTO, cs, CTLFLAG_RD, 0, "cs device parameters"); static SYSCTL_NODE(_hw, OID_AUTO, cs, CTLFLAG_RD, 0, "cs device parameters");
int cs_ignore_cksum_failure = 0; int cs_ignore_cksum_failure = 0;
SYSCTL_INT(_hw_cs, OID_AUTO, ignore_checksum_failure, CTLFLAG_RWTUN, TUNABLE_INT("hw.cs.ignore_checksum_failure", &cs_ignore_cksum_failure);
SYSCTL_INT(_hw_cs, OID_AUTO, ignore_checksum_failure, CTLFLAG_RW,
&cs_ignore_cksum_failure, 0, &cs_ignore_cksum_failure, 0,
"ignore checksum errors in cs card EEPROM"); "ignore checksum errors in cs card EEPROM");
static int cs_recv_delay = 570; static int cs_recv_delay = 570;
SYSCTL_INT(_hw_cs, OID_AUTO, recv_delay, CTLFLAG_RWTUN, &cs_recv_delay, 570, ""); TUNABLE_INT("hw.cs.recv_delay", &cs_recv_delay);
SYSCTL_INT(_hw_cs, OID_AUTO, recv_delay, CTLFLAG_RW, &cs_recv_delay, 570, "");
static int cs8900_eeint2irq[16] = { static int cs8900_eeint2irq[16] = {
10, 11, 12, 5, 255, 255, 255, 255, 10, 11, 12, 5, 255, 255, 255, 255,

View File

@ -5,7 +5,6 @@
#include <sys/cdefs.h> #include <sys/cdefs.h>
#include <sys/param.h> #include <sys/param.h>
#include <sys/types.h> #include <sys/types.h>
#include <sys/sysctl.h>
#include <machine/bus.h> #include <machine/bus.h>
#include <cxgb_osdep.h> #include <cxgb_osdep.h>
#include <common/cxgb_common.h> #include <common/cxgb_common.h>
@ -17,5 +16,3 @@
#include <common/cxgb_sge_defs.h> #include <common/cxgb_sge_defs.h>
#include <common/cxgb_firmware_exports.h> #include <common/cxgb_firmware_exports.h>
#include <common/jhash.h> #include <common/jhash.h>
SYSCTL_DECL(_hw_cxgb);

View File

@ -208,6 +208,7 @@ static SLIST_HEAD(, uld_info) t3_uld_list;
*/ */
static int msi_allowed = 2; static int msi_allowed = 2;
TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters"); SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0, SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
"MSI-X, MSI, INTx selector"); "MSI-X, MSI, INTx selector");
@ -217,6 +218,7 @@ SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
* To disable it and force a single queue-set per port, use multiq = 0 * To disable it and force a single queue-set per port, use multiq = 0
*/ */
static int multiq = 1; static int multiq = 1;
TUNABLE_INT("hw.cxgb.multiq", &multiq);
SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0, SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
"use min(ncpus/ports, 8) queue-sets per port"); "use min(ncpus/ports, 8) queue-sets per port");
@ -226,14 +228,17 @@ SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
* *
*/ */
static int force_fw_update = 0; static int force_fw_update = 0;
TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0, SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
"update firmware even if up to date"); "update firmware even if up to date");
int cxgb_use_16k_clusters = -1; int cxgb_use_16k_clusters = -1;
TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN, SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
&cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue "); &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
static int nfilters = -1; static int nfilters = -1;
TUNABLE_INT("hw.cxgb.nfilters", &nfilters);
SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN, SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
&nfilters, 0, "max number of entries in the filter table"); &nfilters, 0, "max number of entries in the filter table");

View File

@ -86,11 +86,13 @@ CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
extern struct sysctl_oid_list sysctl__hw_cxgb_children; extern struct sysctl_oid_list sysctl__hw_cxgb_children;
int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE; int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
TUNABLE_INT("hw.cxgb.txq_mr_size", &cxgb_txq_buf_ring_size);
SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0, SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
"size of per-queue mbuf ring"); "size of per-queue mbuf ring");
static int cxgb_tx_coalesce_force = 0; static int cxgb_tx_coalesce_force = 0;
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN, TUNABLE_INT("hw.cxgb.tx_coalesce_force", &cxgb_tx_coalesce_force);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RW,
&cxgb_tx_coalesce_force, 0, &cxgb_tx_coalesce_force, 0,
"coalesce small packets into a single work request regardless of ring state"); "coalesce small packets into a single work request regardless of ring state");
@ -104,15 +106,19 @@ SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT; static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN, TUNABLE_INT("hw.cxgb.tx_coalesce_enable_start",
&cxgb_tx_coalesce_enable_start);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RW,
&cxgb_tx_coalesce_enable_start, 0, &cxgb_tx_coalesce_enable_start, 0,
"coalesce enable threshold"); "coalesce enable threshold");
static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT; static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN, TUNABLE_INT("hw.cxgb.tx_coalesce_enable_stop", &cxgb_tx_coalesce_enable_stop);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RW,
&cxgb_tx_coalesce_enable_stop, 0, &cxgb_tx_coalesce_enable_stop, 0,
"coalesce disable threshold"); "coalesce disable threshold");
static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT; static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN, TUNABLE_INT("hw.cxgb.tx_reclaim_threshold", &cxgb_tx_reclaim_threshold);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RW,
&cxgb_tx_reclaim_threshold, 0, &cxgb_tx_reclaim_threshold, 0,
"tx cleaning minimum threshold"); "tx cleaning minimum threshold");

View File

@ -106,35 +106,43 @@ static char *states[] = {
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgb, CTLFLAG_RD, 0, "iw_cxgb driver parameters"); SYSCTL_NODE(_hw, OID_AUTO, iw_cxgb, CTLFLAG_RD, 0, "iw_cxgb driver parameters");
static int ep_timeout_secs = 60; static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0, TUNABLE_INT("hw.iw_cxgb.ep_timeout_secs", &ep_timeout_secs);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0,
"CM Endpoint operation timeout in seconds (default=60)"); "CM Endpoint operation timeout in seconds (default=60)");
static int mpa_rev = 1; static int mpa_rev = 1;
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0, TUNABLE_INT("hw.iw_cxgb.mpa_rev", &mpa_rev);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
"MPA Revision, 0 supports amso1100, 1 is spec compliant. (default=1)"); "MPA Revision, 0 supports amso1100, 1 is spec compliant. (default=1)");
static int markers_enabled = 0; static int markers_enabled = 0;
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0, TUNABLE_INT("hw.iw_cxgb.markers_enabled", &markers_enabled);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0,
"Enable MPA MARKERS (default(0)=disabled)"); "Enable MPA MARKERS (default(0)=disabled)");
static int crc_enabled = 1; static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0, TUNABLE_INT("hw.iw_cxgb.crc_enabled", &crc_enabled);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0,
"Enable MPA CRC (default(1)=enabled)"); "Enable MPA CRC (default(1)=enabled)");
static int rcv_win = 256 * 1024; static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0, TUNABLE_INT("hw.iw_cxgb.rcv_win", &rcv_win);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0,
"TCP receive window in bytes (default=256KB)"); "TCP receive window in bytes (default=256KB)");
static int snd_win = 32 * 1024; static int snd_win = 32 * 1024;
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0, TUNABLE_INT("hw.iw_cxgb.snd_win", &snd_win);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0,
"TCP send window in bytes (default=32KB)"); "TCP send window in bytes (default=32KB)");
static unsigned int nocong = 0; static unsigned int nocong = 0;
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, nocong, CTLFLAG_RWTUN, &nocong, 0, TUNABLE_INT("hw.iw_cxgb.nocong", &nocong);
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, nocong, CTLFLAG_RW, &nocong, 0,
"Turn off congestion control (default=0)"); "Turn off congestion control (default=0)");
static unsigned int cong_flavor = 1; static unsigned int cong_flavor = 1;
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, cong_flavor, CTLFLAG_RWTUN, &cong_flavor, 0, TUNABLE_INT("hw.iw_cxgb.cong_flavor", &cong_flavor);
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, cong_flavor, CTLFLAG_RW, &cong_flavor, 0,
"TCP Congestion control flavor (default=1)"); "TCP Congestion control flavor (default=1)");
static void ep_timeout(void *arg); static void ep_timeout(void *arg);

View File

@ -769,72 +769,88 @@ process_socket_event(struct c4iw_ep *ep)
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters"); SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");
int db_delay_usecs = 1; int db_delay_usecs = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RWTUN, &db_delay_usecs, 0, TUNABLE_INT("hw.iw_cxgbe.db_delay_usecs", &db_delay_usecs);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RW, &db_delay_usecs, 0,
"Usecs to delay awaiting db fifo to drain"); "Usecs to delay awaiting db fifo to drain");
static int dack_mode = 1; static int dack_mode = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0, TUNABLE_INT("hw.iw_cxgbe.dack_mode", &dack_mode);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RW, &dack_mode, 0,
"Delayed ack mode (default = 1)"); "Delayed ack mode (default = 1)");
int c4iw_max_read_depth = 8; int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0, TUNABLE_INT("hw.iw_cxgbe.c4iw_max_read_depth", &c4iw_max_read_depth);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RW, &c4iw_max_read_depth, 0,
"Per-connection max ORD/IRD (default = 8)"); "Per-connection max ORD/IRD (default = 8)");
static int enable_tcp_timestamps; static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0, TUNABLE_INT("hw.iw_cxgbe.enable_tcp_timestamps", &enable_tcp_timestamps);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RW, &enable_tcp_timestamps, 0,
"Enable tcp timestamps (default = 0)"); "Enable tcp timestamps (default = 0)");
static int enable_tcp_sack; static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0, TUNABLE_INT("hw.iw_cxgbe.enable_tcp_sack", &enable_tcp_sack);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RW, &enable_tcp_sack, 0,
"Enable tcp SACK (default = 0)"); "Enable tcp SACK (default = 0)");
static int enable_tcp_window_scaling = 1; static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0, TUNABLE_INT("hw.iw_cxgbe.enable_tcp_window_scaling", &enable_tcp_window_scaling);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RW, &enable_tcp_window_scaling, 0,
"Enable tcp window scaling (default = 1)"); "Enable tcp window scaling (default = 1)");
int c4iw_debug = 1; int c4iw_debug = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0, TUNABLE_INT("hw.iw_cxgbe.c4iw_debug", &c4iw_debug);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RW, &c4iw_debug, 0,
"Enable debug logging (default = 0)"); "Enable debug logging (default = 0)");
static int peer2peer; static int peer2peer;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0, TUNABLE_INT("hw.iw_cxgbe.peer2peer", &peer2peer);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RW, &peer2peer, 0,
"Support peer2peer ULPs (default = 0)"); "Support peer2peer ULPs (default = 0)");
static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0, TUNABLE_INT("hw.iw_cxgbe.p2p_type", &p2p_type);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RW, &p2p_type, 0,
"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)"); "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");
static int ep_timeout_secs = 60; static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0, TUNABLE_INT("hw.iw_cxgbe.ep_timeout_secs", &ep_timeout_secs);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0,
"CM Endpoint operation timeout in seconds (default = 60)"); "CM Endpoint operation timeout in seconds (default = 60)");
static int mpa_rev = 1; static int mpa_rev = 1;
TUNABLE_INT("hw.iw_cxgbe.mpa_rev", &mpa_rev);
#ifdef IW_CM_MPAV2 #ifdef IW_CM_MPAV2
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0, SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
"MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)"); "MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
#else #else
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0, SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
"MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant (default = 1)"); "MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant (default = 1)");
#endif #endif
static int markers_enabled; static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0, TUNABLE_INT("hw.iw_cxgbe.markers_enabled", &markers_enabled);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0,
"Enable MPA MARKERS (default(0) = disabled)"); "Enable MPA MARKERS (default(0) = disabled)");
static int crc_enabled = 1; static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0, TUNABLE_INT("hw.iw_cxgbe.crc_enabled", &crc_enabled);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0,
"Enable MPA CRC (default(1) = enabled)"); "Enable MPA CRC (default(1) = enabled)");
static int rcv_win = 256 * 1024; static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0, TUNABLE_INT("hw.iw_cxgbe.rcv_win", &rcv_win);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0,
"TCP receive window in bytes (default = 256KB)"); "TCP receive window in bytes (default = 256KB)");
static int snd_win = 128 * 1024; static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0, TUNABLE_INT("hw.iw_cxgbe.snd_win", &snd_win);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0,
"TCP send window in bytes (default = 128KB)"); "TCP send window in bytes (default = 128KB)");
int db_fc_threshold = 2000; int db_fc_threshold = 2000;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RWTUN, &db_fc_threshold, 0, TUNABLE_INT("hw.iw_cxgbe.db_fc_threshold", &db_fc_threshold);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RW, &db_fc_threshold, 0,
"QP count/threshold that triggers automatic"); "QP count/threshold that triggers automatic");
static void static void

View File

@ -132,6 +132,7 @@ static struct cdevsw drm_cdevsw = {
}; };
static int drm_msi = 1; /* Enable by default. */ static int drm_msi = 1; /* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device"); SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1, SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
"Enable MSI interrupts for drm devices"); "Enable MSI interrupts for drm devices");

View File

@ -70,7 +70,7 @@ int drm_sysctl_init(struct drm_device *dev)
dev->sysctl = info; dev->sysctl = info;
/* Add the sysctl node for DRI if it doesn't already exist */ /* Add the sysctl node for DRI if it doesn't already exist */
drioid = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(&sysctl___hw), OID_AUTO, "dri", CTLFLAG_RW, NULL, "DRI Graphics"); drioid = SYSCTL_ADD_NODE( &info->ctx, &sysctl__hw_children, OID_AUTO, "dri", CTLFLAG_RW, NULL, "DRI Graphics");
if (!drioid) if (!drioid)
return 1; return 1;

View File

@ -203,6 +203,7 @@ static struct cdevsw drm_cdevsw = {
}; };
static int drm_msi = 1; /* Enable by default. */ static int drm_msi = 1; /* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device"); SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1, SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
"Enable MSI interrupts for drm devices"); "Enable MSI interrupts for drm devices");

View File

@ -68,7 +68,7 @@ int drm_sysctl_init(struct drm_device *dev)
dev->sysctl = info; dev->sysctl = info;
/* Add the sysctl node for DRI if it doesn't already exist */ /* Add the sysctl node for DRI if it doesn't already exist */
drioid = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(&sysctl___hw), OID_AUTO, drioid = SYSCTL_ADD_NODE(&info->ctx, &sysctl__hw_children, OID_AUTO,
"dri", CTLFLAG_RW, NULL, "DRI Graphics"); "dri", CTLFLAG_RW, NULL, "DRI Graphics");
if (!drioid) if (!drioid)
return 1; return 1;

View File

@ -353,6 +353,8 @@ static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV); static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR); static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt, SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt,
0, "Default transmit interrupt delay in usecs"); 0, "Default transmit interrupt delay in usecs");
SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt, SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
@ -360,6 +362,8 @@ SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV); static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV); static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN, SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
&em_tx_abs_int_delay_dflt, 0, &em_tx_abs_int_delay_dflt, 0,
"Default transmit interrupt delay limit in usecs"); "Default transmit interrupt delay limit in usecs");
@ -369,26 +373,32 @@ SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
static int em_rxd = EM_DEFAULT_RXD; static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD; static int em_txd = EM_DEFAULT_TXD;
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
SYSCTL_INT(_hw_em, OID_AUTO, rxd, CTLFLAG_RDTUN, &em_rxd, 0, SYSCTL_INT(_hw_em, OID_AUTO, rxd, CTLFLAG_RDTUN, &em_rxd, 0,
"Number of receive descriptors per queue"); "Number of receive descriptors per queue");
SYSCTL_INT(_hw_em, OID_AUTO, txd, CTLFLAG_RDTUN, &em_txd, 0, SYSCTL_INT(_hw_em, OID_AUTO, txd, CTLFLAG_RDTUN, &em_txd, 0,
"Number of transmit descriptors per queue"); "Number of transmit descriptors per queue");
static int em_smart_pwr_down = FALSE; static int em_smart_pwr_down = FALSE;
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down, SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
0, "Set to true to leave smart power down enabled on newer adapters"); 0, "Set to true to leave smart power down enabled on newer adapters");
/* Controls whether promiscuous also shows bad packets */ /* Controls whether promiscuous also shows bad packets */
static int em_debug_sbp = FALSE; static int em_debug_sbp = FALSE;
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0, SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0,
"Show bad packets in promiscuous mode"); "Show bad packets in promiscuous mode");
static int em_enable_msix = TRUE; static int em_enable_msix = TRUE;
TUNABLE_INT("hw.em.enable_msix", &em_enable_msix);
SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0, SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0,
"Enable MSI-X interrupts"); "Enable MSI-X interrupts");
/* How many packets rxeof tries to clean at a time */ /* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100; static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&em_rx_process_limit, 0, &em_rx_process_limit, 0,
"Maximum number of received packets to process " "Maximum number of received packets to process "
@ -396,6 +406,7 @@ SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
/* Energy efficient ethernet - default to OFF */ /* Energy efficient ethernet - default to OFF */
static int eee_setting = 1; static int eee_setting = 1;
TUNABLE_INT("hw.em.eee_setting", &eee_setting);
SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0, SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
"Enable Energy Efficient Ethernet"); "Enable Energy Efficient Ethernet");

View File

@ -327,6 +327,8 @@ static SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters");
/* Descriptor defaults */ /* Descriptor defaults */
static int igb_rxd = IGB_DEFAULT_RXD; static int igb_rxd = IGB_DEFAULT_RXD;
static int igb_txd = IGB_DEFAULT_TXD; static int igb_txd = IGB_DEFAULT_TXD;
TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
SYSCTL_INT(_hw_igb, OID_AUTO, rxd, CTLFLAG_RDTUN, &igb_rxd, 0, SYSCTL_INT(_hw_igb, OID_AUTO, rxd, CTLFLAG_RDTUN, &igb_rxd, 0,
"Number of receive descriptors per queue"); "Number of receive descriptors per queue");
SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0, SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0,
@ -339,7 +341,8 @@ SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0,
** traffic for that interrupt vector ** traffic for that interrupt vector
*/ */
static int igb_enable_aim = TRUE; static int igb_enable_aim = TRUE;
SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igb_enable_aim, 0, TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim);
SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RW, &igb_enable_aim, 0,
"Enable adaptive interrupt moderation"); "Enable adaptive interrupt moderation");
/* /*
@ -347,6 +350,7 @@ SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igb_enable_aim, 0,
* but this allows it to be forced off for testing. * but this allows it to be forced off for testing.
*/ */
static int igb_enable_msix = 1; static int igb_enable_msix = 1;
TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0, SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0,
"Enable MSI-X interrupts"); "Enable MSI-X interrupts");
@ -354,6 +358,7 @@ SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0,
** Tuneable Interrupt rate ** Tuneable Interrupt rate
*/ */
static int igb_max_interrupt_rate = 8000; static int igb_max_interrupt_rate = 8000;
TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate);
SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
&igb_max_interrupt_rate, 0, "Maximum interrupts per second"); &igb_max_interrupt_rate, 0, "Maximum interrupts per second");
@ -362,6 +367,7 @@ SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
** Tuneable number of buffers in the buf-ring (drbr_xxx) ** Tuneable number of buffers in the buf-ring (drbr_xxx)
*/ */
static int igb_buf_ring_size = IGB_BR_SIZE; static int igb_buf_ring_size = IGB_BR_SIZE;
TUNABLE_INT("hw.igb.buf_ring_size", &igb_buf_ring_size);
SYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN, SYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN,
&igb_buf_ring_size, 0, "Size of the bufring"); &igb_buf_ring_size, 0, "Size of the bufring");
#endif #endif
@ -375,6 +381,7 @@ SYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN,
** a very workload dependent type feature. ** a very workload dependent type feature.
*/ */
static int igb_header_split = FALSE; static int igb_header_split = FALSE;
TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0, SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0,
"Enable receive mbuf header split"); "Enable receive mbuf header split");
@ -384,6 +391,7 @@ SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0,
** MSIX messages if left at 0. ** MSIX messages if left at 0.
*/ */
static int igb_num_queues = 0; static int igb_num_queues = 0;
TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
SYSCTL_INT(_hw_igb, OID_AUTO, num_queues, CTLFLAG_RDTUN, &igb_num_queues, 0, SYSCTL_INT(_hw_igb, OID_AUTO, num_queues, CTLFLAG_RDTUN, &igb_num_queues, 0,
"Number of queues to configure, 0 indicates autoconfigure"); "Number of queues to configure, 0 indicates autoconfigure");
@ -396,6 +404,7 @@ static int igb_last_bind_cpu = -1;
/* How many packets rxeof tries to clean at a time */ /* How many packets rxeof tries to clean at a time */
static int igb_rx_process_limit = 100; static int igb_rx_process_limit = 100;
TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
SYSCTL_INT(_hw_igb, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, SYSCTL_INT(_hw_igb, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&igb_rx_process_limit, 0, &igb_rx_process_limit, 0,
"Maximum number of received packets to process at a time, -1 means unlimited"); "Maximum number of received packets to process at a time, -1 means unlimited");

View File

@ -101,6 +101,7 @@ static video_adapter_t *vesa_adp;
static SYSCTL_NODE(_debug, OID_AUTO, vesa, CTLFLAG_RD, NULL, "VESA debugging"); static SYSCTL_NODE(_debug, OID_AUTO, vesa, CTLFLAG_RD, NULL, "VESA debugging");
static int vesa_shadow_rom; static int vesa_shadow_rom;
TUNABLE_INT("debug.vesa.shadow_rom", &vesa_shadow_rom);
SYSCTL_INT(_debug_vesa, OID_AUTO, shadow_rom, CTLFLAG_RDTUN, &vesa_shadow_rom, SYSCTL_INT(_debug_vesa, OID_AUTO, shadow_rom, CTLFLAG_RDTUN, &vesa_shadow_rom,
0, "Enable video BIOS shadow"); 0, "Enable video BIOS shadow");

View File

@ -78,13 +78,14 @@
#undef OHCI_DEBUG #undef OHCI_DEBUG
static int nocyclemaster; static int nocyclemaster = 0;
int firewire_phydma_enable = 1; int firewire_phydma_enable = 1;
SYSCTL_DECL(_hw_firewire); SYSCTL_DECL(_hw_firewire);
SYSCTL_INT(_hw_firewire, OID_AUTO, nocyclemaster, CTLFLAG_RWTUN, SYSCTL_INT(_hw_firewire, OID_AUTO, nocyclemaster, CTLFLAG_RW, &nocyclemaster, 0,
&nocyclemaster, 0, "Do not send cycle start packets"); "Do not send cycle start packets");
SYSCTL_INT(_hw_firewire, OID_AUTO, phydma_enable, CTLFLAG_RWTUN, SYSCTL_INT(_hw_firewire, OID_AUTO, phydma_enable, CTLFLAG_RW,
&firewire_phydma_enable, 0, "Allow physical request DMA from firewire"); &firewire_phydma_enable, 1, "Allow physical request DMA from firewire");
TUNABLE_INT("hw.firewire.phydma_enable", &firewire_phydma_enable);
static char dbcode[16][0x10]={"OUTM", "OUTL","INPM","INPL", static char dbcode[16][0x10]={"OUTM", "OUTL","INPM","INPL",
"STOR","LOAD","NOP ","STOP",}; "STOR","LOAD","NOP ","STOP",};

View File

@ -88,17 +88,21 @@ static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE; static int rx_queue_len = FWMAXQUEUE;
static MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface"); static MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RWTUN, &fwedebug, 0, ""); SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RW, &fwedebug, 0, "");
SYSCTL_DECL(_hw_firewire); SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD, 0, static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD, 0,
"Ethernet emulation subsystem"); "Ethernet emulation subsystem");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RWTUN, &stream_ch, 0, SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RW, &stream_ch, 0,
"Stream channel to use"); "Stream channel to use");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, tx_speed, CTLFLAG_RWTUN, &tx_speed, 0, SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, tx_speed, CTLFLAG_RW, &tx_speed, 0,
"Transmission speed"); "Transmission speed");
SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len, SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
0, "Length of the receive queue"); 0, "Length of the receive queue");
TUNABLE_INT("hw.firewire.fwe.stream_ch", &stream_ch);
TUNABLE_INT("hw.firewire.fwe.tx_speed", &tx_speed);
TUNABLE_INT("hw.firewire.fwe.rx_queue_len", &rx_queue_len);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
static poll_handler_t fwe_poll; static poll_handler_t fwe_poll;

View File

@ -105,9 +105,11 @@ SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire); SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0, static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0,
"Firewire ip subsystem"); "Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len, SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len,
0, "Length of the receive queue"); 0, "Length of the receive queue");
TUNABLE_INT("hw.firewire.fwip.rx_queue_len", &rx_queue_len);
#ifdef DEVICE_POLLING #ifdef DEVICE_POLLING
static poll_handler_t fwip_poll; static poll_handler_t fwip_poll;

View File

@ -134,23 +134,31 @@ static int sbp_tags = 0;
SYSCTL_DECL(_hw_firewire); SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0, static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0,
"SBP-II Subsystem"); "SBP-II Subsystem");
SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RWTUN, &debug, 0, SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RW, &debug, 0,
"SBP debug flag"); "SBP debug flag");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RWTUN, &auto_login, 0, SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RW, &auto_login, 0,
"SBP perform login automatically"); "SBP perform login automatically");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RWTUN, &max_speed, 0, SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RW, &max_speed, 0,
"SBP transfer max speed"); "SBP transfer max speed");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RWTUN, SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RW,
&ex_login, 0, "SBP enable exclusive login"); &ex_login, 0, "SBP enable exclusive login");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RWTUN, SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RW,
&login_delay, 0, "SBP login delay in msec"); &login_delay, 0, "SBP login delay in msec");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RWTUN, SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RW,
&scan_delay, 0, "SBP scan delay in msec"); &scan_delay, 0, "SBP scan delay in msec");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RWTUN, SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RW,
&use_doorbell, 0, "SBP use doorbell request"); &use_doorbell, 0, "SBP use doorbell request");
SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RWTUN, &sbp_tags, 0, SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RW, &sbp_tags, 0,
"SBP tagged queuing support"); "SBP tagged queuing support");
TUNABLE_INT("hw.firewire.sbp.auto_login", &auto_login);
TUNABLE_INT("hw.firewire.sbp.max_speed", &max_speed);
TUNABLE_INT("hw.firewire.sbp.exclusive_login", &ex_login);
TUNABLE_INT("hw.firewire.sbp.login_delay", &login_delay);
TUNABLE_INT("hw.firewire.sbp.scan_delay", &scan_delay);
TUNABLE_INT("hw.firewire.sbp.use_doorbell", &use_doorbell);
TUNABLE_INT("hw.firewire.sbp.tags", &sbp_tags);
#define NEED_RESPONSE 0 #define NEED_RESPONSE 0
#define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE) #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE)

View File

@ -314,6 +314,7 @@ glxiic_attach(device_t dev)
struct sysctl_oid *tree; struct sysctl_oid *tree;
int error, irq, unit; int error, irq, unit;
uint32_t irq_map; uint32_t irq_map;
char tn[32];
sc = device_get_softc(dev); sc = device_get_softc(dev);
sc->dev = dev; sc->dev = dev;
@ -401,8 +402,10 @@ glxiic_attach(device_t dev)
tree = device_get_sysctl_tree(dev); tree = device_get_sysctl_tree(dev);
sc->timeout = GLXIIC_DEFAULT_TIMEOUT; sc->timeout = GLXIIC_DEFAULT_TIMEOUT;
snprintf(tn, sizeof(tn), "dev.glxiic.%d.timeout", unit);
TUNABLE_INT_FETCH(tn, &sc->timeout);
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"timeout", CTLFLAG_RWTUN, &sc->timeout, 0, "timeout", CTLFLAG_RW | CTLFLAG_TUN, &sc->timeout, 0,
"activity timeout in ms"); "activity timeout in ms");
glxiic_gpio_enable(sc); glxiic_gpio_enable(sc);

View File

@ -617,7 +617,7 @@ hpt_status(FORMAL_HANDLER_ARGS)
NULL, 0, hpt_status, "A", "Get/Set " #name " state") NULL, 0, hpt_status, "A", "Get/Set " #name " state")
#else #else
#define hptregister_node(name) \ #define hptregister_node(name) \
SYSCTL_ROOT_NODE(OID_AUTO, name, CTLFLAG_RW, 0, "Get/Set " #name " state root node"); \ SYSCTL_NODE(, OID_AUTO, name, CTLFLAG_RW, 0, "Get/Set " #name " state root node"); \
SYSCTL_OID(_ ## name, OID_AUTO, status, CTLTYPE_STRING|CTLFLAG_RW, \ SYSCTL_OID(_ ## name, OID_AUTO, status, CTLTYPE_STRING|CTLFLAG_RW, \
NULL, 0, hpt_status, "A", "Get/Set " #name " state"); NULL, 0, hpt_status, "A", "Get/Set " #name " state");
#endif #endif

View File

@ -66,7 +66,8 @@ SYSCTL_DECL(_kern_hwpmc);
*/ */
static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_RDTUN, TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_TUN|CTLFLAG_RD,
&pmclog_buffer_size, 0, "size of log buffers in kilobytes"); &pmclog_buffer_size, 0, "size of log buffers in kilobytes");
/* /*
@ -74,7 +75,8 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_RDTUN,
*/ */
static int pmc_nlogbuffers = PMC_NLOGBUFFERS; static int pmc_nlogbuffers = PMC_NLOGBUFFERS;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_RDTUN, TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_TUN|CTLFLAG_RD,
&pmc_nlogbuffers, 0, "number of global log buffers"); &pmc_nlogbuffers, 0, "number of global log buffers");
/* /*

View File

@ -234,7 +234,8 @@ static void pmc_generic_cpu_finalize(struct pmc_mdep *md);
SYSCTL_DECL(_kern_hwpmc); SYSCTL_DECL(_kern_hwpmc);
static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH; static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN, TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_TUN|CTLFLAG_RD,
&pmc_callchaindepth, 0, "depth of call chain records"); &pmc_callchaindepth, 0, "depth of call chain records");
#ifdef DEBUG #ifdef DEBUG
@ -243,7 +244,7 @@ char pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr, TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
sizeof(pmc_debugstr)); sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags, SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH, CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags"); 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
#endif #endif
@ -253,7 +254,8 @@ SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
*/ */
static int pmc_hashsize = PMC_HASH_SIZE; static int pmc_hashsize = PMC_HASH_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN, TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
&pmc_hashsize, 0, "rows in hash tables"); &pmc_hashsize, 0, "rows in hash tables");
/* /*
@ -261,7 +263,8 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN,
*/ */
static int pmc_nsamples = PMC_NSAMPLES; static int pmc_nsamples = PMC_NSAMPLES;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN, TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
&pmc_nsamples, 0, "number of PC samples per CPU"); &pmc_nsamples, 0, "number of PC samples per CPU");
@ -270,7 +273,8 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN,
*/ */
static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE; static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN, TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
&pmc_mtxpool_size, 0, "size of spin mutex pool"); &pmc_mtxpool_size, 0, "size of spin mutex pool");
@ -284,7 +288,8 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN,
*/ */
static int pmc_unprivileged_syspmcs = 0; static int pmc_unprivileged_syspmcs = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN, TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
&pmc_unprivileged_syspmcs, 0, &pmc_unprivileged_syspmcs, 0,
"allow unprivileged process to allocate system PMCs"); "allow unprivileged process to allocate system PMCs");

View File

@ -61,19 +61,24 @@
SYSCTL_NODE(_kern, OID_AUTO, icl, CTLFLAG_RD, 0, "iSCSI Common Layer"); SYSCTL_NODE(_kern, OID_AUTO, icl, CTLFLAG_RD, 0, "iSCSI Common Layer");
static int debug = 1; static int debug = 1;
TUNABLE_INT("kern.icl.debug", &debug);
SYSCTL_INT(_kern_icl, OID_AUTO, debug, CTLFLAG_RWTUN, SYSCTL_INT(_kern_icl, OID_AUTO, debug, CTLFLAG_RWTUN,
&debug, 0, "Enable debug messages"); &debug, 0, "Enable debug messages");
static int coalesce = 1; static int coalesce = 1;
TUNABLE_INT("kern.icl.coalesce", &coalesce);
SYSCTL_INT(_kern_icl, OID_AUTO, coalesce, CTLFLAG_RWTUN, SYSCTL_INT(_kern_icl, OID_AUTO, coalesce, CTLFLAG_RWTUN,
&coalesce, 0, "Try to coalesce PDUs before sending"); &coalesce, 0, "Try to coalesce PDUs before sending");
static int partial_receive_len = 128 * 1024; static int partial_receive_len = 128 * 1024;
TUNABLE_INT("kern.icl.partial_receive_len", &partial_receive_len);
SYSCTL_INT(_kern_icl, OID_AUTO, partial_receive_len, CTLFLAG_RWTUN, SYSCTL_INT(_kern_icl, OID_AUTO, partial_receive_len, CTLFLAG_RWTUN,
&partial_receive_len, 0, "Minimum read size for partially received " &partial_receive_len, 0, "Minimum read size for partially received "
"data segment"); "data segment");
static int sendspace = 1048576; static int sendspace = 1048576;
TUNABLE_INT("kern.icl.sendspace", &sendspace);
SYSCTL_INT(_kern_icl, OID_AUTO, sendspace, CTLFLAG_RWTUN, SYSCTL_INT(_kern_icl, OID_AUTO, sendspace, CTLFLAG_RWTUN,
&sendspace, 0, "Default send socket buffer size"); &sendspace, 0, "Default send socket buffer size");
static int recvspace = 1048576; static int recvspace = 1048576;
TUNABLE_INT("kern.icl.recvspace", &recvspace);
SYSCTL_INT(_kern_icl, OID_AUTO, recvspace, CTLFLAG_RWTUN, SYSCTL_INT(_kern_icl, OID_AUTO, recvspace, CTLFLAG_RWTUN,
&recvspace, 0, "Default receive socket buffer size"); &recvspace, 0, "Default receive socket buffer size");

View File

@ -78,21 +78,27 @@ static struct iscsi_softc *sc;
SYSCTL_NODE(_kern, OID_AUTO, iscsi, CTLFLAG_RD, 0, "iSCSI initiator"); SYSCTL_NODE(_kern, OID_AUTO, iscsi, CTLFLAG_RD, 0, "iSCSI initiator");
static int debug = 1; static int debug = 1;
TUNABLE_INT("kern.iscsi.debug", &debug);
SYSCTL_INT(_kern_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, SYSCTL_INT(_kern_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN,
&debug, 0, "Enable debug messages"); &debug, 0, "Enable debug messages");
static int ping_timeout = 5; static int ping_timeout = 5;
TUNABLE_INT("kern.iscsi.ping_timeout", &ping_timeout);
SYSCTL_INT(_kern_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout, SYSCTL_INT(_kern_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout,
0, "Timeout for ping (NOP-Out) requests, in seconds"); 0, "Timeout for ping (NOP-Out) requests, in seconds");
static int iscsid_timeout = 60; static int iscsid_timeout = 60;
TUNABLE_INT("kern.iscsi.iscsid_timeout", &iscsid_timeout);
SYSCTL_INT(_kern_iscsi, OID_AUTO, iscsid_timeout, CTLFLAG_RWTUN, &iscsid_timeout, SYSCTL_INT(_kern_iscsi, OID_AUTO, iscsid_timeout, CTLFLAG_RWTUN, &iscsid_timeout,
0, "Time to wait for iscsid(8) to handle reconnection, in seconds"); 0, "Time to wait for iscsid(8) to handle reconnection, in seconds");
static int login_timeout = 60; static int login_timeout = 60;
TUNABLE_INT("kern.iscsi.login_timeout", &login_timeout);
SYSCTL_INT(_kern_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout, SYSCTL_INT(_kern_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout,
0, "Time to wait for iscsid(8) to finish Login Phase, in seconds"); 0, "Time to wait for iscsid(8) to finish Login Phase, in seconds");
static int maxtags = 255; static int maxtags = 255;
TUNABLE_INT("kern.iscsi.maxtags", &maxtags);
SYSCTL_INT(_kern_iscsi, OID_AUTO, maxtags, CTLFLAG_RWTUN, &maxtags, SYSCTL_INT(_kern_iscsi, OID_AUTO, maxtags, CTLFLAG_RWTUN, &maxtags,
0, "Max number of IO requests queued"); 0, "Max number of IO requests queued");
static int fail_on_disconnection = 0; static int fail_on_disconnection = 0;
TUNABLE_INT("kern.iscsi.fail_on_disconnection", &fail_on_disconnection);
SYSCTL_INT(_kern_iscsi, OID_AUTO, fail_on_disconnection, CTLFLAG_RWTUN, SYSCTL_INT(_kern_iscsi, OID_AUTO, fail_on_disconnection, CTLFLAG_RWTUN,
&fail_on_disconnection, 0, "Destroy CAM SIM on connection failure"); &fail_on_disconnection, 0, "Destroy CAM SIM on connection failure");

View File

@ -77,11 +77,11 @@ struct mtx iscsi_dbg_mtx;
#endif #endif
static int max_sessions = MAX_SESSIONS; static int max_sessions = MAX_SESSIONS;
SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_sessions, CTLFLAG_RDTUN, SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_sessions, CTLFLAG_RDTUN, &max_sessions, MAX_SESSIONS,
&max_sessions, 0, "Max sessions allowed"); "Max sessions allowed");
static int max_pdus = MAX_PDUS; static int max_pdus = MAX_PDUS;
SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_pdus, CTLFLAG_RDTUN, SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_pdus, CTLFLAG_RDTUN, &max_pdus, MAX_PDUS,
&max_pdus, 0, "Max PDU pool"); "Max pdu pool");
static char isid[6+1] = { static char isid[6+1] = {
0x80, 0x80,
@ -711,6 +711,9 @@ iscsi_start(void)
{ {
debug_called(8); debug_called(8);
TUNABLE_INT_FETCH("net.iscsi_initiator.max_sessions", &max_sessions);
TUNABLE_INT_FETCH("net.iscsi_initiator.max_pdus", &max_pdus);
isc = malloc(sizeof(struct isc_softc), M_ISCSI, M_ZERO|M_WAITOK); isc = malloc(sizeof(struct isc_softc), M_ISCSI, M_ZERO|M_WAITOK);
mtx_init(&isc->isc_mtx, "iscsi-isc", NULL, MTX_DEF); mtx_init(&isc->isc_mtx, "iscsi-isc", NULL, MTX_DEF);

View File

@ -244,15 +244,18 @@ static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
** traffic for that interrupt vector ** traffic for that interrupt vector
*/ */
static int ixgbe_enable_aim = TRUE; static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0, TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RW, &ixgbe_enable_aim, 0,
"Enable adaptive interrupt moderation"); "Enable adaptive interrupt moderation");
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
&ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
/* How many packets rxeof tries to clean at a time */ /* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256; static int ixgbe_rx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&ixgbe_rx_process_limit, 0, &ixgbe_rx_process_limit, 0,
"Maximum number of received packets to process at a time," "Maximum number of received packets to process at a time,"
@ -260,6 +263,7 @@ SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
/* How many packets txeof tries to clean at a time */ /* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256; static int ixgbe_tx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
&ixgbe_tx_process_limit, 0, &ixgbe_tx_process_limit, 0,
"Maximum number of sent packets to process at a time," "Maximum number of sent packets to process at a time,"
@ -279,6 +283,7 @@ static int ixgbe_smart_speed = ixgbe_smart_speed_on;
* but this allows it to be forced off for testing. * but this allows it to be forced off for testing.
*/ */
static int ixgbe_enable_msix = 1; static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
"Enable MSI-X interrupts"); "Enable MSI-X interrupts");
@ -289,6 +294,7 @@ SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
* can be overriden manually here. * can be overriden manually here.
*/ */
static int ixgbe_num_queues = 0; static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0, SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
"Number of queues to configure, 0 indicates autoconfigure"); "Number of queues to configure, 0 indicates autoconfigure");
@ -298,11 +304,13 @@ SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
** the better performing choice. ** the better performing choice.
*/ */
static int ixgbe_txd = PERFORM_TXD; static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0, SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
"Number of receive descriptors per queue"); "Number of receive descriptors per queue");
/* Number of RX descriptors per ring */ /* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD; static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0, SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
"Number of receive descriptors per queue"); "Number of receive descriptors per queue");

View File

@ -65,22 +65,27 @@ SYSCTL_NODE(_hw, OID_AUTO, malo, CTLFLAG_RD, 0,
"Marvell 88w8335 driver parameters"); "Marvell 88w8335 driver parameters");
static int malo_txcoalesce = 8; /* # tx pkts to q before poking f/w*/ static int malo_txcoalesce = 8; /* # tx pkts to q before poking f/w*/
SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &malo_txcoalesce, SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RW, &malo_txcoalesce,
0, "tx buffers to send at once"); 0, "tx buffers to send at once");
TUNABLE_INT("hw.malo.txcoalesce", &malo_txcoalesce);
static int malo_rxbuf = MALO_RXBUF; /* # rx buffers to allocate */ static int malo_rxbuf = MALO_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &malo_rxbuf, SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RW, &malo_rxbuf,
0, "rx buffers allocated"); 0, "rx buffers allocated");
TUNABLE_INT("hw.malo.rxbuf", &malo_rxbuf);
static int malo_rxquota = MALO_RXBUF; /* # max buffers to process */ static int malo_rxquota = MALO_RXBUF; /* # max buffers to process */
SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RWTUN, &malo_rxquota, SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RW, &malo_rxquota,
0, "max rx buffers to process per interrupt"); 0, "max rx buffers to process per interrupt");
TUNABLE_INT("hw.malo.rxquota", &malo_rxquota);
static int malo_txbuf = MALO_TXBUF; /* # tx buffers to allocate */ static int malo_txbuf = MALO_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RWTUN, &malo_txbuf, SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RW, &malo_txbuf,
0, "tx buffers allocated"); 0, "tx buffers allocated");
TUNABLE_INT("hw.malo.txbuf", &malo_txbuf);
#ifdef MALO_DEBUG #ifdef MALO_DEBUG
static int malo_debug = 0; static int malo_debug = 0;
SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RWTUN, &malo_debug, SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RW, &malo_debug,
0, "control debugging printfs"); 0, "control debugging printfs");
TUNABLE_INT("hw.malo.debug", &malo_debug);
enum { enum {
MALO_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MALO_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
MALO_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MALO_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */

View File

@ -86,8 +86,9 @@ static SYSCTL_NODE(_hw_malo, OID_AUTO, pci, CTLFLAG_RD, 0,
"Marvell 88W8335 driver PCI parameters"); "Marvell 88W8335 driver PCI parameters");
static int msi_disable = 0; /* MSI disabled */ static int msi_disable = 0; /* MSI disabled */
SYSCTL_INT(_hw_malo_pci, OID_AUTO, msi_disable, CTLFLAG_RWTUN, &msi_disable, SYSCTL_INT(_hw_malo_pci, OID_AUTO, msi_disable, CTLFLAG_RW, &msi_disable,
0, "MSI disabled"); 0, "MSI disabled");
TUNABLE_INT("hw.malo.pci.msi_disable", &msi_disable);
/* /*
* Devices supported by this driver. * Devices supported by this driver.

View File

@ -132,27 +132,33 @@ static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters"); SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int mfi_event_locale = MFI_EVT_LOCALE_ALL; static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale, SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
0, "event message locale"); 0, "event message locale");
static int mfi_event_class = MFI_EVT_CLASS_INFO; static int mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class, SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
0, "event message class"); 0, "event message class");
static int mfi_max_cmds = 128; static int mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds, SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
0, "Max commands limit (-1 = controller limit)"); 0, "Max commands limit (-1 = controller limit)");
static int mfi_detect_jbod_change = 1; static int mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN, SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
&mfi_detect_jbod_change, 0, "Detect a change to a JBOD"); &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS; int mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN, SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
&mfi_polled_cmd_timeout, 0, &mfi_polled_cmd_timeout, 0,
"Polled command timeout - used for firmware flash etc (in seconds)"); "Polled command timeout - used for firmware flash etc (in seconds)");
static int mfi_cmd_timeout = MFI_CMD_TIMEOUT; static int mfi_cmd_timeout = MFI_CMD_TIMEOUT;
TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout, SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
0, "Command timeout (in seconds)"); 0, "Command timeout (in seconds)");

View File

@ -90,7 +90,8 @@ static struct mfi_command * mfip_start(void *);
static void mfip_done(struct mfi_command *cm); static void mfip_done(struct mfi_command *cm);
static int mfi_allow_disks = 0; static int mfi_allow_disks = 0;
SYSCTL_INT(_hw_mfi, OID_AUTO, allow_cam_disk_passthrough, CTLFLAG_RDTUN, TUNABLE_INT("hw.mfi.allow_cam_disk_passthrough", &mfi_allow_disks);
SYSCTL_INT(_hw_mfi, OID_AUTO, allow_cam_disk_passthrough, CTLFLAG_RD,
&mfi_allow_disks, 0, "event message locale"); &mfi_allow_disks, 0, "event message locale");
static devclass_t mfip_devclass; static devclass_t mfip_devclass;

View File

@ -108,10 +108,12 @@ DRIVER_MODULE(mfi, pci, mfi_pci_driver, mfi_devclass, 0, 0);
MODULE_VERSION(mfi, 1); MODULE_VERSION(mfi, 1);
static int mfi_msi = 1; static int mfi_msi = 1;
TUNABLE_INT("hw.mfi.msi", &mfi_msi);
SYSCTL_INT(_hw_mfi, OID_AUTO, msi, CTLFLAG_RDTUN, &mfi_msi, 0, SYSCTL_INT(_hw_mfi, OID_AUTO, msi, CTLFLAG_RDTUN, &mfi_msi, 0,
"Enable use of MSI interrupts"); "Enable use of MSI interrupts");
static int mfi_mrsas_enable; static int mfi_mrsas_enable = 0;
TUNABLE_INT("hw.mfi.mrsas_enable", &mfi_mrsas_enable);
SYSCTL_INT(_hw_mfi, OID_AUTO, mrsas_enable, CTLFLAG_RDTUN, &mfi_mrsas_enable, SYSCTL_INT(_hw_mfi, OID_AUTO, mrsas_enable, CTLFLAG_RDTUN, &mfi_mrsas_enable,
0, "Allow mrasas to take newer cards"); 0, "Allow mrasas to take newer cards");
@ -184,6 +186,7 @@ mfi_pci_probe(device_t dev)
device_set_desc(dev, id->desc); device_set_desc(dev, id->desc);
/* give priority to mrsas if tunable set */ /* give priority to mrsas if tunable set */
TUNABLE_INT_FETCH("hw.mfi.mrsas_enable", &mfi_mrsas_enable);
if ((id->flags & MFI_FLAGS_MRSAS) && mfi_mrsas_enable) if ((id->flags & MFI_FLAGS_MRSAS) && mfi_mrsas_enable)
return (BUS_PROBE_LOW_PRIORITY); return (BUS_PROBE_LOW_PRIORITY);
else else

View File

@ -86,6 +86,7 @@ static void mfi_queue_map_sync(struct mfi_softc *sc);
extern int mfi_polled_cmd_timeout; extern int mfi_polled_cmd_timeout;
static int mfi_fw_reset_test = 0; static int mfi_fw_reset_test = 0;
#ifdef MFI_DEBUG #ifdef MFI_DEBUG
TUNABLE_INT("hw.mfi.fw_reset_test", &mfi_fw_reset_test);
SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test, SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
0, "Force a firmware reset condition"); 0, "Force a firmware reset condition");
#endif #endif

View File

@ -188,25 +188,31 @@ static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc, SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
0, "rx descriptors allocated"); 0, "rx descriptors allocated");
static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */ static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf, SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
0, "rx buffers allocated"); 0, "rx buffers allocated");
TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */ static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf, SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
0, "tx buffers allocated"); 0, "tx buffers allocated");
TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/ static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce, SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
0, "tx buffers to send at once"); 0, "tx buffers to send at once");
TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */ static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota, SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
0, "max rx buffers to process per interrupt"); 0, "max rx buffers to process per interrupt");
TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
static int mwl_rxdmalow = 3; /* # min buffers for wakeup */ static int mwl_rxdmalow = 3; /* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow, SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
0, "min free rx buffers before restarting traffic"); 0, "min free rx buffers before restarting traffic");
TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
#ifdef MWL_DEBUG #ifdef MWL_DEBUG
static int mwl_debug = 0; static int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug, SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
0, "control debugging printfs"); 0, "control debugging printfs");
TUNABLE_INT("hw.mwl.debug", &mwl_debug);
enum { enum {
MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */

Some files were not shown because too many files have changed in this diff Show More