This commit is contained in:
Attilio Rao 2011-05-16 16:34:03 +00:00
commit 7e7a34e520
34 changed files with 370 additions and 233 deletions

View File

@ -552,7 +552,8 @@ evalpipe(union node *n)
if (prevfd >= 0)
close(prevfd);
prevfd = pip[0];
close(pip[1]);
if (pip[1] != -1)
close(pip[1]);
}
INTON;
if (n->npipe.backgnd == 0) {

View File

@ -465,7 +465,7 @@ ulimitcmd(int argc __unused, char **argv __unused)
"(-%c) ", l->option);
out1fmt("%-18s %18s ", l->name, optbuf);
if (val == RLIM_INFINITY)
out1fmt("unlimited\n");
out1str("unlimited\n");
else
{
val /= l->factor;
@ -491,7 +491,7 @@ ulimitcmd(int argc __unused, char **argv __unused)
val = limit.rlim_max;
if (val == RLIM_INFINITY)
out1fmt("unlimited\n");
out1str("unlimited\n");
else
{
val /= l->factor;

View File

@ -681,14 +681,13 @@ exportcmd(int argc, char **argv)
out1str(cmdname);
out1c(' ');
}
p = strchr(vp->text, '=');
if (values && !(vp->flags & VUNSET)) {
p++;
outbin(vp->text, p - vp->text,
out1);
out1qstr(p);
outbin(vp->text,
vp->name_len + 1, out1);
out1qstr(vp->text +
vp->name_len + 1);
} else
outbin(vp->text, p - vp->text,
outbin(vp->text, vp->name_len,
out1);
out1c('\n');
}

View File

@ -416,7 +416,7 @@ gpart_autofill(struct gctl_req *req)
struct gprovider *pp;
off_t first, last, a_first;
off_t size, start, a_lba;
off_t lba, len, alignment;
off_t lba, len, alignment, offset;
uintmax_t grade;
const char *s;
int error, has_size, has_start, has_alignment;
@ -467,8 +467,6 @@ gpart_autofill(struct gctl_req *req)
error = g_parse_lba(s, pp->lg_sectorsize, &size);
if (error)
errc(EXIT_FAILURE, error, "Invalid size param");
if (size > alignment)
size = ALIGNDOWN(size, alignment);
}
s = gctl_get_ascii(req, "start");
@ -478,22 +476,29 @@ gpart_autofill(struct gctl_req *req)
error = g_parse_lba(s, pp->lg_sectorsize, &start);
if (error)
errc(EXIT_FAILURE, error, "Invalid start param");
start = ALIGNUP(start, alignment);
}
/* No autofill necessary. */
if (has_size && has_start && !has_alignment)
goto done;
/* Adjust parameters to offset value for better alignment */
s = find_provcfg(pp, "offset");
offset = (s == NULL) ? 0:
(off_t)strtoimax(s, NULL, 0) / pp->lg_sectorsize;
start = ALIGNUP(start + offset, alignment);
if (size + offset > alignment)
size = ALIGNDOWN(size + offset, alignment);
first = (off_t)strtoimax(find_geomcfg(gp, "first"), NULL, 0);
last = (off_t)strtoimax(find_geomcfg(gp, "last"), NULL, 0);
grade = ~0ULL;
a_first = ALIGNUP(first, alignment);
last = ALIGNDOWN(last, alignment);
a_first = ALIGNUP(first + offset, alignment);
last = ALIGNDOWN(last + offset, alignment);
while ((pp = find_provider(gp, first)) != NULL) {
s = find_provcfg(pp, "start");
lba = (off_t)strtoimax(s, NULL, 0);
a_lba = ALIGNDOWN(lba, alignment);
a_lba = ALIGNDOWN(lba + offset, alignment);
if (first < a_lba && a_first < a_lba) {
/* Free space [first, lba> */
len = a_lba - a_first;
@ -519,7 +524,7 @@ gpart_autofill(struct gctl_req *req)
s = find_provcfg(pp, "end");
first = (off_t)strtoimax(s, NULL, 0) + 1;
a_first = ALIGNUP(first, alignment);
a_first = ALIGNUP(first + offset, alignment);
}
if (a_first <= last) {
/* Free space [first-last] */
@ -543,12 +548,11 @@ gpart_autofill(struct gctl_req *req)
}
}
}
if (grade == ~0ULL) {
geom_deletetree(&mesh);
return (ENOSPC);
}
start -= offset; /* Return back to real offset */
done:
snprintf(ssize, sizeof(ssize), "%jd", (intmax_t)size);
gctl_change_param(req, "size", -1, ssize);

View File

@ -341,6 +341,16 @@ static cyc_backend_t cyclic_backend;
MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem");
/*
 * Return the system uptime as a Solaris-style hrtime_t (nanoseconds),
 * derived from the kernel's binary uptime clock via binuptime(9).
 */
static __inline hrtime_t
cyc_gethrtime(void)
{
struct bintime bt;
binuptime(&bt);
/*
 * Convert the bintime to nanoseconds: whole seconds scale directly by
 * NANOSEC; the fractional part is a 64-bit binary fraction, so take its
 * upper 32 bits, multiply by NANOSEC, and shift back down — a 32.32
 * fixed-point multiply that avoids 64x64 overflow.
 */
return ((hrtime_t)bt.sec * NANOSEC +
(((uint64_t)NANOSEC * (uint32_t)(bt.frac >> 32)) >> 32));
}
/*
* Returns 1 if the upheap propagated to the root, 0 if it did not. This
* allows the caller to reprogram the backend only when the root has been
@ -507,7 +517,7 @@ cyclic_fire(cpu_t *c)
cyc_index_t *heap = cpu->cyp_heap;
cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
void *arg = be->cyb_arg;
hrtime_t now = gethrtime();
hrtime_t now = cyc_gethrtime();
hrtime_t exp;
if (cpu->cyp_nelems == 0) {
@ -687,7 +697,7 @@ cyclic_add_xcall(cyc_xcallarg_t *arg)
* If a start time hasn't been explicitly specified, we'll
* start on the next interval boundary.
*/
cyclic->cy_expire = (gethrtime() / cyclic->cy_interval + 1) *
cyclic->cy_expire = (cyc_gethrtime() / cyclic->cy_interval + 1) *
cyclic->cy_interval;
} else {
cyclic->cy_expire = when->cyt_when;

View File

@ -30,6 +30,7 @@ static void enable(cyb_arg_t);
static void disable(cyb_arg_t);
static void reprogram(cyb_arg_t, hrtime_t);
static void xcall(cyb_arg_t, cpu_t *, cyc_func_t, void *);
static void cyclic_clock(struct trapframe *frame);
static cyc_backend_t be = {
NULL, /* cyb_configure */
@ -45,6 +46,7 @@ static void
cyclic_ap_start(void *dummy)
{
/* Initialise the rest of the CPUs. */
cyclic_clock_func = cyclic_clock;
cyclic_mp_init();
}
@ -63,18 +65,10 @@ cyclic_machdep_init(void)
static void
cyclic_machdep_uninit(void)
{
int i;
for (i = 0; i <= mp_maxid; i++)
/* Reset the cyclic clock callback hook. */
cyclic_clock_func[i] = NULL;
/* De-register the cyclic backend. */
cyclic_uninit();
}
static hrtime_t exp_due[MAXCPU];
/*
* This function is the one registered by the machine dependent
* initialiser as the callback for high speed timer events.
@ -84,7 +78,7 @@ cyclic_clock(struct trapframe *frame)
{
cpu_t *c = &solaris_cpu[curcpu];
if (c->cpu_cyclic != NULL && gethrtime() >= exp_due[curcpu]) {
if (c->cpu_cyclic != NULL) {
if (TRAPF_USERMODE(frame)) {
c->cpu_profile_pc = 0;
c->cpu_profile_upc = TRAPF_PC(frame);
@ -102,24 +96,32 @@ cyclic_clock(struct trapframe *frame)
}
}
static void enable(cyb_arg_t arg)
static void
enable(cyb_arg_t arg __unused)
{
/* Register the cyclic clock callback function. */
cyclic_clock_func[curcpu] = cyclic_clock;
}
static void disable(cyb_arg_t arg)
static void
disable(cyb_arg_t arg __unused)
{
/* Reset the cyclic clock callback function. */
cyclic_clock_func[curcpu] = NULL;
}
static void reprogram(cyb_arg_t arg, hrtime_t exp)
static void
reprogram(cyb_arg_t arg __unused, hrtime_t exp)
{
exp_due[curcpu] = exp;
struct bintime bt;
struct timespec ts;
ts.tv_sec = exp / 1000000000;
ts.tv_nsec = exp % 1000000000;
timespec2bintime(&ts, &bt);
clocksource_cyc_set(&bt);
}
static void xcall(cyb_arg_t arg, cpu_t *c, cyc_func_t func, void *param)
static void xcall(cyb_arg_t arg __unused, cpu_t *c, cyc_func_t func,
void *param)
{
cpuset_t cpus;

View File

@ -3966,9 +3966,21 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
sc->sc_stats.ast_tx_fifoerr++;
if (ts->ts_status & HAL_TXERR_FILT)
sc->sc_stats.ast_tx_filtered++;
if (ts->ts_status & HAL_TXERR_XTXOP)
sc->sc_stats.ast_tx_xtxop++;
if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
sc->sc_stats.ast_tx_timerexpired++;
/* XXX HAL_TX_DATA_UNDERRUN */
/* XXX HAL_TX_DELIM_UNDERRUN */
if (bf->bf_m->m_flags & M_FF)
sc->sc_stats.ast_ff_txerr++;
}
/* XXX when is this valid? */
if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
sc->sc_stats.ast_tx_desccfgerr++;
sr = ts->ts_shortretry;
lr = ts->ts_longretry;
sc->sc_stats.ast_tx_shortretry += sr;

View File

@ -709,6 +709,12 @@ ath_sysctl_stats_attach(struct ath_softc *sc)
&sc->sc_stats.ast_tx_timeout, 0, "TX Global Timeout");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_cst", CTLFLAG_RD,
&sc->sc_stats.ast_tx_cst, 0, "TX Carrier Sense Timeout");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_xtxop", CTLFLAG_RD,
&sc->sc_stats.ast_tx_xtxop, 0, "TX exceeded TXOP");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_timerexpired", CTLFLAG_RD,
&sc->sc_stats.ast_tx_timerexpired, 0, "TX exceeded TX_TIMER register");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_desccfgerr", CTLFLAG_RD,
&sc->sc_stats.ast_tx_desccfgerr, 0, "TX Descriptor Cfg Error");
/* Attach the RX phy error array */
ath_sysctl_stats_attach_rxphyerr(sc, child);

View File

@ -121,17 +121,20 @@ struct ath_stats {
u_int32_t ast_be_missed; /* missed beacons */
u_int32_t ast_ani_cal; /* ANI calibrations performed */
u_int32_t ast_rx_agg; /* number of aggregate frames RX'ed */
u_int32_t ast_rx_halfgi;
u_int32_t ast_rx_2040;
u_int32_t ast_rx_pre_crc_err;
u_int32_t ast_rx_post_crc_err;
u_int32_t ast_rx_decrypt_busy_err;
u_int32_t ast_rx_halfgi; /* RX half-GI */
u_int32_t ast_rx_2040; /* RX 40mhz frame */
u_int32_t ast_rx_pre_crc_err; /* RX pre-delimiter CRC error */
u_int32_t ast_rx_post_crc_err; /* RX post-delimiter CRC error */
u_int32_t ast_rx_decrypt_busy_err; /* RX decrypt engine busy error */
u_int32_t ast_rx_hi_rx_chain;
u_int32_t ast_tx_htprotect; /* HT tx frames with protection */
u_int32_t ast_rx_hitqueueend;
u_int32_t ast_rx_hitqueueend; /* RX hit descr queue end */
u_int32_t ast_tx_timeout; /* Global TX timeout */
u_int32_t ast_tx_cst; /* Carrier sense timeout */
u_int32_t ast_pad[16];
u_int32_t ast_tx_xtxop; /* tx exceeded TXOP */
u_int32_t ast_tx_timerexpired; /* tx exceeded TX_TIMER */
u_int32_t ast_tx_desccfgerr; /* tx desc cfg error */
u_int32_t ast_pad[13];
};
#define SIOCGATHSTATS _IOWR('i', 137, struct ifreq)

View File

@ -2836,7 +2836,7 @@ bge_attach(device_t dev)
if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
/* Jumbo frame on BCM5719 A0 does not work. */
sc->bge_flags &= ~BGE_FLAG_JUMBO_FRAME;
sc->bge_flags &= ~BGE_FLAG_JUMBO;
}
break;
case BGE_ASICREV_BCM5755:

View File

@ -153,9 +153,10 @@ struct glxiic_softc {
};
#ifdef GLXIIC_DEBUG
#define DEBUG(fmt, args...) log(LOG_DEBUG, "%s: " fmt "\n" , __func__ , ## args)
#define GLXIIC_DEBUG_LOG(fmt, args...) \
log(LOG_DEBUG, "%s: " fmt "\n" , __func__ , ## args)
#else
#define DEBUG(fmt, args...)
#define GLXIIC_DEBUG_LOG(fmt, args...)
#endif
#define GLXIIC_SCLFRQ(n) ((n << 1))
@ -540,7 +541,7 @@ glxiic_timeout(void *arg)
sc = (struct glxiic_softc *)arg;
DEBUG("timeout in state %d", sc->state);
GLXIIC_DEBUG_LOG("timeout in state %d", sc->state);
if (glxiic_state_table[sc->state].master) {
sc->error = IIC_ETIMEOUT;
@ -604,7 +605,7 @@ glxiic_handle_slave_match_locked(struct glxiic_softc *sc, uint8_t status)
glxiic_set_state_locked(sc, GLXIIC_STATE_SLAVE_RX);
iicbus_intr(sc->iicbus, INTR_GENERAL, &addr);
} else {
DEBUG("unknown slave match");
GLXIIC_DEBUG_LOG("unknown slave match");
return (IIC_ESTATUS);
}
@ -618,7 +619,7 @@ glxiic_state_idle_callback(struct glxiic_softc *sc, uint8_t status)
GLXIIC_ASSERT_LOCKED(sc);
if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) {
DEBUG("bus error in idle");
GLXIIC_DEBUG_LOG("bus error in idle");
return (IIC_EBUSERR);
}
@ -637,7 +638,7 @@ glxiic_state_slave_tx_callback(struct glxiic_softc *sc, uint8_t status)
GLXIIC_ASSERT_LOCKED(sc);
if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) {
DEBUG("bus error in slave tx");
GLXIIC_DEBUG_LOG("bus error in slave tx");
return (IIC_EBUSERR);
}
@ -658,7 +659,7 @@ glxiic_state_slave_tx_callback(struct glxiic_softc *sc, uint8_t status)
}
if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) {
DEBUG("not awaiting data in slave tx");
GLXIIC_DEBUG_LOG("not awaiting data in slave tx");
return (IIC_ESTATUS);
}
@ -678,7 +679,7 @@ glxiic_state_slave_rx_callback(struct glxiic_softc *sc, uint8_t status)
GLXIIC_ASSERT_LOCKED(sc);
if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) {
DEBUG("bus error in slave rx");
GLXIIC_DEBUG_LOG("bus error in slave rx");
return (IIC_EBUSERR);
}
@ -694,7 +695,7 @@ glxiic_state_slave_rx_callback(struct glxiic_softc *sc, uint8_t status)
}
if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) {
DEBUG("no pending data in slave rx");
GLXIIC_DEBUG_LOG("no pending data in slave rx");
return (IIC_ESTATUS);
}
@ -714,17 +715,17 @@ glxiic_state_master_addr_callback(struct glxiic_softc *sc, uint8_t status)
GLXIIC_ASSERT_LOCKED(sc);
if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) {
DEBUG("bus error after master start");
GLXIIC_DEBUG_LOG("bus error after master start");
return (IIC_EBUSERR);
}
if ((status & GLXIIC_SMB_STS_MASTER_BIT) == 0) {
DEBUG("not bus master after master start");
GLXIIC_DEBUG_LOG("not bus master after master start");
return (IIC_ESTATUS);
}
if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) {
DEBUG("not awaiting address in master addr");
GLXIIC_DEBUG_LOG("not awaiting address in master addr");
return (IIC_ESTATUS);
}
@ -755,17 +756,17 @@ glxiic_state_master_tx_callback(struct glxiic_softc *sc, uint8_t status)
GLXIIC_ASSERT_LOCKED(sc);
if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) {
DEBUG("bus error in master tx");
GLXIIC_DEBUG_LOG("bus error in master tx");
return (IIC_EBUSERR);
}
if ((status & GLXIIC_SMB_STS_MASTER_BIT) == 0) {
DEBUG("not bus master in master tx");
GLXIIC_DEBUG_LOG("not bus master in master tx");
return (IIC_ESTATUS);
}
if ((status & GLXIIC_SMB_STS_NEGACK_BIT) != 0) {
DEBUG("slave nack in master tx");
GLXIIC_DEBUG_LOG("slave nack in master tx");
return (IIC_ENOACK);
}
@ -775,7 +776,7 @@ glxiic_state_master_tx_callback(struct glxiic_softc *sc, uint8_t status)
}
if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) {
DEBUG("not awaiting data in master tx");
GLXIIC_DEBUG_LOG("not awaiting data in master tx");
return (IIC_ESTATUS);
}
@ -796,17 +797,17 @@ glxiic_state_master_rx_callback(struct glxiic_softc *sc, uint8_t status)
GLXIIC_ASSERT_LOCKED(sc);
if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) {
DEBUG("bus error in master rx");
GLXIIC_DEBUG_LOG("bus error in master rx");
return (IIC_EBUSERR);
}
if ((status & GLXIIC_SMB_STS_MASTER_BIT) == 0) {
DEBUG("not bus master in master rx");
GLXIIC_DEBUG_LOG("not bus master in master rx");
return (IIC_ESTATUS);
}
if ((status & GLXIIC_SMB_STS_NEGACK_BIT) != 0) {
DEBUG("slave nack in rx");
GLXIIC_DEBUG_LOG("slave nack in rx");
return (IIC_ENOACK);
}
@ -825,7 +826,7 @@ glxiic_state_master_rx_callback(struct glxiic_softc *sc, uint8_t status)
}
if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) {
DEBUG("no pending data in master rx");
GLXIIC_DEBUG_LOG("no pending data in master rx");
return (IIC_ESTATUS);
}
@ -849,17 +850,17 @@ glxiic_state_master_stop_callback(struct glxiic_softc *sc, uint8_t status)
GLXIIC_ASSERT_LOCKED(sc);
if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) {
DEBUG("bus error in master stop");
GLXIIC_DEBUG_LOG("bus error in master stop");
return (IIC_EBUSERR);
}
if ((status & GLXIIC_SMB_STS_MASTER_BIT) == 0) {
DEBUG("not bus master in master stop");
GLXIIC_DEBUG_LOG("not bus master in master stop");
return (IIC_ESTATUS);
}
if ((status & GLXIIC_SMB_STS_NEGACK_BIT) != 0) {
DEBUG("slave nack in master stop");
GLXIIC_DEBUG_LOG("slave nack in master stop");
return (IIC_ENOACK);
}

View File

@ -78,17 +78,17 @@ static int nfs3_jukebox_delay = 10;
static int nfs_skip_wcc_data_onerr = 1;
static int nfs_keytab_enctype = ETYPE_DES_CBC_CRC;
SYSCTL_DECL(_vfs_newnfs);
SYSCTL_DECL(_vfs_nfs);
SYSCTL_INT(_vfs_newnfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
"Buffer reservation size 2 < x < 64");
SYSCTL_INT(_vfs_newnfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
"Number of times the nfs client has had to reconnect");
SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
"Number of seconds to delay a retry after receiving EJUKEBOX");
SYSCTL_INT(_vfs_newnfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
"Disable weak cache consistency checking when server returns an error");
SYSCTL_INT(_vfs_newnfs, OID_AUTO, keytab_enctype, CTLFLAG_RW, &nfs_keytab_enctype, 0,
SYSCTL_INT(_vfs_nfs, OID_AUTO, keytab_enctype, CTLFLAG_RW, &nfs_keytab_enctype, 0,
"Encryption type for the keytab entry used by nfs");
static void nfs_down(struct nfsmount *, struct thread *, const char *,

View File

@ -69,14 +69,12 @@ void (*ncl_call_invalcaches)(struct vnode *) = NULL;
static int nfs_realign_test;
static int nfs_realign_count;
SYSCTL_NODE(_vfs, OID_AUTO, newnfs, CTLFLAG_RW, 0, "New NFS filesystem");
SYSCTL_INT(_vfs_newnfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test,
SYSCTL_NODE(_vfs, OID_AUTO, nfs, CTLFLAG_RW, 0, "New NFS filesystem");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test,
0, "Number of realign tests done");
SYSCTL_INT(_vfs_newnfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count,
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count,
0, "Number of mbuf realignments done");
SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs4acl_enable, CTLFLAG_RW, &nfsrv_useacl,
0, "Enable NFSv4 ACLs");
SYSCTL_STRING(_vfs_newnfs, OID_AUTO, callback_addr, CTLFLAG_RW,
SYSCTL_STRING(_vfs_nfs, OID_AUTO, callback_addr, CTLFLAG_RW,
nfsv4_callbackaddr, sizeof(nfsv4_callbackaddr),
"NFSv4 callback addr for server to use");

View File

@ -46,8 +46,6 @@ __FBSDID("$FreeBSD$");
NFSDLOCKMUTEX;
SYSCTL_DECL(_vfs_newnfs);
SVCPOOL *nfscbd_pool;
static int nfs_cbproc(struct nfsrv_descript *, u_int32_t);

View File

@ -80,11 +80,11 @@ static void nfssvc_iod(void *);
static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];
SYSCTL_DECL(_vfs_newnfs);
SYSCTL_DECL(_vfs_nfs);
/* Maximum number of seconds a nfsiod kthread will sleep before exiting */
static unsigned int nfs_iodmaxidle = 120;
SYSCTL_UINT(_vfs_newnfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
SYSCTL_UINT(_vfs_nfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
"Max number of seconds an nfsiod kthread will sleep before exiting");
/* Maximum number of nfsiod kthreads */
@ -123,7 +123,7 @@ sysctl_iodmin(SYSCTL_HANDLER_ARGS)
mtx_unlock(&ncl_iod_mutex);
return (0);
}
SYSCTL_PROC(_vfs_newnfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
sizeof (nfs_iodmin), sysctl_iodmin, "IU",
"Min number of nfsiod kthreads to keep as spares");
@ -159,7 +159,7 @@ sysctl_iodmax(SYSCTL_HANDLER_ARGS)
mtx_unlock(&ncl_iod_mutex);
return (0);
}
SYSCTL_PROC(_vfs_newnfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
sizeof (ncl_iodmax), sysctl_iodmax, "IU",
"Max number of nfsiod kthreads");
@ -214,7 +214,7 @@ nfsiod_setup(void *dummy)
{
int error;
TUNABLE_INT_FETCH("vfs.newnfs.iodmin", &nfs_iodmin);
TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
nfscl_init();
mtx_lock(&ncl_iod_mutex);
/* Silently limit the start number of nfsiod's */
@ -231,7 +231,7 @@ nfsiod_setup(void *dummy)
SYSINIT(newnfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);
static int nfs_defect = 0;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
"Allow nfsiods to migrate serving different mounts");
/*

View File

@ -174,9 +174,9 @@ ncl_printf(const char *fmt, ...)
#ifdef NFS_ACDEBUG
#include <sys/sysctl.h>
SYSCTL_DECL(_vfs_newnfs);
SYSCTL_DECL(_vfs_nfs);
static int nfs_acdebug;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0, "");
#endif
/*

View File

@ -83,18 +83,16 @@ extern struct nfsstats newnfsstats;
MALLOC_DEFINE(M_NEWNFSREQ, "newnfsclient_req", "New NFS request header");
MALLOC_DEFINE(M_NEWNFSMNT, "newnfsmnt", "New NFS mount struct");
SYSCTL_DECL(_vfs_newnfs);
SYSCTL_STRUCT(_vfs_newnfs, NFS_NFSSTATS, nfsstats, CTLFLAG_RW,
&newnfsstats, nfsstats, "S,nfsstats");
SYSCTL_DECL(_vfs_nfs);
static int nfs_ip_paranoia = 1;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs_ip_paranoia, CTLFLAG_RW,
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_ip_paranoia, CTLFLAG_RW,
&nfs_ip_paranoia, 0, "");
static int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY;
SYSCTL_INT(_vfs_newnfs, NFS_TPRINTF_INITIAL_DELAY,
SYSCTL_INT(_vfs_nfs, NFS_TPRINTF_INITIAL_DELAY,
downdelayinitial, CTLFLAG_RW, &nfs_tprintf_initial_delay, 0, "");
/* how long between console messages "nfs server foo not responding" */
static int nfs_tprintf_delay = NFS_TPRINTF_DELAY;
SYSCTL_INT(_vfs_newnfs, NFS_TPRINTF_DELAY,
SYSCTL_INT(_vfs_nfs, NFS_TPRINTF_DELAY,
downdelayinterval, CTLFLAG_RW, &nfs_tprintf_delay, 0, "");
static int nfs_mountroot(struct mount *);
@ -152,14 +150,14 @@ struct nfsv3_diskless nfsv3_diskless = { { { 0 } } };
int nfs_diskless_valid = 0;
#endif
SYSCTL_INT(_vfs_newnfs, OID_AUTO, diskless_valid, CTLFLAG_RD,
SYSCTL_INT(_vfs_nfs, OID_AUTO, diskless_valid, CTLFLAG_RD,
&nfs_diskless_valid, 0,
"Has the diskless struct been filled correctly");
SYSCTL_STRING(_vfs_newnfs, OID_AUTO, diskless_rootpath, CTLFLAG_RD,
SYSCTL_STRING(_vfs_nfs, OID_AUTO, diskless_rootpath, CTLFLAG_RD,
nfsv3_diskless.root_hostnam, 0, "Path to nfs root");
SYSCTL_OPAQUE(_vfs_newnfs, OID_AUTO, diskless_rootaddr, CTLFLAG_RD,
SYSCTL_OPAQUE(_vfs_nfs, OID_AUTO, diskless_rootaddr, CTLFLAG_RD,
&nfsv3_diskless.root_saddr, sizeof(nfsv3_diskless.root_saddr),
"%Ssockaddr_in", "Diskless root nfs address");

View File

@ -199,27 +199,27 @@ static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
*/
#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
SYSCTL_DECL(_vfs_newnfs);
SYSCTL_DECL(_vfs_nfs);
static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
&nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
static int nfs_prime_access_cache = 0;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
&nfs_prime_access_cache, 0,
"Prime NFS ACCESS cache when fetching attributes");
static int newnfs_commit_on_close = 0;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
&newnfs_commit_on_close, 0, "write+commit on close, else only write");
static int nfs_clean_pages_on_close = 1;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
&nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
int newnfs_directio_enable = 0;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
&newnfs_directio_enable, 0, "Enable NFS directio");
/*
@ -234,14 +234,14 @@ SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
* meaningful.
*/
int newnfs_directio_allow_mmap = 1;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
&newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
#if 0
SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
&newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
&newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
#endif

View File

@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uuid.h>
#include <geom/geom.h>
@ -104,6 +105,13 @@ struct g_part_alias_list {
{ "netbsd-swap", G_PART_ALIAS_NETBSD_SWAP },
};
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, part, CTLFLAG_RW, 0, "GEOM_PART stuff");
static u_int check_integrity = 1;
TUNABLE_INT("kern.geom.part.check_integrity", &check_integrity);
SYSCTL_UINT(_kern_geom_part, OID_AUTO, check_integrity, CTLFLAG_RW,
&check_integrity, 1, "Enable integrity checking");
/*
* The GEOM partitioning class.
*/
@ -231,47 +239,111 @@ g_part_geometry(struct g_part_table *table, struct g_consumer *cp,
}
}
/*
 * Emit a GEOM_PART diagnostic when booting verbosely.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: a bare "if (bootverbose) { ... }" expansion would swallow
 * a following "else" and misbehave as the unbraced body of an outer
 * if/else (CERT PRE10-C).
 */
#define DPRINTF(...) do { \
	if (bootverbose) { \
		printf("GEOM_PART: " __VA_ARGS__); \
	} \
} while (0)
static int
g_part_check_integrity(struct g_part_table *table, struct g_consumer *cp)
{
struct g_part_entry *e1, *e2;
struct g_provider *pp;
int failed;
failed = 0;
pp = cp->provider;
if (table->gpt_first > table->gpt_last ||
table->gpt_last > pp->mediasize / pp->sectorsize - 1)
goto fail;
if (table->gpt_last < table->gpt_first) {
DPRINTF("last LBA is below first LBA: %jd < %jd\n",
(intmax_t)table->gpt_last, (intmax_t)table->gpt_first);
failed++;
}
if (table->gpt_last > pp->mediasize / pp->sectorsize - 1) {
DPRINTF("last LBA extends beyond mediasize: "
"%jd > %jd\n", (intmax_t)table->gpt_last,
(intmax_t)pp->mediasize / pp->sectorsize - 1);
failed++;
}
LIST_FOREACH(e1, &table->gpt_entry, gpe_entry) {
if (e1->gpe_deleted || e1->gpe_internal)
continue;
if (e1->gpe_start < table->gpt_first ||
e1->gpe_start > table->gpt_last ||
e1->gpe_end < e1->gpe_start ||
e1->gpe_end > table->gpt_last)
goto fail;
if (e1->gpe_start < table->gpt_first) {
DPRINTF("partition %d has start offset below first "
"LBA: %jd < %jd\n", e1->gpe_index,
(intmax_t)e1->gpe_start,
(intmax_t)table->gpt_first);
failed++;
}
if (e1->gpe_start > table->gpt_last) {
DPRINTF("partition %d has start offset beyond last "
"LBA: %jd > %jd\n", e1->gpe_index,
(intmax_t)e1->gpe_start,
(intmax_t)table->gpt_last);
failed++;
}
if (e1->gpe_end < e1->gpe_start) {
DPRINTF("partition %d has end offset below start "
"offset: %jd < %jd\n", e1->gpe_index,
(intmax_t)e1->gpe_end,
(intmax_t)e1->gpe_start);
failed++;
}
if (e1->gpe_end > table->gpt_last) {
DPRINTF("partition %d has end offset beyond last "
"LBA: %jd > %jd\n", e1->gpe_index,
(intmax_t)e1->gpe_end,
(intmax_t)table->gpt_last);
failed++;
}
e2 = e1;
while ((e2 = LIST_NEXT(e2, gpe_entry)) != NULL) {
if (e2->gpe_deleted || e2->gpe_internal)
continue;
if (e1->gpe_start >= e2->gpe_start &&
e1->gpe_start <= e2->gpe_end)
goto fail;
e1->gpe_start <= e2->gpe_end) {
DPRINTF("partition %d has start offset inside "
"partition %d: start[%d] %jd >= start[%d] "
"%jd <= end[%d] %jd\n",
e1->gpe_index, e2->gpe_index,
e2->gpe_index, (intmax_t)e2->gpe_start,
e1->gpe_index, (intmax_t)e1->gpe_start,
e2->gpe_index, (intmax_t)e2->gpe_end);
failed++;
}
if (e1->gpe_end >= e2->gpe_start &&
e1->gpe_end <= e2->gpe_end)
goto fail;
e1->gpe_end <= e2->gpe_end) {
DPRINTF("partition %d has end offset inside "
"partition %d: start[%d] %jd >= end[%d] "
"%jd <= end[%d] %jd\n",
e1->gpe_index, e2->gpe_index,
e2->gpe_index, (intmax_t)e2->gpe_start,
e1->gpe_index, (intmax_t)e1->gpe_end,
e2->gpe_index, (intmax_t)e2->gpe_end);
failed++;
}
if (e1->gpe_start < e2->gpe_start &&
e1->gpe_end > e2->gpe_end)
goto fail;
e1->gpe_end > e2->gpe_end) {
DPRINTF("partition %d contains partition %d: "
"start[%d] %jd > start[%d] %jd, end[%d] "
"%jd < end[%d] %jd\n",
e1->gpe_index, e2->gpe_index,
e1->gpe_index, (intmax_t)e1->gpe_start,
e2->gpe_index, (intmax_t)e2->gpe_start,
e2->gpe_index, (intmax_t)e2->gpe_end,
e1->gpe_index, (intmax_t)e1->gpe_end);
failed++;
}
}
}
return (0);
fail:
if (bootverbose)
if (failed != 0) {
printf("GEOM_PART: integrity check failed (%s, %s)\n",
pp->name, table->gpt_scheme->name);
return (EINVAL);
if (check_integrity != 0)
return (EINVAL);
table->gpt_corrupt = 1;
}
return (0);
}
#undef DPRINTF
struct g_part_entry *
g_part_new_entry(struct g_part_table *table, int index, quad_t start,

View File

@ -56,7 +56,7 @@ __FBSDID("$FreeBSD$");
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t cyclic_clock_func[MAXCPU];
cyclic_clock_func_t cyclic_clock_func = NULL;
#endif
int cpu_disable_deep_sleep = 0; /* Timer dies in C3. */
@ -128,6 +128,9 @@ struct pcpu_state {
struct bintime nexthard; /* Next hardlock() event. */
struct bintime nextstat; /* Next statclock() event. */
struct bintime nextprof; /* Next profclock() event. */
#ifdef KDTRACE_HOOKS
struct bintime nextcyc; /* Next OpenSolaris cyclics event. */
#endif
int ipi; /* This CPU needs IPI. */
int idle; /* This CPU is in idle mode. */
};
@ -190,17 +193,10 @@ handleevents(struct bintime *now, int fake)
usermode = TRAPF_USERMODE(frame);
pc = TRAPF_PC(frame);
}
#ifdef KDTRACE_HOOKS
/*
* If the DTrace hooks are configured and a callback function
* has been registered, then call it to process the high speed
* timers.
*/
if (!fake && cyclic_clock_func[curcpu] != NULL)
(*cyclic_clock_func[curcpu])(frame);
#endif
runs = 0;
state = DPCPU_PTR(timerstate);
while (bintime_cmp(now, &state->nexthard, >=)) {
bintime_add(&state->nexthard, &hardperiod);
runs++;
@ -224,6 +220,16 @@ handleevents(struct bintime *now, int fake)
}
} else
state->nextprof = state->nextstat;
#ifdef KDTRACE_HOOKS
if (fake == 0 && cyclic_clock_func != NULL &&
state->nextcyc.sec != -1 &&
bintime_cmp(now, &state->nextcyc, >=)) {
state->nextcyc.sec = -1;
(*cyclic_clock_func)(frame);
}
#endif
getnextcpuevent(&t, 0);
if (fake == 2) {
state->nextevent = t;
@ -263,10 +269,13 @@ getnextcpuevent(struct bintime *event, int idle)
} else { /* If CPU is active - handle all types of events. */
if (bintime_cmp(event, &state->nextstat, >))
*event = state->nextstat;
if (profiling &&
bintime_cmp(event, &state->nextprof, >))
if (profiling && bintime_cmp(event, &state->nextprof, >))
*event = state->nextprof;
}
#ifdef KDTRACE_HOOKS
if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >))
*event = state->nextcyc;
#endif
}
/*
@ -590,6 +599,9 @@ cpu_initclocks_bsp(void)
CPU_FOREACH(cpu) {
state = DPCPU_ID_PTR(cpu, timerstate);
mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
state->nextcyc.sec = -1;
#endif
}
#ifdef SMP
callout_new_inserted = cpu_new_callout;
@ -784,6 +796,43 @@ cpu_activeclock(void)
spinlock_exit();
}
#ifdef KDTRACE_HOOKS
/*
 * Record *t as the next OpenSolaris-cyclic event time for the current
 * CPU and, when it precedes the currently scheduled hardware event,
 * reprogram the event timer so the cyclic callback fires on time.
 * Called with the new deadline from the cyclic backend's reprogram hook.
 */
void
clocksource_cyc_set(const struct bintime *t)
{
struct bintime now;
struct pcpu_state *state;
state = DPCPU_PTR(timerstate);
/* In periodic mode the cached per-CPU timestamp is current enough. */
if (periodic)
now = state->now;
else
binuptime(&now);
CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
curcpu, now.sec, (unsigned int)(now.frac >> 32),
(unsigned int)(now.frac & 0xffffffff));
CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
curcpu, t->sec, (unsigned int)(t->frac >> 32),
(unsigned int)(t->frac & 0xffffffff));
ET_HW_LOCK(state);
/* Requested time already scheduled: nothing to do. */
if (bintime_cmp(t, &state->nextcyc, ==)) {
ET_HW_UNLOCK(state);
return;
}
state->nextcyc = *t;
/*
 * Some other event fires first; the cyclic deadline will be noticed
 * when that event is handled, so no hardware reprogramming needed.
 */
if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) {
ET_HW_UNLOCK(state);
return;
}
state->nextevent = state->nextcyc;
/* One-shot mode: push the earlier deadline to the timer hardware. */
if (!periodic)
loadtimer(&now, 0);
ET_HW_UNLOCK(state);
}
#endif
#ifdef SMP
static void
cpu_new_callout(int cpu, int ticks)

View File

@ -94,7 +94,8 @@ _assert_sbuf_integrity(const char *fun, struct sbuf *s)
KASSERT(s->s_buf != NULL,
("%s called with uninitialized or corrupt sbuf", fun));
KASSERT(s->s_len < s->s_size,
("wrote past end of sbuf (%d >= %d)", s->s_len, s->s_size));
("wrote past end of sbuf (%jd >= %jd)",
(intmax_t)s->s_len, (intmax_t)s->s_size));
}
static void
@ -255,16 +256,17 @@ sbuf_clear(struct sbuf *s)
* Effectively truncates the sbuf at the new position.
*/
int
sbuf_setpos(struct sbuf *s, int pos)
sbuf_setpos(struct sbuf *s, ssize_t pos)
{
assert_sbuf_integrity(s);
assert_sbuf_state(s, 0);
KASSERT(pos >= 0,
("attempt to seek to a negative position (%d)", pos));
("attempt to seek to a negative position (%jd)", (intmax_t)pos));
KASSERT(pos < s->s_size,
("attempt to seek past end of sbuf (%d >= %d)", pos, s->s_size));
("attempt to seek past end of sbuf (%jd >= %jd)",
(intmax_t)pos, (intmax_t)s->s_size));
if (pos < 0 || pos > s->s_len)
return (-1);
@ -640,7 +642,7 @@ sbuf_trim(struct sbuf *s)
* Check if an sbuf has an error.
*/
int
sbuf_error(struct sbuf *s)
sbuf_error(const struct sbuf *s)
{
return (s->s_error);
@ -691,7 +693,7 @@ sbuf_data(struct sbuf *s)
/*
* Return the length of the sbuf data.
*/
int
ssize_t
sbuf_len(struct sbuf *s)
{
@ -728,7 +730,7 @@ sbuf_delete(struct sbuf *s)
* Check if an sbuf has been finished.
*/
int
sbuf_done(struct sbuf *s)
sbuf_done(const struct sbuf *s)
{
return (SBUF_ISFINISHED(s));

View File

@ -361,7 +361,7 @@ nfsm_adv_xx(int s, struct mbuf **md, caddr_t *dpos)
*
* We would prefer to avoid this situation entirely. The situation does not
* occur with NFS/UDP and is supposed to only occassionally occur with TCP.
* Use vfs.nfs.realign_count and realign_test to check this.
* Use vfs.nfs_common.realign_count and realign_test to check this.
*/
int
nfs_realign(struct mbuf **pm, int how)

View File

@ -107,7 +107,7 @@
#endif
/*
* vfs.nfs sysctl(3) identifiers
* vfs.oldnfs sysctl(3) identifiers
*/
#define NFS_NFSSTATS 1 /* struct: struct nfsstats */

View File

@ -93,16 +93,16 @@ static int nfs3_jukebox_delay = 10;
static int nfs_skip_wcc_data_onerr = 1;
static int fake_wchan;
SYSCTL_DECL(_vfs_nfs);
SYSCTL_DECL(_vfs_oldnfs);
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
"Buffer reservation size 2 < x < 64");
SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
"Number of times the nfs client has had to reconnect");
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW,
&nfs3_jukebox_delay, 0,
"Number of seconds to delay a retry after receiving EJUKEBOX");
SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW,
&nfs_skip_wcc_data_onerr, 0,
"Disable weak cache consistency checking when server returns an error");

View File

@ -78,11 +78,11 @@ static void nfssvc_iod(void *);
static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];
SYSCTL_DECL(_vfs_nfs);
SYSCTL_DECL(_vfs_oldnfs);
/* Maximum number of seconds a nfsiod kthread will sleep before exiting */
static unsigned int nfs_iodmaxidle = 120;
SYSCTL_UINT(_vfs_nfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
SYSCTL_UINT(_vfs_oldnfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
"Max number of seconds an nfsiod kthread will sleep before exiting");
/* Maximum number of nfsiod kthreads */
@ -121,7 +121,7 @@ sysctl_iodmin(SYSCTL_HANDLER_ARGS)
mtx_unlock(&nfs_iod_mtx);
return (0);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
SYSCTL_PROC(_vfs_oldnfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
sizeof (nfs_iodmin), sysctl_iodmin, "IU",
"Min number of nfsiod kthreads to keep as spares");
@ -158,7 +158,7 @@ sysctl_iodmax(SYSCTL_HANDLER_ARGS)
mtx_unlock(&nfs_iod_mtx);
return (0);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
SYSCTL_PROC(_vfs_oldnfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
sizeof (nfs_iodmax), sysctl_iodmax, "IU",
"Max number of nfsiod kthreads");
@ -213,7 +213,7 @@ nfsiod_setup(void *dummy)
{
int error;
TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
TUNABLE_INT_FETCH("vfs.oldnfs.iodmin", &nfs_iodmin);
mtx_lock(&nfs_iod_mtx);
/* Silently limit the start number of nfsiod's */
if (nfs_iodmin > NFS_MAXASYNCDAEMON)
@ -229,7 +229,7 @@ nfsiod_setup(void *dummy)
SYSINIT(nfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);
static int nfs_defect = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
"Allow nfsiods to migrate serving different mounts");
/*

View File

@ -653,9 +653,9 @@ nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
#ifdef NFS_ACDEBUG
#include <sys/sysctl.h>
SYSCTL_DECL(_vfs_nfs);
SYSCTL_DECL(_vfs_oldnfs);
static int nfs_acdebug;
SYSCTL_INT(_vfs_nfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0,
"Toggle acdebug (attribute cache debug) flag");
#endif

View File

@ -90,25 +90,25 @@ uma_zone_t nfsmount_zone;
struct nfsstats nfsstats;
SYSCTL_NODE(_vfs, OID_AUTO, nfs, CTLFLAG_RW, 0, "NFS filesystem");
SYSCTL_STRUCT(_vfs_nfs, NFS_NFSSTATS, nfsstats, CTLFLAG_RW,
SYSCTL_NODE(_vfs, OID_AUTO, oldnfs, CTLFLAG_RW, 0, "Old NFS filesystem");
SYSCTL_STRUCT(_vfs_oldnfs, NFS_NFSSTATS, nfsstats, CTLFLAG_RW,
&nfsstats, nfsstats, "S,nfsstats");
static int nfs_ip_paranoia = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_ip_paranoia, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfs_ip_paranoia, CTLFLAG_RW,
&nfs_ip_paranoia, 0,
"Disallow accepting replies from IPs which differ from those sent");
#ifdef NFS_DEBUG
int nfs_debug;
SYSCTL_INT(_vfs_nfs, OID_AUTO, debug, CTLFLAG_RW, &nfs_debug, 0,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, debug, CTLFLAG_RW, &nfs_debug, 0,
"Toggle debug flag");
#endif
static int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY;
SYSCTL_INT(_vfs_nfs, NFS_TPRINTF_INITIAL_DELAY,
SYSCTL_INT(_vfs_oldnfs, NFS_TPRINTF_INITIAL_DELAY,
downdelayinitial, CTLFLAG_RW, &nfs_tprintf_initial_delay, 0,
"Delay before printing \"nfs server not responding\" messages");
/* how long between console messages "nfs server foo not responding" */
static int nfs_tprintf_delay = NFS_TPRINTF_DELAY;
SYSCTL_INT(_vfs_nfs, NFS_TPRINTF_DELAY,
SYSCTL_INT(_vfs_oldnfs, NFS_TPRINTF_DELAY,
downdelayinterval, CTLFLAG_RW, &nfs_tprintf_delay, 0,
"Delay between printing \"nfs server not responding\" messages");
@ -176,14 +176,14 @@ struct nfsv3_diskless nfsv3_diskless = { { { 0 } } };
int nfs_diskless_valid = 0;
#endif
SYSCTL_INT(_vfs_nfs, OID_AUTO, diskless_valid, CTLFLAG_RD,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, diskless_valid, CTLFLAG_RD,
&nfs_diskless_valid, 0,
"Has the diskless struct been filled correctly");
SYSCTL_STRING(_vfs_nfs, OID_AUTO, diskless_rootpath, CTLFLAG_RD,
SYSCTL_STRING(_vfs_oldnfs, OID_AUTO, diskless_rootpath, CTLFLAG_RD,
nfsv3_diskless.root_hostnam, 0, "Path to nfs root");
SYSCTL_OPAQUE(_vfs_nfs, OID_AUTO, diskless_rootaddr, CTLFLAG_RD,
SYSCTL_OPAQUE(_vfs_oldnfs, OID_AUTO, diskless_rootaddr, CTLFLAG_RD,
&nfsv3_diskless.root_saddr, sizeof nfsv3_diskless.root_saddr,
"%Ssockaddr_in", "Diskless root nfs address");

View File

@ -217,27 +217,27 @@ struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
int nfs_numasync = 0;
#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
SYSCTL_DECL(_vfs_nfs);
SYSCTL_DECL(_vfs_oldnfs);
static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
&nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
static int nfs_prime_access_cache = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
&nfs_prime_access_cache, 0,
"Prime NFS ACCESS cache when fetching attributes");
static int nfsv3_commit_on_close = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfsv3_commit_on_close, CTLFLAG_RW,
&nfsv3_commit_on_close, 0, "write+commit on close, else only write");
static int nfs_clean_pages_on_close = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
&nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
int nfs_directio_enable = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
&nfs_directio_enable, 0, "Enable NFS directio");
/*
@ -252,14 +252,14 @@ SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
* meaningful.
*/
int nfs_directio_allow_mmap = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
&nfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
#if 0
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
&nfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
&nfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
#endif

View File

@ -1148,8 +1148,6 @@ moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
vm_offset_t pa = VM_PAGE_TO_PHYS(m);
if (!moea64_initialized)
panic("moea64_zero_page: can't zero pa %#" PRIxPTR, pa);
if (size + off > PAGE_SIZE)
panic("moea64_zero_page: size + off > PAGE_SIZE");
@ -1172,9 +1170,6 @@ moea64_zero_page(mmu_t mmu, vm_page_t m)
vm_offset_t pa = VM_PAGE_TO_PHYS(m);
vm_offset_t va, off;
if (!moea64_initialized)
panic("moea64_zero_page: can't zero pa %#zx", pa);
if (!hw_direct_map) {
mtx_lock(&moea64_scratchpage_mtx);

View File

@ -93,7 +93,7 @@ mem_valid(vm_offset_t addr, int len)
for (i = 0; i < npregions; i++)
if ((addr >= pregions[i].mr_start)
&& (addr + len < pregions[i].mr_start + pregions[i].mr_size))
&& (addr + len <= pregions[i].mr_start + pregions[i].mr_size))
return (0);
return (EFAULT);

View File

@ -44,14 +44,9 @@ struct reg;
* subsystem into the appropriate timer interrupt.
*/
typedef void (*cyclic_clock_func_t)(struct trapframe *);
extern cyclic_clock_func_t cyclic_clock_func;
/*
* These external variables are actually machine-dependent, so
* they might not actually exist.
*
* Defining them here avoids a proliferation of header files.
*/
extern cyclic_clock_func_t cyclic_clock_func[];
void clocksource_cyc_set(const struct bintime *t);
/*
* The dtrace module handles traps that occur during a DTrace probe.

View File

@ -44,8 +44,8 @@ struct sbuf {
sbuf_drain_func *s_drain_func; /* drain function */
void *s_drain_arg; /* user-supplied drain argument */
int s_error; /* current error code */
int s_size; /* size of storage buffer */
int s_len; /* current length of string */
ssize_t s_size; /* size of storage buffer */
ssize_t s_len; /* current length of string */
#define SBUF_FIXEDLEN 0x00000000 /* fixed length buffer (default) */
#define SBUF_AUTOEXTEND 0x00000001 /* automatically extend buffer */
#define SBUF_USRFLAGMSK 0x0000ffff /* mask of flags the user may specify */
@ -63,7 +63,7 @@ struct sbuf *sbuf_new(struct sbuf *, char *, int, int);
#define sbuf_new_auto() \
sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND)
void sbuf_clear(struct sbuf *);
int sbuf_setpos(struct sbuf *, int);
int sbuf_setpos(struct sbuf *, ssize_t);
int sbuf_bcat(struct sbuf *, const void *, size_t);
int sbuf_bcpy(struct sbuf *, const void *, size_t);
int sbuf_cat(struct sbuf *, const char *);
@ -75,11 +75,11 @@ int sbuf_vprintf(struct sbuf *, const char *, __va_list)
int sbuf_putc(struct sbuf *, int);
void sbuf_set_drain(struct sbuf *, sbuf_drain_func *, void *);
int sbuf_trim(struct sbuf *);
int sbuf_error(struct sbuf *);
int sbuf_error(const struct sbuf *);
int sbuf_finish(struct sbuf *);
char *sbuf_data(struct sbuf *);
int sbuf_len(struct sbuf *);
int sbuf_done(struct sbuf *);
ssize_t sbuf_len(struct sbuf *);
int sbuf_done(const struct sbuf *);
void sbuf_delete(struct sbuf *);
#ifdef _KERNEL

View File

@ -221,10 +221,10 @@ readstats(struct nfsstats **stp, struct nfsrvstats **srvstp, int zero)
if (zero)
bzero(&zerostat, sizeof(zerostat));
buflen = sizeof(struct nfsstats);
if (*stp != NULL && sysctlbyname("vfs.nfs.nfsstats", *stp,
if (*stp != NULL && sysctlbyname("vfs.oldnfs.nfsstats", *stp,
&buflen, zero ? &zerostat : NULL, zero ? buflen : 0) < 0) {
if (errno != ENOENT)
err(1, "sysctl: vfs.nfs.nfsstats");
err(1, "sysctl: vfs.oldnfs.nfsstats");
*stp = NULL;
}
buflen = sizeof(struct nfsrvstats);

View File

@ -24,14 +24,14 @@
.\"
.\" $FreeBSD$
.\"
.Dd April 10, 2011
.Dd May 15, 2011
.Dt NFSV4 4
.Os
.Sh NAME
.Nm NFSv4
.Nd NFS Version 4 Protocol
.Sh DESCRIPTION
The experimental NFS client and server provides support for the
The NFS client and server provides support for the
.Tn NFSv4
specification; see
.%T "Network File System (NFS) Version 4 Protocol RFC 3530" .
@ -153,27 +153,11 @@ with RPCSEC_GSS (sec=krb5, krb5i, krb5p), only names and KerberosV tickets
will go on the wire.
.Sh SERVER SETUP
.Pp
To set up the experimental NFS server that supports
To set up the NFS server that supports
.Nm ,
you will need to either build a kernel with:
.sp
.Bd -literal -offset indent -compact
options NFSD
.Ed
and not
.Bd -literal -offset indent -compact
options NFSSERVER
.Ed
.sp
or start
.Xr mountd 8
and
.Xr nfsd 8
with the ``-e'' option to force use of the experimental server.
The
.Xr nfsuserd 8
daemon must also be running.
This will occur if
you will need to either set the variables in
.Xr rc.conf 5
as follows:
.sp
.Bd -literal -offset indent -compact
nfs_server_enable="YES"
@ -181,8 +165,14 @@ nfsv4_server_enable="YES"
nfsuserd_enable="YES"
.Ed
.sp
are set in
.Xr rc.conf 5 .
or start
.Xr mountd 8
and
.Xr nfsd 8
without the ``-o'' option, which would force use of the old server.
The
.Xr nfsuserd 8
daemon must also be running.
.Pp
You will also need to add at least one ``V4:'' line to the
.Xr exports 5
@ -196,7 +186,7 @@ there are a couple of
.Xr sysctl 8
variables that you can change, which might improve performance.
.Bl -tag -width Ds
.It Cm vfs.newnfs.issue_delegations
.It Cm vfs.nfsd.issue_delegations
when set non-zero, allows the server to issue Open Delegations to
clients.
These delegations permit the client to manipulate the file
@ -208,7 +198,7 @@ This can only be enabled when the file systems being exported to
clients are not being accessed locally on the server and, if being
accessed via NFS Version 2 or 3 clients, these clients cannot be
using the NLM.
.It Cm vfs.newnfs.enable_locallocks
.It Cm vfs.nfsd.enable_locallocks
can be set to 0 to disable acquisition of local byte range locks.
Disabling local locking can only be done if neither local accesses
to the exported file systems nor the NLM is operating on them.
@ -217,7 +207,7 @@ to the exported file systems nor the NLM is operating on them.
Note that Samba server access would be considered ``local access'' for the above
discussion.
.Pp
To build a kernel with the experimental
To build a kernel with the NFS server that supports
.Nm
linked into it, the
.sp
@ -235,7 +225,9 @@ To do an
mount, specify the ``nfsv4'' option on the
.Xr mount_nfs 8
command line.
This will force use of the experimental client plus set ``tcp'' and
This will force use of the client that supports
.Nm
plus set ``tcp'' and
.Nm .
.Pp
The
@ -269,16 +261,16 @@ To get callbacks to work when behind a NAT gateway, a port for the callback
service will need to be set up on the NAT gateway and then the address
of the NAT gateway (host IP plus port#) will need to be set by assigning the
.Xr sysctl 8
variable vfs.newnfs.callback_addr to a string of the form:
variable vfs.nfs.callback_addr to a string of the form:
.sp
N.N.N.N.N.N
.sp
where the first 4 Ns are the host IP address and the last two are the
port# in network byte order (all decimal #s in the range 0-255).
.Pp
To build a kernel with the experimental
To build a kernel with the client that supports
.Nm
client linked into it, the option
linked into it, the option
.sp
.Bd -literal -offset indent -compact
options NFSCL