Add KERNEL_PANICKED macro for use in place of direct panicstr tests
commit 879e0604ee
parent 76a49ebaa6
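The change is mechanical: every open-coded test of the panicstr global is replaced by the new KERNEL_PANICKED() macro, which is added next to the panicstr declaration (see the hunk that declares panicstr near the end of the diff). Callers that previously wrapped the test in their own __predict_false() now rely on the hint built into the macro. As a minimal sketch of the pattern, not part of the commit itself:

/* The macro wraps the existing global and adds a branch-prediction hint. */
extern const char *panicstr;	/* panic message, NULL until the kernel panics */
#define	KERNEL_PANICKED()	__predict_false(panicstr != NULL)

/* Before: open-coded test against panicstr. */
if (!kdb_active && panicstr == NULL)
	mtx_assert(&Giant, MA_OWNED);

/* After: the same check, with the intent named and the unlikely hint applied consistently. */
if (!kdb_active && !KERNEL_PANICKED())
	mtx_assert(&Giant, MA_OWNED);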
@@ -81,7 +81,7 @@ __FBSDID("$FreeBSD$");
 */
 #define CKB_CTX_LOCK_ASSERT() \
 do { \
-if (!kdb_active && panicstr == NULL) \
+if (!kdb_active && !KERNEL_PANICKED()) \
 mtx_assert(&Giant, MA_OWNED); \
 } while (0)
 #else
@@ -77,7 +77,7 @@ __FBSDID("$FreeBSD$");
 */
 #define KMI_CTX_LOCK_ASSERT() \
 do { \
-if (!kdb_active && panicstr == NULL) \
+if (!kdb_active && !KERNEL_PANICKED()) \
 mtx_assert(&Giant, MA_OWNED); \
 } while (0)
 #else
@@ -42,7 +42,7 @@ typedef enum {
 } kmutex_type_t;

 #define MUTEX_HELD(x) (mutex_owned(x))
-#define MUTEX_NOT_HELD(x) (!mutex_owned(x) || panicstr)
+#define MUTEX_NOT_HELD(x) (!mutex_owned(x) || KERNEL_PANICKED())

 typedef struct sx kmutex_t;

@@ -7296,7 +7296,7 @@ zfs_shutdown(void *arg __unused, int howto __unused)
 /*
 * ZFS fini routines can not properly work in a panic-ed system.
 */
-if (panicstr == NULL)
+if (!KERNEL_PANICKED())
 (void)zfs__fini();
 }

@@ -293,7 +293,7 @@ zfs_sync(vfs_t *vfsp, int waitfor)
 * Data integrity is job one. We don't want a compromised kernel
 * writing to the storage pool, so we never sync during panic.
 */
-if (panicstr)
+if (KERNEL_PANICKED())
 return (0);

 /*
@@ -119,7 +119,7 @@ fm_drain(void *private, void *data, errorq_elem_t *eep)
 {
 nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);

-if (!panicstr)
+if (!KERNEL_PANICKED())
 (void) fm_ereport_post(nvl, EVCH_TRYHARD);
 else
 fm_nvprint(nvl);
@@ -420,7 +420,7 @@ fm_banner(void)
 if (!fm_panicstr)
 return; /* panic was not initiated by fm_panic(); do nothing */

-if (panicstr) {
+if (KERNEL_PANICKED()) {
 tod = panic_hrestime;
 now = panic_hrtime;
 } else {
@@ -472,7 +472,7 @@ fm_ereport_dump(void)
 char *buf;
 size_t len;

-if (panicstr) {
+if (KERNEL_PANICKED()) {
 tod = panic_hrestime;
 now = panic_hrtime;
 } else {
@@ -486,7 +486,7 @@ fm_ereport_dump(void)
 * In the panic case, sysevent_evc_walk_init() will return NULL.
 */
 if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
-    !panicstr)
+    !KERNEL_PANICKED())
 return; /* event channel isn't initialized yet */

 while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
@@ -479,7 +479,7 @@ textdump_dumpsys(struct dumperinfo *di)
 #endif
 if (textdump_do_msgbuf)
 textdump_dump_msgbuf(di);
-if (textdump_do_panic && panicstr != NULL)
+if (textdump_do_panic && KERNEL_PANICKED())
 textdump_dump_panic(di);
 if (textdump_do_version)
 textdump_dump_version(di);
@@ -2166,7 +2166,7 @@ acpi_shutdown_final(void *arg, int howto)
 } else if (status != AE_NOT_EXIST)
 device_printf(sc->acpi_dev, "reset failed - %s\n",
 AcpiFormatException(status));
-} else if (sc->acpi_do_disable && panicstr == NULL) {
+} else if (sc->acpi_do_disable && !KERNEL_PANICKED()) {
 /*
 * Only disable ACPI if the user requested. On some systems, writing
 * the disable value to SMI_CMD hangs the system.
@@ -74,7 +74,7 @@ vt_kms_postswitch(void *arg)

 sc = (struct vt_kms_softc *)arg;

-if (!kdb_active && panicstr == NULL)
+if (!kdb_active && !KERNEL_PANICKED())
 taskqueue_enqueue(taskqueue_thread, &sc->fb_mode_task);
 else
 drm_fb_helper_restore_fbdev_mode(sc->fb_helper);
@@ -2501,7 +2501,7 @@ static void
 iscsi_shutdown_post(struct iscsi_softc *sc)
 {

-if (panicstr == NULL) {
+if (!KERNEL_PANICKED()) {
 ISCSI_DEBUG("removing all sessions due to shutdown");
 iscsi_terminate_sessions(sc);
 }
@@ -1190,7 +1190,7 @@ mrsas_shutdown(device_t dev)

 sc = device_get_softc(dev);
 sc->remove_in_progress = 1;
-if (panicstr == NULL) {
+if (!KERNEL_PANICKED()) {
 if (sc->ocr_thread_active)
 wakeup(&sc->ocr_chan);
 i = 0;
@@ -2163,7 +2163,7 @@ sccnupdate(scr_stat *scp)
 if (suspend_in_progress || scp->sc->font_loading_in_progress)
 return;

-if (kdb_active || panicstr || shutdown_in_progress) {
+if (kdb_active || KERNEL_PANICKED() || shutdown_in_progress) {
 sc_touch_scrn_saver();
 } else if (scp != scp->sc->cur_scp) {
 return;
@@ -2229,7 +2229,7 @@ scrn_timer(void *arg)
 }

 /* should we stop the screen saver? */
-if (kdb_active || panicstr || shutdown_in_progress)
+if (kdb_active || KERNEL_PANICKED() || shutdown_in_progress)
 sc_touch_scrn_saver();
 if (run_scrn_saver) {
 if (time_uptime > sc->scrn_time_stamp + scrn_blank_time)
@@ -1301,7 +1301,7 @@ vt_flush(struct vt_device *vd)
 /* Check if the cursor should be displayed or not. */
 if ((vd->vd_flags & VDF_MOUSECURSOR) && /* Mouse support enabled. */
 !(vw->vw_flags & VWF_MOUSE_HIDE) && /* Cursor displayed. */
-!kdb_active && panicstr == NULL) { /* DDB inactive. */
+!kdb_active && !KERNEL_PANICKED()) { /* DDB inactive. */
 vd->vd_mshown = 1;
 } else {
 vd->vd_mshown = 0;
@@ -1398,7 +1398,7 @@ vtterm_done(struct terminal *tm)
 struct vt_window *vw = tm->tm_softc;
 struct vt_device *vd = vw->vw_device;

-if (kdb_active || panicstr != NULL) {
+if (kdb_active || KERNEL_PANICKED()) {
 /* Switch to the debugger. */
 if (vd->vd_curwindow != vw) {
 vd->vd_curwindow = vw;
@@ -199,7 +199,7 @@ xc_printf(const char *fmt, ...)
 static inline void xencons_lock(struct xencons_priv *cons)
 {

-if (panicstr == NULL)
+if (!KERNEL_PANICKED())
 mtx_lock_spin(&cons->mtx);

 }
@@ -207,7 +207,7 @@ static inline void xencons_lock(struct xencons_priv *cons)
 static inline void xencons_unlock(struct xencons_priv *cons)
 {

-if (panicstr == NULL)
+if (!KERNEL_PANICKED())
 mtx_unlock_spin(&cons->mtx);
 }

@@ -340,7 +340,7 @@ DB_FUNC(netgdb, db_netgdb_cmd, db_cmd_table, CS_OWN, NULL)
 struct debugnet_pcb *pcb;
 int error;

-if (panicstr == NULL) {
+if (!KERNEL_PANICKED()) {
 /* TODO: This limitation should be removed in future work. */
 printf("%s: netgdb is currently limited to use only after a "
 "panic. Sorry.\n", __func__);
@@ -2653,7 +2653,7 @@ g_journal_shutdown(void *arg, int howto __unused)
 struct g_class *mp;
 struct g_geom *gp, *gp2;

-if (panicstr != NULL)
+if (KERNEL_PANICKED())
 return;
 mp = arg;
 g_topology_lock();
@@ -3481,7 +3481,7 @@ g_mirror_shutdown_post_sync(void *arg, int howto)
 struct g_mirror_softc *sc;
 int error;

-if (panicstr != NULL)
+if (KERNEL_PANICKED())
 return;

 mp = arg;
@@ -324,7 +324,7 @@ ktr_tracepoint(uint64_t mask, const char *file, int line, const char *format,
 #endif
 int cpu;

-if (panicstr || kdb_active)
+if (KERNEL_PANICKED() || kdb_active)
 return;
 if ((ktr_mask & mask) == 0 || ktr_buf == NULL)
 return;
@@ -558,7 +558,7 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
 int contested = 0;
 #endif

-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 goto out;

 tid = (uintptr_t)curthread;
@@ -700,7 +700,7 @@ lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
 int contested = 0;
 #endif

-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 goto out;

 tid = (uintptr_t)curthread;
@@ -882,7 +882,7 @@ lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
 int wakeup_swapper = 0;
 int op;

-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 goto out;

 tid = (uintptr_t)curthread;
@@ -941,7 +941,7 @@ lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
 u_int op;
 bool locked;

-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 return (0);

 op = flags & LK_TYPE_MASK;
@@ -1003,7 +1003,7 @@ lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
 {
 int wakeup_swapper = 0;

-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 goto out;

 wakeup_swapper = wakeupshlk(lk, file, line);
@@ -1022,7 +1022,7 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
 u_int realexslp;
 int queue;

-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 goto out;

 tid = (uintptr_t)curthread;
@@ -1126,7 +1126,7 @@ lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
 const char *file;
 int line;

-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 return (0);

 file = __FILE__;
@@ -1254,7 +1254,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 int contested = 0;
 #endif

-if (panicstr != NULL)
+if (KERNEL_PANICKED())
 return (0);

 error = 0;
@@ -1662,7 +1662,7 @@ _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
 {
 int slocked = 0;

-if (panicstr != NULL)
+if (KERNEL_PANICKED())
 return;
 switch (what) {
 case KA_SLOCKED:
@@ -1071,7 +1071,7 @@ __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
 {
 const struct mtx *m;

-if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
+if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
 return;

 m = mtxlock2mtx(c);
@@ -1229,7 +1229,7 @@ _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
 struct thread *td;

 ldap->spin_cnt++;
-if (ldap->spin_cnt < 60000000 || kdb_active || panicstr != NULL)
+if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
 cpu_lock_delay();
 else {
 td = mtx_owner(m);
@@ -181,7 +181,7 @@ choosethread(void)

 td = sched_choose();

-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 return (choosethread_panic(td));

 TD_SET_RUNNING(td);
@@ -480,8 +480,8 @@ mi_switch(int flags)
 if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
 mtx_assert(&Giant, MA_NOTOWNED);
 #endif
-KASSERT(td->td_critnest == 1 || panicstr,
-("mi_switch: switch in a critical section"));
+KASSERT(td->td_critnest == 1 || KERNEL_PANICKED(),
+("mi_switch: switch in a critical section"));
 KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
 ("mi_switch: switch must be voluntary or involuntary"));

@@ -348,7 +348,7 @@ maybe_preempt(struct thread *td)
 ("maybe_preempt: trying to run inhibited thread"));
 pri = td->td_priority;
 cpri = ctd->td_priority;
-if (panicstr != NULL || pri >= cpri /* || dumping */ ||
+if (KERNEL_PANICKED() || pri >= cpri /* || dumping */ ||
 TD_IS_INHIBITED(ctd))
 return (0);
 #ifndef FULL_PREEMPTION
@@ -1138,7 +1138,7 @@ forward_wakeup(int cpunum)
 if ((!forward_wakeup_enabled) ||
 (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
 return (0);
-if (!smp_started || panicstr)
+if (!smp_started || KERNEL_PANICKED())
 return (0);

 forward_wakeups_requested++;
@@ -2533,7 +2533,7 @@ sched_setpreempt(struct thread *td)
 cpri = ctd->td_priority;
 if (pri < cpri)
 ctd->td_flags |= TDF_NEEDRESCHED;
-if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
+if (KERNEL_PANICKED() || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
 return;
 if (!sched_shouldpreempt(pri, cpri, 0))
 return;
@@ -153,7 +153,7 @@ kcsan_access(uintptr_t addr, size_t size, bool write, bool atomic, uintptr_t pc)
 return;
 if (__predict_false(kcsan_md_unsupported((vm_offset_t)addr)))
 return;
-if (__predict_false(panicstr != NULL))
+if (KERNEL_PANICKED())
 return;

 new.addr = addr;
@@ -409,7 +409,7 @@ vprintf(const char *fmt, va_list ap)

 retval = _vprintf(-1, TOCONS | TOLOG, fmt, ap);

-if (!panicstr)
+if (!KERNEL_PANICKED())
 msgbuftrigger = 1;

 return (retval);
@@ -423,7 +423,7 @@ prf_putbuf(char *bufr, int flags, int pri)
 msglogstr(bufr, pri, /*filter_cr*/1);

 if (flags & TOCONS) {
-if ((panicstr == NULL) && (constty != NULL))
+if ((!KERNEL_PANICKED()) && (constty != NULL))
 msgbuf_addstr(&consmsgbuf, -1,
 bufr, /*filter_cr*/ 0);

@@ -492,7 +492,7 @@ putchar(int c, void *arg)
 return;
 }

-if ((flags & TOTTY) && tp != NULL && panicstr == NULL)
+if ((flags & TOTTY) && tp != NULL && !KERNEL_PANICKED())
 tty_putchar(tp, c);

 if ((flags & (TOCONS | TOLOG)) && c != '\0')
@@ -195,7 +195,7 @@ forward_signal(struct thread *td)

 CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

-if (!smp_started || cold || panicstr)
+if (!smp_started || cold || KERNEL_PANICKED())
 return;
 if (!forward_signal_enabled)
 return;
@@ -899,7 +899,7 @@ witness_init(struct lock_object *lock, const char *type)
 * it to the pending_locks list. If it is not too early, then enroll
 * the lock now.
 */
-if (witness_watch < 1 || panicstr != NULL ||
+if (witness_watch < 1 || KERNEL_PANICKED() ||
 (lock->lo_flags & LO_WITNESS) == 0)
 lock->lo_witness = NULL;
 else if (witness_cold) {
@@ -1077,7 +1077,7 @@ int
 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
 {

-if (witness_watch == -1 || panicstr != NULL)
+if (witness_watch == -1 || KERNEL_PANICKED())
 return (0);

 /* Require locks that witness knows about. */
@@ -1118,7 +1118,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
 int i, j;

 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
-    panicstr != NULL)
+    KERNEL_PANICKED())
 return;

 w = lock->lo_witness;
@@ -1464,7 +1464,7 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
 struct thread *td;

 if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
-    panicstr != NULL)
+    KERNEL_PANICKED())
 return;
 w = lock->lo_witness;
 td = curthread;
@@ -1522,7 +1522,7 @@ witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
 struct lock_class *class;

 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
-if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
 return;
 class = LOCK_CLASS(lock);
 if (witness_watch) {
@@ -1568,7 +1568,7 @@ witness_downgrade(struct lock_object *lock, int flags, const char *file,
 struct lock_class *class;

 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
-if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
 return;
 class = LOCK_CLASS(lock);
 if (witness_watch) {
@@ -1616,7 +1616,7 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
 register_t s;
 int i, j;

-if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
+if (witness_cold || lock->lo_witness == NULL || KERNEL_PANICKED())
 return;
 td = curthread;
 class = LOCK_CLASS(lock);
@@ -1722,7 +1722,7 @@ witness_thread_exit(struct thread *td)
 int i, n;

 lle = td->td_sleeplocks;
-if (lle == NULL || panicstr != NULL)
+if (lle == NULL || KERNEL_PANICKED())
 return;
 if (lle->ll_count != 0) {
 for (n = 0; lle != NULL; lle = lle->ll_next)
@@ -1757,7 +1757,7 @@ witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
 va_list ap;
 int i, n;

-if (witness_cold || witness_watch < 1 || panicstr != NULL)
+if (witness_cold || witness_watch < 1 || KERNEL_PANICKED())
 return (0);
 n = 0;
 td = curthread;
@@ -1849,7 +1849,7 @@ enroll(const char *description, struct lock_class *lock_class)

 MPASS(description != NULL);

-if (witness_watch == -1 || panicstr != NULL)
+if (witness_watch == -1 || KERNEL_PANICKED())
 return (NULL);
 if ((lock_class->lc_flags & LC_SPINLOCK)) {
 if (witness_skipspin)
@@ -2323,7 +2323,7 @@ witness_save(struct lock_object *lock, const char **filep, int *linep)
 if (SCHEDULER_STOPPED())
 return;
 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
-if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
 return;
 class = LOCK_CLASS(lock);
 if (class->lc_flags & LC_SLEEPLOCK)
@@ -2358,7 +2358,7 @@ witness_restore(struct lock_object *lock, const char *file, int line)
 if (SCHEDULER_STOPPED())
 return;
 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
-if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
 return;
 class = LOCK_CLASS(lock);
 if (class->lc_flags & LC_SLEEPLOCK)
@@ -2388,7 +2388,7 @@ witness_assert(const struct lock_object *lock, int flags, const char *file,
 struct lock_instance *instance;
 struct lock_class *class;

-if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
+if (lock->lo_witness == NULL || witness_watch < 1 || KERNEL_PANICKED())
 return;
 class = LOCK_CLASS(lock);
 if ((class->lc_flags & LC_SLEEPLOCK) != 0)
@@ -2460,7 +2460,7 @@ witness_setflag(struct lock_object *lock, int flag, int set)
 struct lock_instance *instance;
 struct lock_class *class;

-if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
+if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
 return;
 class = LOCK_CLASS(lock);
 if (class->lc_flags & LC_SLEEPLOCK)
@@ -229,7 +229,7 @@ sbuf_tty_drain(void *a, const char *d, int len)
 cnputsn(d, len);
 return (len);
 }
-if (tp != NULL && panicstr == NULL) {
+if (tp != NULL && !KERNEL_PANICKED()) {
 rc = tty_putstrn(tp, d, len);
 if (rc != 0)
 return (-ENXIO);
@@ -1431,7 +1431,7 @@ bufshutdown(int show_busybufs)
 /*
 * Unmount filesystems
 */
-if (panicstr == NULL)
+if (!KERNEL_PANICKED())
 vfs_unmountall();
 }
 swapoff_all();
@@ -5023,7 +5023,7 @@ extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
 * This only exists to suppress warnings from unlocked specfs accesses. It is
 * no longer ok to have an unlocked VFS.
 */
-#define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \
+#define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \
 (vp)->v_type == VCHR || (vp)->v_type == VBAD)

 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */
@@ -5172,7 +5172,7 @@ vop_strategy_pre(void *ap)
 if ((bp->b_flags & B_CLUSTER) != 0)
 return;

-if (panicstr == NULL && !BUF_ISLOCKED(bp)) {
+if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) {
 if (vfs_badlock_print)
 printf(
 "VOP_STRATEGY: bp is not locked but should be\n");
@@ -293,7 +293,7 @@ netdump_start(struct dumperinfo *di)
 if (!netdump_enabled())
 return (EINVAL);

-if (panicstr == NULL) {
+if (!KERNEL_PANICKED()) {
 printf(
 "netdump_start: netdump may only be used after a panic\n");
 return (EINVAL);
@@ -611,7 +611,7 @@ spitfire_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
 if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
 return;
 }
-if (kdb_active != 0 || panicstr != NULL)
+if (kdb_active != 0 || KERNEL_PANICKED())
 printf("%s: couldn't send IPI to module 0x%u\n",
 __func__, mid);
 else
@@ -650,7 +650,7 @@ cheetah_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
 if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
 return;
 }
-if (kdb_active != 0 || panicstr != NULL)
+if (kdb_active != 0 || KERNEL_PANICKED())
 printf("%s: couldn't send IPI to module 0x%u\n",
 __func__, mid);
 else
@@ -709,7 +709,7 @@ cheetah_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
 if (CPU_EMPTY(&cpus))
 return;
 }
-if (kdb_active != 0 || panicstr != NULL)
+if (kdb_active != 0 || KERNEL_PANICKED())
 printf("%s: couldn't send IPI (cpus=%s ids=0x%lu)\n",
 __func__, cpusetobj_strprint(ipi_pbuf, &cpus), ids);
 else
@@ -750,7 +750,7 @@ jalapeno_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
 if ((ids & busynack) == 0)
 return;
 }
-if (kdb_active != 0 || panicstr != NULL)
+if (kdb_active != 0 || KERNEL_PANICKED())
 printf("%s: couldn't send IPI to module 0x%u\n",
 __func__, mid);
 else
@@ -801,7 +801,7 @@ jalapeno_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
 (2 * cpuid_to_mid[cpu]))) == 0)
 CPU_CLR(cpu, &cpus);
 }
-if (kdb_active != 0 || panicstr != NULL)
+if (kdb_active != 0 || KERNEL_PANICKED())
 printf("%s: couldn't send IPI (cpus=%s ids=0x%lu)\n",
 __func__, cpusetobj_strprint(ipi_pbuf, &cpus), ids);
 else
@@ -53,6 +53,7 @@ extern int cold; /* nonzero if we are doing a cold boot */
 extern int suspend_blocked; /* block suspend due to pending shutdown */
 extern int rebooting; /* kern_reboot() has been called. */
 extern const char *panicstr; /* panic message */
+#define KERNEL_PANICKED() __predict_false(panicstr != NULL)
 extern char version[]; /* system version */
 extern char compiler_version[]; /* compiler version */
 extern char copyright[]; /* system copyright */
@@ -2076,7 +2076,7 @@ native_lapic_ipi_vectored(u_int vector, int dest)

 /* Wait for an earlier IPI to finish. */
 if (!lapic_ipi_wait(BEFORE_SPIN)) {
-if (panicstr != NULL)
+if (KERNEL_PANICKED())
 return;
 else
 panic("APIC: Previous IPI is stuck");
@@ -1459,7 +1459,7 @@ cpustop_handler(void)
 * again, and might as well save power / release resources
 * (e.g., overprovisioned VM infrastructure).
 */
-while (__predict_false(!IS_BSP() && panicstr != NULL))
+while (__predict_false(!IS_BSP() && KERNEL_PANICKED()))
 halt();
 }

@@ -1672,7 +1672,7 @@ smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
 int cpu;

 /* It is not necessary to signal other CPUs while in the debugger. */
-if (kdb_active || panicstr != NULL)
+if (kdb_active || KERNEL_PANICKED())
 return;

 /*
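For code written after this change, the intended idiom is to call KERNEL_PANICKED() rather than reading panicstr directly, exactly as the shutdown and assertion paths above now do. A minimal sketch of a shutdown handler in that style follows; the foo_* names are illustrative only and are not part of the commit:

static void
foo_shutdown_post(void *arg, int howto __unused)
{
	struct foo_softc *sc = arg;

	/* Skip teardown entirely once the kernel has panicked. */
	if (KERNEL_PANICKED())
		return;
	foo_drain_queues(sc);
}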