freebsd-skq/sys/sys/eventhandler.h
pjd 27418a37e7 MFC r207920,r207934,r207936,r207937,r207970,r208142,r208147,r208148,r208166,
r208454,r208455,r208458:

r207920:

Back out r205134. It is not stable.

r207934:

Add missing new line characters to the warnings.

r207936:

Even though r203504 eliminates taste traffic provoked by vdev_geom.c,
ZFS still likes to open all vdevs, close them and open them again,
which in turn provokes taste traffic anyway.

I don't know of any clean way to fix it, so do it the hard way - if we can't
open the provider for writing, just retry 5 times with 0.5 second pauses.
This should eliminate accidental races caused by other classes tasting
providers created on top of our vdevs.

Reported by:	James R. Van Artsdalen <james-freebsd-fs2@jrv.org>
Reported by:	Yuri Pankov <yuri.pankov@gmail.com>
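
A minimal sketch of the retry described above (illustrative only: the
function name is made up, the real change lives in vdev_geom.c, and the
caller is assumed to hold the GEOM topology lock):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/kernel.h>
	#include <geom/geom.h>

	static int
	vdev_geom_open_retry(struct g_consumer *cp)
	{
		int error, i;

		error = 0;
		for (i = 0; i < 5; i++) {
			/* Ask GEOM for one write reference on our provider. */
			error = g_access(cp, 0, 1, 0);
			if (error == 0)
				break;
			/*
			 * Some other class is probably tasting a provider
			 * built on top of us; drop the topology lock, wait
			 * half a second and retry.
			 */
			g_topology_unlock();
			pause("vdvopn", hz / 2);
			g_topology_lock();
		}
		return (error);
	}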

r207937:

I added the vfs_lowvnodes event, but it was only used for a short while and
is now totally unused. Remove it.

r207970:

When there is no memory or KVA, try to help by reclaiming some vnodes.
This helps with 'kmem_map too small' panics.

No objections from:	kib
Tested by:		Alexander V. Ribchansky <shurik@zk.informjust.ua>
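
As a hedged sketch of the kind of hook this change relies on (the handler
name is hypothetical and the body is elided; vm_lowmem and
LOWMEM_PRI_DEFAULT come from the eventhandler.h header below):

	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <sys/eventhandler.h>

	static void
	vnode_lowmem_handler(void *arg __unused, int flags __unused)
	{
		/*
		 * Release a batch of free vnodes here so that their memory
		 * and KVA can be reused by the rest of the kernel.
		 */
	}
	EVENTHANDLER_DEFINE(vm_lowmem, vnode_lowmem_handler, NULL,
	    LOWMEM_PRI_DEFAULT);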

r208142:

The whole point of having a dedicated worker thread for each leaf VDEV was to
avoid calling zio_interrupt() from the geom_up thread context. It turns out
that when a provider is forcibly removed from the system and we kill the
worker thread, there can still be some ZIOs pending. To complete the pending
ZIOs when there is no worker thread anymore, we still have to call
zio_interrupt() from the geom_up context. To avoid this race, just remove the
use of worker threads altogether. This should be more or less fine: I thought
zio_interrupt() did more work, but it only makes a small UMA allocation with
M_WAITOK. It also saves one context switch per I/O request.

PR:		kern/145339
Reported by:	Alex Bakhtin <Alex.Bakhtin@gmail.com>

r208147:

Add a task structure to zio and use it instead of allocating one.
This eliminates the only place where we can sleep when calling zio_interrupt().
As a side effect this can actually improve performance a little, as we
allocate one less thing for every I/O.

Prodded by:	kib
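
A hedged sketch of the embed-a-task idea, using the stock FreeBSD
taskqueue(9) API; struct myio and the function names are made up and this
is not the actual ZFS code:

	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <sys/taskqueue.h>

	/* Made-up stand-in for struct zio, showing only the relevant field. */
	struct myio {
		int		io_error;
		struct task	io_task;	/* embedded completion task */
	};

	static void
	myio_done(void *context, int pending __unused)
	{
		struct myio *io = context;

		/* Completion work runs here, outside the g_up thread. */
		io->io_error = 0;
	}

	static void
	myio_dispatch_done(struct myio *io)
	{
		/*
		 * The task lives inside the I/O structure, so queueing the
		 * completion never has to allocate (and thus never sleeps).
		 */
		TASK_INIT(&io->io_task, 0, myio_done, io);
		taskqueue_enqueue(taskqueue_thread, &io->io_task);
	}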

r208148:

Allow UMA usage for ZIO data to be configured via the loader and turn it on
by default for amd64. On i386 I saw performance degradation when UMA was
used, but on amd64 it should help.

r208166:

Fix the userland build by making io_task available only to the kernel and by
providing a taskq_dispatch_safe() macro.

r208454:

Remove ZIO_USE_UMA from arc.c as well.

r208455:

ZIO_USE_UMA is no longer used.

r208458:

Create UMA zones unconditionally.
2010-05-24 10:09:36 +00:00

/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef SYS_EVENTHANDLER_H
#define SYS_EVENTHANDLER_H

#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/queue.h>

struct eventhandler_entry {
	TAILQ_ENTRY(eventhandler_entry)	ee_link;
	int				ee_priority;
#define	EHE_DEAD_PRIORITY	(-1)
	void				*ee_arg;
};

#ifdef VIMAGE
struct eventhandler_entry_vimage {
	void	(* func)(void);		/* Original function registered. */
	void	*ee_arg;		/* Original argument registered. */
	void	*sparep[2];
};
#endif

struct eventhandler_list {
	char				*el_name;
	int				el_flags;
#define	EHL_INITTED	(1<<0)
	u_int				el_runcount;
	struct mtx			el_lock;
	TAILQ_ENTRY(eventhandler_list)	el_link;
	TAILQ_HEAD(,eventhandler_entry)	el_entries;
};

typedef struct eventhandler_entry	*eventhandler_tag;

#define	EHL_LOCK(p)		mtx_lock(&(p)->el_lock)
#define	EHL_UNLOCK(p)		mtx_unlock(&(p)->el_lock)
#define	EHL_LOCK_ASSERT(p, x)	mtx_assert(&(p)->el_lock, x)

/*
 * Macro to invoke the handlers for a given event.
 */
#define _EVENTHANDLER_INVOKE(name, list, ...) do {			\
	struct eventhandler_entry *_ep;					\
	struct eventhandler_entry_ ## name *_t;				\
									\
	KASSERT((list)->el_flags & EHL_INITTED,				\
	    ("eventhandler_invoke: running non-inited list"));		\
	EHL_LOCK_ASSERT((list), MA_OWNED);				\
	(list)->el_runcount++;						\
	KASSERT((list)->el_runcount > 0,				\
	    ("eventhandler_invoke: runcount overflow"));		\
	CTR0(KTR_EVH, "eventhandler_invoke(\"" __STRING(name) "\")");	\
	TAILQ_FOREACH(_ep, &((list)->el_entries), ee_link) {		\
		if (_ep->ee_priority != EHE_DEAD_PRIORITY) {		\
			EHL_UNLOCK((list));				\
			_t = (struct eventhandler_entry_ ## name *)_ep;	\
			CTR1(KTR_EVH, "eventhandler_invoke: executing %p", \
			    (void *)_t->eh_func);			\
			_t->eh_func(_ep->ee_arg , ## __VA_ARGS__);	\
			EHL_LOCK((list));				\
		}							\
	}								\
	KASSERT((list)->el_runcount > 0,				\
	    ("eventhandler_invoke: runcount underflow"));		\
	(list)->el_runcount--;						\
	if ((list)->el_runcount == 0)					\
		eventhandler_prune_list(list);				\
	EHL_UNLOCK((list));						\
} while (0)

/*
 * Slow handlers are entirely dynamic; lists are created
 * when entries are added to them, and thus have no concept of "owner".
 *
 * Slow handlers need to be declared, but do not need to be defined. The
 * declaration must be in scope wherever the handler is to be invoked.
 */
#define EVENTHANDLER_DECLARE(name, type)				\
struct eventhandler_entry_ ## name 					\
{									\
	struct eventhandler_entry	ee;				\
	type				eh_func;			\
};									\
struct __hack

#define EVENTHANDLER_DEFINE(name, func, arg, priority)			\
	static eventhandler_tag name ## _tag;				\
	static void name ## _evh_init(void *ctx)			\
	{								\
		name ## _tag = EVENTHANDLER_REGISTER(name, func, ctx,	\
		    priority);						\
	}								\
	SYSINIT(name ## _evh_init, SI_SUB_CONFIGURE, SI_ORDER_ANY,	\
	    name ## _evh_init, arg);					\
	struct __hack

#define EVENTHANDLER_INVOKE(name, ...)					\
do {									\
	struct eventhandler_list *_el;					\
									\
	if ((_el = eventhandler_find_list(#name)) != NULL) 		\
		_EVENTHANDLER_INVOKE(name, _el , ## __VA_ARGS__);	\
} while (0)

#define EVENTHANDLER_REGISTER(name, func, arg, priority)		\
	eventhandler_register(NULL, #name, func, arg, priority)

#define EVENTHANDLER_DEREGISTER(name, tag) 				\
do {									\
	struct eventhandler_list *_el;					\
									\
	if ((_el = eventhandler_find_list(#name)) != NULL)		\
		eventhandler_deregister(_el, tag);			\
} while(0)
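
/*
 * Illustrative usage sketch; the identifiers mumble_event, mumble_fn,
 * mumble_hook and mumble_tag are made up for the example.  A consumer
 * declares the event type once, registers a handler, and whoever raises
 * the event invokes it; arguments after the registered "arg" are supplied
 * by the invoker:
 *
 *	typedef void (*mumble_fn)(void *arg, int how);
 *	EVENTHANDLER_DECLARE(mumble_event, mumble_fn);
 *
 *	static void
 *	mumble_hook(void *arg, int how)
 *	{
 *		printf("mumble: got %d\n", how);
 *	}
 *
 *	static eventhandler_tag mumble_tag;
 *
 *	mumble_tag = EVENTHANDLER_REGISTER(mumble_event, mumble_hook, NULL,
 *	    EVENTHANDLER_PRI_ANY);
 *	EVENTHANDLER_INVOKE(mumble_event, 42);
 *	EVENTHANDLER_DEREGISTER(mumble_event, mumble_tag);
 */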

eventhandler_tag eventhandler_register(struct eventhandler_list *list,
	    const char *name, void *func, void *arg, int priority);
void	eventhandler_deregister(struct eventhandler_list *list,
	    eventhandler_tag tag);
struct eventhandler_list *eventhandler_find_list(const char *name);
void	eventhandler_prune_list(struct eventhandler_list *list);

#ifdef VIMAGE
typedef	void (*vimage_iterator_func_t)(void *, ...);

eventhandler_tag vimage_eventhandler_register(struct eventhandler_list *list,
	    const char *name, void *func, void *arg, int priority,
	    vimage_iterator_func_t);
#endif

/*
 * Standard system event queues.
 */

/* Generic priority levels */
#define	EVENTHANDLER_PRI_FIRST	0
#define	EVENTHANDLER_PRI_ANY	10000
#define	EVENTHANDLER_PRI_LAST	20000

/* Shutdown events */
typedef void (*shutdown_fn)(void *, int);

#define	SHUTDOWN_PRI_FIRST	EVENTHANDLER_PRI_FIRST
#define	SHUTDOWN_PRI_DEFAULT	EVENTHANDLER_PRI_ANY
#define	SHUTDOWN_PRI_LAST	EVENTHANDLER_PRI_LAST

EVENTHANDLER_DECLARE(shutdown_pre_sync, shutdown_fn);	/* before fs sync */
EVENTHANDLER_DECLARE(shutdown_post_sync, shutdown_fn);	/* after fs sync */
EVENTHANDLER_DECLARE(shutdown_final, shutdown_fn);

/* Low memory event */
typedef void (*vm_lowmem_handler_t)(void *, int);
#define	LOWMEM_PRI_DEFAULT	EVENTHANDLER_PRI_FIRST
EVENTHANDLER_DECLARE(vm_lowmem, vm_lowmem_handler_t);

/* Root mounted event */
typedef void (*mountroot_handler_t)(void *);
EVENTHANDLER_DECLARE(mountroot, mountroot_handler_t);

/* VLAN state change events */
struct ifnet;
typedef void (*vlan_config_fn)(void *, struct ifnet *, uint16_t);
typedef void (*vlan_unconfig_fn)(void *, struct ifnet *, uint16_t);
EVENTHANDLER_DECLARE(vlan_config, vlan_config_fn);
EVENTHANDLER_DECLARE(vlan_unconfig, vlan_unconfig_fn);

/* BPF attach/detach events */
struct ifnet;
typedef void (*bpf_track_fn)(void *, struct ifnet *, int /* dlt */,
    int /* 1 =>'s attach */);
EVENTHANDLER_DECLARE(bpf_track, bpf_track_fn);

/*
 * Process events
 * process_fork and exit handlers are called without Giant.
 * exec handlers are called with Giant, but that is by accident.
 */
struct proc;
struct image_params;

typedef void (*exitlist_fn)(void *, struct proc *);
typedef void (*forklist_fn)(void *, struct proc *, struct proc *, int);
typedef void (*execlist_fn)(void *, struct proc *, struct image_params *);
typedef void (*proc_ctor_fn)(void *, struct proc *);
typedef void (*proc_dtor_fn)(void *, struct proc *);
typedef void (*proc_init_fn)(void *, struct proc *);
typedef void (*proc_fini_fn)(void *, struct proc *);
EVENTHANDLER_DECLARE(process_ctor, proc_ctor_fn);
EVENTHANDLER_DECLARE(process_dtor, proc_dtor_fn);
EVENTHANDLER_DECLARE(process_init, proc_init_fn);
EVENTHANDLER_DECLARE(process_fini, proc_fini_fn);
EVENTHANDLER_DECLARE(process_exit, exitlist_fn);
EVENTHANDLER_DECLARE(process_fork, forklist_fn);
EVENTHANDLER_DECLARE(process_exec, execlist_fn);
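
/*
 * Illustrative sketch (the handler name is made up): a process_exit
 * handler runs without Giant and receives the exiting process.
 *
 *	static void
 *	mumble_exit_hook(void *arg, struct proc *p)
 *	{
 *		printf("pid %d exiting\n", p->p_pid);
 *	}
 *
 *	EVENTHANDLER_REGISTER(process_exit, mumble_exit_hook, NULL,
 *	    EVENTHANDLER_PRI_ANY);
 */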

struct thread;
typedef void (*thread_ctor_fn)(void *, struct thread *);
typedef void (*thread_dtor_fn)(void *, struct thread *);
typedef void (*thread_fini_fn)(void *, struct thread *);
typedef void (*thread_init_fn)(void *, struct thread *);
EVENTHANDLER_DECLARE(thread_ctor, thread_ctor_fn);
EVENTHANDLER_DECLARE(thread_dtor, thread_dtor_fn);
EVENTHANDLER_DECLARE(thread_init, thread_init_fn);
EVENTHANDLER_DECLARE(thread_fini, thread_fini_fn);

typedef void (*uma_zone_chfn)(void *);
EVENTHANDLER_DECLARE(nmbclusters_change, uma_zone_chfn);
EVENTHANDLER_DECLARE(maxsockets_change, uma_zone_chfn);

typedef void (*schedtail_fn)(void *, struct proc *);
EVENTHANDLER_DECLARE(schedtail, schedtail_fn);

#endif /* SYS_EVENTHANDLER_H */