- Lookup hard interrupt events by number.  Ignore the irq# for soft intrs.
- Add support to cpuset for binding hardware interrupts.  This has the side
  effect of binding any ithread associated with the hard interrupt.  As per
  restrictions imposed by MD code we can only bind interrupts to a single cpu
  presently.  Interrupts can be 'unbound' by binding them to all cpus.

Reviewed by:	jhb
Sponsored by:	Nokia
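Below is a minimal userland sketch (not part of the header that follows) of the interrupt-binding facility this change introduces: it binds a hardware interrupt to a single CPU through cpuset_setaffinity(2) with the new CPU_WHICH_IRQ selector, then 'unbinds' it by handing back the root set's mask of all cpus. The irq number (IRQ_NUM) is a hypothetical placeholder, error handling is minimal, and the operation is assumed to require root privileges.

#include <sys/param.h>
#include <sys/cpuset.h>

#include <err.h>

#define	IRQ_NUM		16	/* hypothetical irq# for illustration only */

int
main(void)
{
	cpuset_t mask;

	/* Bind IRQ_NUM (and any ithread tied to it) to cpu 2 only. */
	CPU_ZERO(&mask);
	CPU_SET(2, &mask);
	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_IRQ, IRQ_NUM,
	    sizeof(mask), &mask) != 0)
		err(1, "bind irq %d", IRQ_NUM);

	/*
	 * 'Unbind' by handing back the mask of all cpus in the system,
	 * taken from the root set.
	 */
	if (cpuset_getaffinity(CPU_LEVEL_ROOT, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask) != 0)
		err(1, "get root mask");
	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_IRQ, IRQ_NUM,
	    sizeof(mask), &mask) != 0)
		err(1, "unbind irq %d", IRQ_NUM);
	return (0);
}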
/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_CPUSET_H_
#define	_SYS_CPUSET_H_

#ifdef _KERNEL
#define	CPU_SETSIZE	MAXCPU
#endif

#define	CPU_MAXSIZE	128

#ifndef	CPU_SETSIZE
#define	CPU_SETSIZE	CPU_MAXSIZE
#endif

#define	_NCPUBITS	(sizeof(long) * NBBY)	/* bits per mask */
#define	_NCPUWORDS	howmany(CPU_SETSIZE, _NCPUBITS)

typedef	struct _cpuset {
	long	__bits[howmany(CPU_SETSIZE, _NCPUBITS)];
} cpuset_t;

#define	__cpuset_mask(n)	((long)1 << ((n) % _NCPUBITS))
#define	CPU_CLR(n, p)	((p)->__bits[(n)/_NCPUBITS] &= ~__cpuset_mask(n))
#define	CPU_COPY(f, t)	(void)(*(t) = *(f))
#define	CPU_ISSET(n, p)	(((p)->__bits[(n)/_NCPUBITS] & __cpuset_mask(n)) != 0)
#define	CPU_SET(n, p)	((p)->__bits[(n)/_NCPUBITS] |= __cpuset_mask(n))
#define	CPU_ZERO(p) do {				\
	__size_t __i;					\
	for (__i = 0; __i < _NCPUWORDS; __i++)		\
		(p)->__bits[__i] = 0;			\
} while (0)

/* Is p empty. */
#define	CPU_EMPTY(p) __extension__ ({			\
	__size_t __i;					\
	for (__i = 0; __i < _NCPUWORDS; __i++)		\
		if ((p)->__bits[__i])			\
			break;				\
	__i == _NCPUWORDS;				\
})

/* Is c a subset of p. */
#define	CPU_SUBSET(p, c) __extension__ ({		\
	__size_t __i;					\
	for (__i = 0; __i < _NCPUWORDS; __i++)		\
		if (((c)->__bits[__i] &			\
		    (p)->__bits[__i]) !=		\
		    (c)->__bits[__i])			\
			break;				\
	__i == _NCPUWORDS;				\
})

/* Are there any common bits between p & c? */
#define	CPU_OVERLAP(p, c) __extension__ ({		\
	__size_t __i;					\
	for (__i = 0; __i < _NCPUWORDS; __i++)		\
		if (((c)->__bits[__i] &			\
		    (p)->__bits[__i]) != 0)		\
			break;				\
	__i != _NCPUWORDS;				\
})

/* Compare two sets; returns 0 if equal, 1 otherwise. */
#define	CPU_CMP(p, c) __extension__ ({			\
	__size_t __i;					\
	for (__i = 0; __i < _NCPUWORDS; __i++)		\
		if (((c)->__bits[__i] !=		\
		    (p)->__bits[__i]))			\
			break;				\
	__i != _NCPUWORDS;				\
})

#define	CPU_OR(d, s) do {				\
	__size_t __i;					\
	for (__i = 0; __i < _NCPUWORDS; __i++)		\
		(d)->__bits[__i] |= (s)->__bits[__i];	\
} while (0)

#define	CPU_AND(d, s) do {				\
	__size_t __i;					\
	for (__i = 0; __i < _NCPUWORDS; __i++)		\
		(d)->__bits[__i] &= (s)->__bits[__i];	\
} while (0)

#define	CPU_NAND(d, s) do {				\
	__size_t __i;					\
	for (__i = 0; __i < _NCPUWORDS; __i++)		\
		(d)->__bits[__i] &= ~(s)->__bits[__i];	\
} while (0)

/*
 * Valid cpulevel_t values.
 */
#define	CPU_LEVEL_ROOT		1	/* All system cpus. */
#define	CPU_LEVEL_CPUSET	2	/* Available cpus for which. */
#define	CPU_LEVEL_WHICH		3	/* Actual mask/id for which. */

/*
 * Valid cpuwhich_t values.
 */
#define	CPU_WHICH_TID		1	/* Specifies a thread id. */
#define	CPU_WHICH_PID		2	/* Specifies a process id. */
#define	CPU_WHICH_CPUSET	3	/* Specifies a set id. */
#define	CPU_WHICH_IRQ		4	/* Specifies an irq #. */

/*
 * Reserved cpuset identifiers.
 */
#define	CPUSET_INVALID	-1
#define	CPUSET_DEFAULT	0

#ifdef _KERNEL
LIST_HEAD(setlist, cpuset);

/*
 * cpusets encapsulate cpu binding information for one or more threads.
 *
 * a - Accessed with atomics.
 * s - Set at creation, never modified.  Only a ref required to read.
 * c - Locked internally by a cpuset lock.
 *
 * The bitmask is only modified while holding the cpuset lock.  It may be
 * read while only a reference is held but the consumer must be prepared
 * to deal with inconsistent results.
 */
struct cpuset {
	cpuset_t		cs_mask;	/* bitmask of valid cpus. */
	volatile u_int		cs_ref;		/* (a) Reference count. */
	int			cs_flags;	/* (s) Flags from below. */
	cpusetid_t		cs_id;		/* (s) Id or INVALID. */
	struct cpuset		*cs_parent;	/* (s) Pointer to our parent. */
	LIST_ENTRY(cpuset)	cs_link;	/* (c) All identified sets. */
	LIST_ENTRY(cpuset)	cs_siblings;	/* (c) Sibling set link. */
	struct setlist		cs_children;	/* (c) List of children. */
};

#define	CPU_SET_ROOT	0x0001		/* Set is a root set. */
#define	CPU_SET_RDONLY	0x0002		/* No modification allowed. */

extern cpuset_t *cpuset_root;

struct cpuset *cpuset_thread0(void);
struct cpuset *cpuset_ref(struct cpuset *);
void	cpuset_rel(struct cpuset *);
int	cpuset_setthread(lwpid_t id, cpuset_t *);

#else
__BEGIN_DECLS
int	cpuset(cpusetid_t *);
int	cpuset_setid(cpuwhich_t, id_t, cpusetid_t);
int	cpuset_getid(cpulevel_t, cpuwhich_t, id_t, cpusetid_t *);
int	cpuset_getaffinity(cpulevel_t, cpuwhich_t, id_t, size_t, cpuset_t *);
int	cpuset_setaffinity(cpulevel_t, cpuwhich_t, id_t, size_t, const cpuset_t *);
__END_DECLS
#endif
#endif /* !_SYS_CPUSET_H_ */
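As a usage note on the userland interface declared above (not part of the header): the sketch below pins the calling thread to cpu 0 and then reads the resulting mask back with the CPU_* macros. It assumes, per cpuset_getaffinity(2), that an id of -1 names the current thread.

#include <sys/param.h>
#include <sys/cpuset.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	cpuset_t mask;
	int i;

	/* Restrict the calling thread (id -1) to cpu 0. */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask) != 0)
		err(1, "cpuset_setaffinity");

	/* Fetch the mask back and report which cpus remain eligible. */
	if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask) != 0)
		err(1, "cpuset_getaffinity");
	for (i = 0; i < CPU_SETSIZE; i++)
		if (CPU_ISSET(i, &mask))
			printf("runnable on cpu %d\n", i);
	return (0);
}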