/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
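
/*
 * Illustrative usage sketch, not part of this file's logic: a bus
 * driver typically creates an RMAN_ARRAY rman at attach time and hands
 * it the range it owns.  All names and values below are hypothetical.
 *
 *	static struct rman irq_rman;
 *
 *	irq_rman.rm_type = RMAN_ARRAY;
 *	irq_rman.rm_descr = "example interrupt request lines";
 *	if (rman_init(&irq_rman) != 0 ||
 *	    rman_manage_region(&irq_rman, 0, 15) != 0)
 *		panic("irq_rman setup failed");
 */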

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `rman_res_t' (currently uintmax_t) rather than `unsigned long' --
 * the largest integral type ISO C (1990) guarantees is too narrow for,
 * e.g., 36-bit physical addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	void	*r_irq_cookie;	/* interrupt cookie for this (interrupt) resource */
	device_t r_dev;		/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params)	if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
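
/*
 * Worked example with hypothetical values: given existing free regions
 * [0x100, 0x1ff] and [0x300, 0x3ff], managing [0x200, 0x2ff] merges all
 * three into one free region [0x100, 0x3ff].  Managing [0x180, 0x2ff]
 * instead fails with EBUSY, because it overlaps the region that already
 * starts at 0x100.
 */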

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}
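
/*
 * Usage sketch (hypothetical names): a bridge driver that has already
 * allocated a memory window `res' from its parent bus can turn that
 * window into an rman of its own for child allocations:
 *
 *	static struct rman sc_mem_rman;
 *
 *	sc_mem_rman.rm_type = RMAN_ARRAY;
 *	sc_mem_rman.rm_descr = "example bridge memory window";
 *	if (rman_init_from_resource(&sc_mem_rman, res) != 0)
 *		return (ENXIO);
 */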

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
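
/*
 * Example (hypothetical, continuing the sc_mem_rman sketch above): a
 * caller can probe the unallocated extent of an rman before deciding
 * where to place a new window:
 *
 *	rman_res_t first, last;
 *
 *	if (rman_first_free_region(&sc_mem_rman, &first, &last) == 0)
 *		printf("first free: [%#jx, %#jx]\n",
 *		    (uintmax_t)first, (uintmax_t)last);
 */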

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
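
/*
 * Illustrative sketch (hypothetical values): a driver holding the
 * non-shared range [0x1000, 0x1fff] can grow into an adjacent free
 * region with rman_adjust_resource(res, 0x1000, 0x2fff), or give back
 * its lower half with rman_adjust_resource(res, 0x1800, 0x1fff).  A
 * request such as (0x3000, 0x3fff) fails with EINVAL because it has no
 * overlap with the current range.
 */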

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))
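
/*
 * Illustrative call of rman_reserve_resource_bound() (defined below);
 * names and values are hypothetical.  Reserve any 0x100-byte window,
 * aligned to 0x100, anywhere within [0x1000, 0x1fff]:
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource_bound(&sc_mem_rman, 0x1000, 0x1fff,
 *	    0x100, 0, rman_make_alignment_flags(0x100), dev);
 *	if (res == NULL)
 *		return (ENXIO);
 */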
1998-10-29 01:48:36 +00:00
|
|
|
struct resource *
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
|
|
|
|
rman_res_t count, rman_res_t bound, u_int flags,
|
2016-05-04 23:31:52 +00:00
|
|
|
device_t dev)
|
1998-10-29 01:48:36 +00:00
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
u_int new_rflags;
|
|
|
|
struct resource_i *r, *s, *rv;
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_res_t rstart, rend, amask, bmask;
|
1998-10-29 01:48:36 +00:00
|
|
|
|
2005-11-17 08:56:21 +00:00
|
|
|
rv = NULL;
|
1998-10-29 01:48:36 +00:00
|
|
|
|
Use uintmax_t (typedef'd to rman_res_t type) for rman ranges.
On some architectures, u_long isn't large enough for resource definitions.
Particularly, powerpc and arm allow 36-bit (or larger) physical addresses, but
type `long' is only 32-bit. This extends rman's resources to uintmax_t. With
this change, any resource can feasibly be placed anywhere in physical memory
(within the constraints of the driver).
Why uintmax_t and not something machine dependent, or uint64_t? Though it's
possible for uintmax_t to grow, it's highly unlikely it will become 128-bit on
32-bit architectures. 64-bit architectures should have plenty of RAM to absorb
the increase on resource sizes if and when this occurs, and the number of
resources on memory-constrained systems should be sufficiently small as to not
pose a drastic overhead. That being said, uintmax_t was chosen for source
clarity. If it's specified as uint64_t, all printf()-like calls would either
need casts to uintmax_t, or be littered with PRI*64 macros. Casts to uintmax_t
aren't horrible, but it would also bake into the API for
resource_list_print_type() either a hidden assumption that entries get cast to
uintmax_t for printing, or these calls would need the PRI*64 macros. Since
source code is meant to be read more often than written, I chose the clearest
path of simply using uintmax_t.
Tested on a PowerPC p5020-based board, which places all device resources in
0xfxxxxxxxx, and has 8GB RAM.
Regression tested on qemu-system-i386
Regression tested on qemu-system-mips (malta profile)
Tested PAE and devinfo on virtualbox (live CD)
Special thanks to bz for his testing on ARM.
Reviewed By: bz, jhb (previous)
Relnotes: Yes
Sponsored by: Alex Perez/Inertial Computing
Differential Revision: https://reviews.freebsd.org/D4544
2016-03-18 01:28:41 +00:00
|
|
|
DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
|
|
|
|
"length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
|
2006-08-03 21:19:13 +00:00
|
|
|
count, flags,
|
|
|
|
dev == NULL ? "<null>" : device_get_nameunit(dev)));
|
2014-07-16 22:18:19 +00:00
|
|
|
KASSERT((flags & RF_FIRSTSHARE) == 0,
|
2014-05-28 16:57:17 +00:00
|
|
|
("invalid flags %#x", flags));
|
2014-07-16 22:18:19 +00:00
|
|
|
new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;
|
1998-10-29 01:48:36 +00:00
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(rm->rm_mtx);
|
1998-10-29 01:48:36 +00:00
|
|
|
|
Use uintmax_t (typedef'd to rman_res_t type) for rman ranges.
On some architectures, u_long isn't large enough for resource definitions.
Particularly, powerpc and arm allow 36-bit (or larger) physical addresses, but
type `long' is only 32-bit. This extends rman's resources to uintmax_t. With
this change, any resource can feasibly be placed anywhere in physical memory
(within the constraints of the driver).
Why uintmax_t and not something machine dependent, or uint64_t? Though it's
possible for uintmax_t to grow, it's highly unlikely it will become 128-bit on
32-bit architectures. 64-bit architectures should have plenty of RAM to absorb
the increase on resource sizes if and when this occurs, and the number of
resources on memory-constrained systems should be sufficiently small as to not
pose a drastic overhead. That being said, uintmax_t was chosen for source
clarity. If it's specified as uint64_t, all printf()-like calls would either
need casts to uintmax_t, or be littered with PRI*64 macros. Casts to uintmax_t
aren't horrible, but it would also bake into the API for
resource_list_print_type() either a hidden assumption that entries get cast to
uintmax_t for printing, or these calls would need the PRI*64 macros. Since
source code is meant to be read more often than written, I chose the clearest
path of simply using uintmax_t.
Tested on a PowerPC p5020-based board, which places all device resources in
0xfxxxxxxxx, and has 8GB RAM.
Regression tested on qemu-system-i386
Regression tested on qemu-system-mips (malta profile)
Tested PAE and devinfo on virtualbox (live CD)
Special thanks to bz for his testing on ARM.
Reviewed By: bz, jhb (previous)
Relnotes: Yes
Sponsored by: Alex Perez/Inertial Computing
Differential Revision: https://reviews.freebsd.org/D4544
2016-03-18 01:28:41 +00:00
|
|
|
r = TAILQ_FIRST(&rm->rm_list);
|
|
|
|
if (r == NULL) {
|
|
|
|
DPRINTF(("NULL list head\n"));
|
|
|
|
} else {
|
|
|
|
DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
|
|
|
|
r->r_end, start, count-1));
|
|
|
|
}
|
2005-11-17 08:56:21 +00:00
|
|
|
for (r = TAILQ_FIRST(&rm->rm_list);
|
2014-05-05 15:59:31 +00:00
|
|
|
r && r->r_end < start + count - 1;
|
Use uintmax_t (typedef'd to rman_res_t type) for rman ranges.
On some architectures, u_long isn't large enough for resource definitions.
Particularly, powerpc and arm allow 36-bit (or larger) physical addresses, but
type `long' is only 32-bit. This extends rman's resources to uintmax_t. With
this change, any resource can feasibly be placed anywhere in physical memory
(within the constraints of the driver).
Why uintmax_t and not something machine dependent, or uint64_t? Though it's
possible for uintmax_t to grow, it's highly unlikely it will become 128-bit on
32-bit architectures. 64-bit architectures should have plenty of RAM to absorb
the increase on resource sizes if and when this occurs, and the number of
resources on memory-constrained systems should be sufficiently small as to not
pose a drastic overhead. That being said, uintmax_t was chosen for source
clarity. If it's specified as uint64_t, all printf()-like calls would either
need casts to uintmax_t, or be littered with PRI*64 macros. Casts to uintmax_t
aren't horrible, but it would also bake into the API for
resource_list_print_type() either a hidden assumption that entries get cast to
uintmax_t for printing, or these calls would need the PRI*64 macros. Since
source code is meant to be read more often than written, I chose the clearest
path of simply using uintmax_t.
Tested on a PowerPC p5020-based board, which places all device resources in
0xfxxxxxxxx, and has 8GB RAM.
Regression tested on qemu-system-i386
Regression tested on qemu-system-mips (malta profile)
Tested PAE and devinfo on virtualbox (live CD)
Special thanks to bz for his testing on ARM.
Reviewed By: bz, jhb (previous)
Relnotes: Yes
Sponsored by: Alex Perez/Inertial Computing
Differential Revision: https://reviews.freebsd.org/D4544
2016-03-18 01:28:41 +00:00
|
|
|
r = TAILQ_NEXT(r, r_link)) {
|
1998-10-29 01:48:36 +00:00
|
|
|
;
|
Use uintmax_t (typedef'd to rman_res_t type) for rman ranges.
On some architectures, u_long isn't large enough for resource definitions.
Particularly, powerpc and arm allow 36-bit (or larger) physical addresses, but
type `long' is only 32-bit. This extends rman's resources to uintmax_t. With
this change, any resource can feasibly be placed anywhere in physical memory
(within the constraints of the driver).
Why uintmax_t and not something machine dependent, or uint64_t? Though it's
possible for uintmax_t to grow, it's highly unlikely it will become 128-bit on
32-bit architectures. 64-bit architectures should have plenty of RAM to absorb
the increase on resource sizes if and when this occurs, and the number of
resources on memory-constrained systems should be sufficiently small as to not
pose a drastic overhead. That being said, uintmax_t was chosen for source
clarity. If it's specified as uint64_t, all printf()-like calls would either
need casts to uintmax_t, or be littered with PRI*64 macros. Casts to uintmax_t
aren't horrible, but it would also bake into the API for
resource_list_print_type() either a hidden assumption that entries get cast to
uintmax_t for printing, or these calls would need the PRI*64 macros. Since
source code is meant to be read more often than written, I chose the clearest
path of simply using uintmax_t.
Tested on a PowerPC p5020-based board, which places all device resources in
0xfxxxxxxxx, and has 8GB RAM.
Regression tested on qemu-system-i386
Regression tested on qemu-system-mips (malta profile)
Tested PAE and devinfo on virtualbox (live CD)
Special thanks to bz for his testing on ARM.
Reviewed By: bz, jhb (previous)
Relnotes: Yes
Sponsored by: Alex Perez/Inertial Computing
Differential Revision: https://reviews.freebsd.org/D4544
2016-03-18 01:28:41 +00:00
|
|
|
DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
|
|
|
|
r->r_end, start, count-1));
|
|
|
|
}
|
1998-10-29 01:48:36 +00:00
|
|
|
|
2000-11-14 20:46:02 +00:00
|
|
|
if (r == NULL) {
|
2000-05-03 00:20:36 +00:00
|
|
|
DPRINTF(("could not find a region\n"));
|
1998-10-29 01:48:36 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
Use uintmax_t (typedef'd to rman_res_t type) for rman ranges.
On some architectures, u_long isn't large enough for resource definitions.
Particularly, powerpc and arm allow 36-bit (or larger) physical addresses, but
type `long' is only 32-bit. This extends rman's resources to uintmax_t. With
this change, any resource can feasibly be placed anywhere in physical memory
(within the constraints of the driver).
Why uintmax_t and not something machine dependent, or uint64_t? Though it's
possible for uintmax_t to grow, it's highly unlikely it will become 128-bit on
32-bit architectures. 64-bit architectures should have plenty of RAM to absorb
the increase on resource sizes if and when this occurs, and the number of
resources on memory-constrained systems should be sufficiently small as to not
pose a drastic overhead. That being said, uintmax_t was chosen for source
clarity. If it's specified as uint64_t, all printf()-like calls would either
need casts to uintmax_t, or be littered with PRI*64 macros. Casts to uintmax_t
aren't horrible, but it would also bake into the API for
resource_list_print_type() either a hidden assumption that entries get cast to
uintmax_t for printing, or these calls would need the PRI*64 macros. Since
source code is meant to be read more often than written, I chose the clearest
path of simply using uintmax_t.
Tested on a PowerPC p5020-based board, which places all device resources in
0xfxxxxxxxx, and has 8GB RAM.
Regression tested on qemu-system-i386
Regression tested on qemu-system-mips (malta profile)
Tested PAE and devinfo on virtualbox (live CD)
Special thanks to bz for his testing on ARM.
Reviewed By: bz, jhb (previous)
Relnotes: Yes
Sponsored by: Alex Perez/Inertial Computing
Differential Revision: https://reviews.freebsd.org/D4544
2016-03-18 01:28:41 +00:00
|
|
|
amask = (1ull << RF_ALIGNMENT(flags)) - 1;
|
|
|
|
KASSERT(start <= RM_MAX_END - amask,
|
|
|
|
("start (%#jx) + amask (%#jx) would wrap around", start, amask));
|
2014-05-05 15:59:31 +00:00
|
|
|
|
2001-12-21 21:40:55 +00:00
|
|
|
/* If bound is 0, bmask will also be 0 */
|
|
|
|
bmask = ~(bound - 1);
|
1998-10-29 01:48:36 +00:00
|
|
|
/*
|
|
|
|
* First try to find an acceptable totally-unshared region.
|
|
|
|
*/
|
2000-11-14 20:46:02 +00:00
|
|
|
for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
|
Use uintmax_t (typedef'd to rman_res_t type) for rman ranges.
On some architectures, u_long isn't large enough for resource definitions.
Particularly, powerpc and arm allow 36-bit (or larger) physical addresses, but
type `long' is only 32-bit. This extends rman's resources to uintmax_t. With
this change, any resource can feasibly be placed anywhere in physical memory
(within the constraints of the driver).
Why uintmax_t and not something machine dependent, or uint64_t? Though it's
possible for uintmax_t to grow, it's highly unlikely it will become 128-bit on
32-bit architectures. 64-bit architectures should have plenty of RAM to absorb
the increase on resource sizes if and when this occurs, and the number of
resources on memory-constrained systems should be sufficiently small as to not
pose a drastic overhead. That being said, uintmax_t was chosen for source
clarity. If it's specified as uint64_t, all printf()-like calls would either
need casts to uintmax_t, or be littered with PRI*64 macros. Casts to uintmax_t
aren't horrible, but it would also bake into the API for
resource_list_print_type() either a hidden assumption that entries get cast to
uintmax_t for printing, or these calls would need the PRI*64 macros. Since
source code is meant to be read more often than written, I chose the clearest
path of simply using uintmax_t.
Tested on a PowerPC p5020-based board, which places all device resources in
0xfxxxxxxxx, and has 8GB RAM.
Regression tested on qemu-system-i386
Regression tested on qemu-system-mips (malta profile)
Tested PAE and devinfo on virtualbox (live CD)
Special thanks to bz for his testing on ARM.
Reviewed By: bz, jhb (previous)
Relnotes: Yes
Sponsored by: Alex Perez/Inertial Computing
Differential Revision: https://reviews.freebsd.org/D4544
2016-03-18 01:28:41 +00:00
|
|
|
DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
|
2014-05-05 15:59:31 +00:00
|
|
|
/*
|
|
|
|
* The resource list is sorted, so there is no point in
|
|
|
|
* searching further once r_start is too large.
|
|
|
|
*/
|
|
|
|
if (s->r_start > end - (count - 1)) {
|
2016-03-18 01:28:41 +00:00
|
|
|
DPRINTF(("s->r_start (%#jx) + count - 1> end (%#jx)\n",
|
Sometimes, when asked to return region A..C, we'd return A+N..C+N
instead of failing.
When looking for a region to allocate, we used to check to see if the
start address was < end. In the case where A..B is allocated already,
and one wants to allocate A..C (B < C), then this test would
improperly fail (which means we'd examine that region as a possible
one), and we'd return the region B+1..C+(B-A+1) rather than NULL.
Since C+(B-A+1) is necessarily larger than C (end argument), this is
incorrect behavior for rman_reserve_resource_bound().
The fix is to exclude those regions where r->r_start + count - 1 > end
rather than r->r_start > end. This bug has been in this code for a
very long time. I believe that all other tests against end are
correctly done.
This is why sio0 generated a message about interrupts not being
enabled properly for the device. When fdc had a bug that allocated
from 0x3f7 to 0x3fb, sio0 was then given 0x3fc-0x404 rather than the
0x3f8-0x3ff that it wanted. Now when fdc has the same bug, sio0 fails
to allocate its ports, which is the proper behavior. Since the probe
failed, we never saw the messed up resources reported.
I suspect that there are other places in the tree that have weird
looping or other odd workarounds to try to cope with the observed
weirdness this bug can introduce. These workarounds should be located
and eliminated.
A minor fix to the corresponding debug output was also made, matching the test above.
'nice' by: mdodd
Sponsored by: timing solutions (http://www.timing.com/)
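A worked example of the failure (illustrative numbers): with [10, 19] already
allocated, a request for [10, 29] (count = 20) reaches the free region that
begins at 20. The old test, s->r_start > end (20 > 29, false), accepted that
region, so the allocator returned [20, 39], which runs past the requested end
of 29. The corrected test, r->r_start + count - 1 > end (20 + 20 - 1 = 39 > 29,
true), rejects it and the allocation fails as it should.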
2005-03-15 20:28:51 +00:00
|
|
|
s->r_start, end));
|
1998-10-29 01:48:36 +00:00
|
|
|
break;
|
|
|
|
}
|
2016-03-18 01:28:41 +00:00
|
|
|
if (s->r_start > RM_MAX_END - amask) {
|
|
|
|
DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
|
2014-05-05 15:59:31 +00:00
|
|
|
s->r_start, amask));
|
|
|
|
break;
|
|
|
|
}
|
1998-10-29 01:48:36 +00:00
|
|
|
if (s->r_flags & RF_ALLOCATED) {
|
2000-05-03 00:20:36 +00:00
|
|
|
DPRINTF(("region is allocated\n"));
|
1998-10-29 01:48:36 +00:00
|
|
|
continue;
|
|
|
|
}
|
2016-03-18 01:28:41 +00:00
|
|
|
rstart = ummax(s->r_start, start);
|
2001-12-21 21:40:55 +00:00
|
|
|
/*
|
|
|
|
* Try to find a region by adjusting to boundary and alignment
|
|
|
|
* until both conditions are satisfied. This is not an optimal
|
|
|
|
* algorithm, but in most cases it isn't really bad, either.
|
|
|
|
*/
|
|
|
|
do {
|
|
|
|
rstart = (rstart + amask) & ~amask;
|
2003-01-21 17:02:21 +00:00
|
|
|
if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
|
2001-12-21 21:40:55 +00:00
|
|
|
rstart += bound - (rstart & ~bmask);
|
|
|
|
} while ((rstart & amask) != 0 && rstart < end &&
|
|
|
|
rstart < s->r_end);
|
2016-03-18 01:28:41 +00:00
|
|
|
rend = ummin(s->r_end, ummax(rstart + count - 1, end));
|
2002-08-29 12:39:21 +00:00
|
|
|
if (rstart > rend) {
|
|
|
|
DPRINTF(("adjusted start exceeds end\n"));
|
|
|
|
continue;
|
|
|
|
}
|
2016-03-18 01:28:41 +00:00
|
|
|
DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
|
2000-05-03 00:20:36 +00:00
|
|
|
rstart, rend, (rend - rstart + 1), count));
|
1998-10-29 01:48:36 +00:00
|
|
|
|
|
|
|
if ((rend - rstart + 1) >= count) {
|
2016-03-18 01:28:41 +00:00
|
|
|
DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
|
2005-03-15 20:15:15 +00:00
|
|
|
rstart, rend, (rend - rstart + 1)));
|
1998-10-29 01:48:36 +00:00
|
|
|
if ((s->r_end - s->r_start + 1) == count) {
|
2000-05-03 00:20:36 +00:00
|
|
|
DPRINTF(("candidate region is entire chunk\n"));
|
1998-10-29 01:48:36 +00:00
|
|
|
rv = s;
|
2014-05-28 16:57:17 +00:00
|
|
|
rv->r_flags = new_rflags;
|
1998-10-29 01:48:36 +00:00
|
|
|
rv->r_dev = dev;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If s->r_start < rstart and
|
|
|
|
* s->r_end > rstart + count - 1, then
|
|
|
|
* we need to split the region into three pieces
|
|
|
|
* (the middle one will get returned to the user).
|
|
|
|
* Otherwise, we are allocating at either the
|
|
|
|
* beginning or the end of s, so we only need to
|
|
|
|
* split it in two. The first case requires
|
|
|
|
* two new allocations; the second requires but one.
|
|
|
|
*/
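/*
 * An illustrative sketch of the three outcomes (rend here stands for
 * rstart + count - 1):
 *
 *   middle:    [s->r_start .. rstart-1][rstart .. rend][rend+1 .. s->r_end]
 *   beginning: [rstart .. rend][rend+1 .. s->r_end]
 *   end:       [s->r_start .. rstart-1][rstart .. rend]
 */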
|
2005-09-24 20:07:03 +00:00
|
|
|
rv = int_alloc_resource(M_NOWAIT);
|
2005-11-17 08:56:21 +00:00
|
|
|
if (rv == NULL)
|
1998-10-29 01:48:36 +00:00
|
|
|
goto out;
|
|
|
|
rv->r_start = rstart;
|
|
|
|
rv->r_end = rstart + count - 1;
|
2014-05-28 16:57:17 +00:00
|
|
|
rv->r_flags = new_rflags;
|
1998-10-29 01:48:36 +00:00
|
|
|
rv->r_dev = dev;
|
1999-04-16 21:22:55 +00:00
|
|
|
rv->r_rm = rm;
|
2005-11-17 08:56:21 +00:00
|
|
|
|
1998-10-29 01:48:36 +00:00
|
|
|
if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
|
2000-05-03 00:20:36 +00:00
|
|
|
DPRINTF(("splitting region in three parts: "
|
2016-03-18 01:28:41 +00:00
|
|
|
"[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
|
1998-10-29 01:48:36 +00:00
|
|
|
s->r_start, rv->r_start - 1,
|
|
|
|
rv->r_start, rv->r_end,
|
2000-05-03 00:20:36 +00:00
|
|
|
rv->r_end + 1, s->r_end));
|
1998-10-29 01:48:36 +00:00
|
|
|
/*
|
|
|
|
* We are allocating in the middle.
|
|
|
|
*/
|
2005-09-24 20:07:03 +00:00
|
|
|
r = int_alloc_resource(M_NOWAIT);
|
2005-11-17 08:56:21 +00:00
|
|
|
if (r == NULL) {
|
1998-10-29 01:48:36 +00:00
|
|
|
free(rv, M_RMAN);
|
2005-11-17 08:56:21 +00:00
|
|
|
rv = NULL;
|
1998-10-29 01:48:36 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
r->r_start = rv->r_end + 1;
|
|
|
|
r->r_end = s->r_end;
|
|
|
|
r->r_flags = s->r_flags;
|
1999-04-16 21:22:55 +00:00
|
|
|
r->r_rm = rm;
|
1998-10-29 01:48:36 +00:00
|
|
|
s->r_end = rv->r_start - 1;
|
2000-11-14 20:46:02 +00:00
|
|
|
TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
|
1998-10-29 01:48:36 +00:00
|
|
|
r_link);
|
2000-11-14 20:46:02 +00:00
|
|
|
TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
|
1998-10-29 01:48:36 +00:00
|
|
|
r_link);
|
|
|
|
} else if (s->r_start == rv->r_start) {
|
2000-05-03 00:20:36 +00:00
|
|
|
DPRINTF(("allocating from the beginning\n"));
|
1998-10-29 01:48:36 +00:00
|
|
|
/*
|
|
|
|
* We are allocating at the beginning.
|
|
|
|
*/
|
|
|
|
s->r_start = rv->r_end + 1;
|
2000-11-14 20:46:02 +00:00
|
|
|
TAILQ_INSERT_BEFORE(s, rv, r_link);
|
1998-10-29 01:48:36 +00:00
|
|
|
} else {
|
2000-05-03 00:20:36 +00:00
|
|
|
DPRINTF(("allocating at the end\n"));
|
1998-10-29 01:48:36 +00:00
|
|
|
/*
|
|
|
|
* We are allocating at the end.
|
|
|
|
*/
|
|
|
|
s->r_end = rv->r_start - 1;
|
2000-11-14 20:46:02 +00:00
|
|
|
TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
|
1998-10-29 01:48:36 +00:00
|
|
|
r_link);
|
|
|
|
}
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now find an acceptable shared region, if the client's requirements
|
|
|
|
* allow sharing. By our implementation restriction, a candidate
|
|
|
|
* region must match exactly by both size and sharing type in order
|
|
|
|
* to be considered compatible with the client's request. (The
|
|
|
|
* former restriction could probably be lifted without too much
|
|
|
|
* additional work, but this does not seem warranted.)
|
|
|
|
*/
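/*
 * Example (illustrative): a second RF_SHAREABLE request for an IRQ
 * whose existing region spans exactly [start, start + count - 1] with
 * the same sharing type joins that region's sharing list below instead
 * of consuming new address space; a request overlapping only part of
 * the region is not considered compatible.
 */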
|
2000-05-03 00:20:36 +00:00
|
|
|
DPRINTF(("no unshared regions found\n"));
|
2014-07-16 22:18:19 +00:00
|
|
|
if ((flags & RF_SHAREABLE) == 0)
|
1998-10-29 01:48:36 +00:00
|
|
|
goto out;
|
|
|
|
|
2014-05-19 04:44:27 +00:00
|
|
|
for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
|
2014-05-28 16:57:17 +00:00
|
|
|
if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
|
2014-05-19 04:44:27 +00:00
|
|
|
s->r_start >= start &&
|
|
|
|
(s->r_end - s->r_start + 1) == count &&
|
2001-12-21 21:40:55 +00:00
|
|
|
(s->r_start & amask) == 0 &&
|
|
|
|
((s->r_start ^ s->r_end) & bmask) == 0) {
|
2005-09-24 20:07:03 +00:00
|
|
|
rv = int_alloc_resource(M_NOWAIT);
|
2005-11-17 08:56:21 +00:00
|
|
|
if (rv == NULL)
|
1998-10-29 01:48:36 +00:00
|
|
|
goto out;
|
|
|
|
rv->r_start = s->r_start;
|
|
|
|
rv->r_end = s->r_end;
|
2014-05-28 16:57:17 +00:00
|
|
|
rv->r_flags = new_rflags;
|
1998-10-29 01:48:36 +00:00
|
|
|
rv->r_dev = dev;
|
|
|
|
rv->r_rm = rm;
|
2005-11-17 08:56:21 +00:00
|
|
|
if (s->r_sharehead == NULL) {
|
1998-10-29 01:48:36 +00:00
|
|
|
s->r_sharehead = malloc(sizeof *s->r_sharehead,
|
2000-12-08 21:51:06 +00:00
|
|
|
M_RMAN, M_NOWAIT | M_ZERO);
|
2005-11-17 08:56:21 +00:00
|
|
|
if (s->r_sharehead == NULL) {
|
1998-10-29 01:48:36 +00:00
|
|
|
free(rv, M_RMAN);
|
2005-11-17 08:56:21 +00:00
|
|
|
rv = NULL;
|
1998-10-29 01:48:36 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
LIST_INIT(s->r_sharehead);
|
2005-11-17 08:56:21 +00:00
|
|
|
LIST_INSERT_HEAD(s->r_sharehead, s,
|
1998-10-29 01:48:36 +00:00
|
|
|
r_sharelink);
|
1999-03-29 08:30:17 +00:00
|
|
|
s->r_flags |= RF_FIRSTSHARE;
|
1998-10-29 01:48:36 +00:00
|
|
|
}
|
|
|
|
rv->r_sharehead = s->r_sharehead;
|
|
|
|
LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* We couldn't find anything.
|
|
|
|
*/
|
2005-11-17 08:56:21 +00:00
|
|
|
|
2014-07-16 22:18:19 +00:00
|
|
|
out:
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
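An illustrative before/after for this file's sleep lock (the old
mtx_enter()/mtx_exit() spellings are taken from the description above):

	mtx_enter(rm->rm_mtx, MTX_DEF);		/* old */
	mtx_exit(rm->rm_mtx, MTX_DEF);
	mtx_lock(rm->rm_mtx);			/* new */
	mtx_unlock(rm->rm_mtx);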
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(rm->rm_mtx);
|
2005-11-17 08:56:21 +00:00
|
|
|
return (rv == NULL ? NULL : &rv->r_r);
|
1998-10-29 01:48:36 +00:00
|
|
|
}
|
|
|
|
|
2001-12-21 21:40:55 +00:00
|
|
|
struct resource *
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
|
2016-05-04 23:31:52 +00:00
|
|
|
rman_res_t count, u_int flags, device_t dev)
|
2001-12-21 21:40:55 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
|
|
|
|
dev));
|
|
|
|
}
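/*
 * Illustrative call from a bus driver (sc->mem_rman and child are
 * hypothetical names): reserve any 0x100-byte range within a window
 * and activate it.
 *
 *	r = rman_reserve_resource(&sc->mem_rman, 0x80000000, 0x8fffffff,
 *	    0x100, RF_ACTIVE, child);
 *	if (r == NULL)
 *		return (ENXIO);
 */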
|
|
|
|
|
1998-10-29 01:48:36 +00:00
|
|
|
int
|
2005-09-24 20:07:03 +00:00
|
|
|
rman_activate_resource(struct resource *re)
|
1998-10-29 01:48:36 +00:00
|
|
|
{
|
2014-07-16 22:18:19 +00:00
|
|
|
struct resource_i *r;
|
1998-10-29 01:48:36 +00:00
|
|
|
struct rman *rm;
|
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
r = re->__r_i;
|
1998-10-29 01:48:36 +00:00
|
|
|
rm = r->r_rm;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(rm->rm_mtx);
|
2014-07-16 22:18:19 +00:00
|
|
|
r->r_flags |= RF_ACTIVE;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(rm->rm_mtx);
|
1999-04-16 21:22:55 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rman_deactivate_resource(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
struct rman *rm;
|
1999-04-16 21:22:55 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
rm = r->__r_i->r_rm;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(rm->rm_mtx);
|
2014-07-16 22:18:19 +00:00
|
|
|
r->__r_i->r_flags &= ~RF_ACTIVE;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(rm->rm_mtx);
|
1998-10-29 01:48:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2005-09-24 20:07:03 +00:00
|
|
|
int_rman_release_resource(struct rman *rm, struct resource_i *r)
|
1998-10-29 01:48:36 +00:00
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
struct resource_i *s, *t;
|
1998-10-29 01:48:36 +00:00
|
|
|
|
|
|
|
if (r->r_flags & RF_ACTIVE)
|
2014-07-16 22:18:19 +00:00
|
|
|
r->r_flags &= ~RF_ACTIVE;
|
1998-10-29 01:48:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for a sharing list first. If there is one, then we don't
|
|
|
|
* have to think as hard.
|
|
|
|
*/
|
|
|
|
if (r->r_sharehead) {
|
|
|
|
/*
|
|
|
|
* If a sharing list exists, then we know there are at
|
|
|
|
* least two sharers.
|
|
|
|
*
|
|
|
|
* If we are in the main resource list, appoint someone else.
|
|
|
|
*/
|
|
|
|
LIST_REMOVE(r, r_sharelink);
|
1999-11-16 16:28:58 +00:00
|
|
|
s = LIST_FIRST(r->r_sharehead);
|
1998-10-29 01:48:36 +00:00
|
|
|
if (r->r_flags & RF_FIRSTSHARE) {
|
|
|
|
s->r_flags |= RF_FIRSTSHARE;
|
2000-11-14 20:46:02 +00:00
|
|
|
TAILQ_INSERT_BEFORE(r, s, r_link);
|
|
|
|
TAILQ_REMOVE(&rm->rm_list, r, r_link);
|
1998-10-29 01:48:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure that the sharing list goes away completely
|
|
|
|
* if the resource is no longer being shared at all.
|
|
|
|
*/
|
2005-11-17 08:56:21 +00:00
|
|
|
if (LIST_NEXT(s, r_sharelink) == NULL) {
|
1998-10-29 01:48:36 +00:00
|
|
|
free(s->r_sharehead, M_RMAN);
|
2005-11-17 08:56:21 +00:00
|
|
|
s->r_sharehead = NULL;
|
1998-10-29 01:48:36 +00:00
|
|
|
s->r_flags &= ~RF_FIRSTSHARE;
|
|
|
|
}
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look at the adjacent resources in the list and see if our
|
2004-08-05 15:48:18 +00:00
|
|
|
* segment can be merged with any of them. If either of the
|
|
|
|
* resources is allocated or is not exactly adjacent then they
|
|
|
|
* cannot be merged with our segment.
|
1998-10-29 01:48:36 +00:00
|
|
|
*/
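/*
 * The four possible layouts, illustrated (free = unallocated neighbor,
 * r = segment being released, alloc = allocated neighbor or list
 * boundary):
 *
 *   [free][r][free]   -> all three merge into one free segment
 *   [free][r][alloc]  -> the previous free segment absorbs r
 *   [alloc][r][free]  -> the next free segment absorbs r
 *   [alloc][r][alloc] -> r is simply marked unallocated in place
 */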
|
2000-11-14 20:46:02 +00:00
|
|
|
s = TAILQ_PREV(r, resource_head, r_link);
|
2004-08-05 15:48:18 +00:00
|
|
|
if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
|
|
|
|
s->r_end + 1 != r->r_start))
|
|
|
|
s = NULL;
|
2000-11-14 20:46:02 +00:00
|
|
|
t = TAILQ_NEXT(r, r_link);
|
2004-08-05 15:48:18 +00:00
|
|
|
if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
|
|
|
|
r->r_end + 1 != t->r_start))
|
|
|
|
t = NULL;
|
1998-10-29 01:48:36 +00:00
|
|
|
|
2004-08-05 15:48:18 +00:00
|
|
|
if (s != NULL && t != NULL) {
|
1998-10-29 01:48:36 +00:00
|
|
|
/*
|
|
|
|
* Merge all three segments.
|
|
|
|
*/
|
|
|
|
s->r_end = t->r_end;
|
2000-11-14 20:46:02 +00:00
|
|
|
TAILQ_REMOVE(&rm->rm_list, r, r_link);
|
|
|
|
TAILQ_REMOVE(&rm->rm_list, t, r_link);
|
1998-10-29 01:48:36 +00:00
|
|
|
free(t, M_RMAN);
|
2004-08-05 15:48:18 +00:00
|
|
|
} else if (s != NULL) {
|
1998-10-29 01:48:36 +00:00
|
|
|
/*
|
|
|
|
* Merge previous segment with ours.
|
|
|
|
*/
|
|
|
|
s->r_end = r->r_end;
|
2000-11-14 20:46:02 +00:00
|
|
|
TAILQ_REMOVE(&rm->rm_list, r, r_link);
|
2004-08-05 15:48:18 +00:00
|
|
|
} else if (t != NULL) {
|
1998-10-29 01:48:36 +00:00
|
|
|
/*
|
|
|
|
* Merge next segment with ours.
|
|
|
|
*/
|
|
|
|
t->r_start = r->r_start;
|
2000-11-14 20:46:02 +00:00
|
|
|
TAILQ_REMOVE(&rm->rm_list, r, r_link);
|
1998-10-29 01:48:36 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* At this point, we know there is nothing we
|
|
|
|
* can potentially merge with, because on each
|
|
|
|
* side, there is either nothing there or what is
|
|
|
|
* there is still allocated. In that case, we don't
|
|
|
|
* want to remove r from the list; we simply want to
|
|
|
|
* change it to an unallocated region and return
|
|
|
|
* without freeing anything.
|
|
|
|
*/
|
|
|
|
r->r_flags &= ~RF_ALLOCATED;
|
2011-06-06 13:12:56 +00:00
|
|
|
r->r_dev = NULL;
|
1998-10-29 01:48:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
free(r, M_RMAN);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2005-09-24 20:07:03 +00:00
|
|
|
rman_release_resource(struct resource *re)
|
1998-10-29 01:48:36 +00:00
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
int rv;
|
|
|
|
struct resource_i *r;
|
|
|
|
struct rman *rm;
|
1998-10-29 01:48:36 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
r = re->__r_i;
|
|
|
|
rm = r->r_rm;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(rm->rm_mtx);
|
1998-10-29 01:48:36 +00:00
|
|
|
rv = int_rman_release_resource(rm, r);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(rm->rm_mtx);
|
1998-10-29 01:48:36 +00:00
|
|
|
return (rv);
|
|
|
|
}
|
2000-10-17 22:08:03 +00:00
|
|
|
|
|
|
|
uint32_t
|
|
|
|
rman_make_alignment_flags(uint32_t size)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
int i;
|
2000-10-17 22:08:03 +00:00
|
|
|
|
2000-10-22 04:48:11 +00:00
|
|
|
/*
|
|
|
|
* Find the highest bit set, and add one if more than one bit is
|
|
|
|
* set. We're effectively computing the ceil(log2(size)) here.
|
|
|
|
*/
|
2001-12-21 21:40:55 +00:00
|
|
|
for (i = 31; i > 0; i--)
|
2000-10-22 04:48:11 +00:00
|
|
|
if ((1 << i) & size)
|
|
|
|
break;
|
|
|
|
if (~(1 << i) & size)
|
|
|
|
i++;
|
2000-10-17 22:08:03 +00:00
|
|
|
|
|
|
|
return(RF_ALIGNMENT_LOG2(i));
|
2000-10-22 04:48:11 +00:00
|
|
|
}
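/*
 * Illustrative values: rman_make_alignment_flags(0x1000) yields
 * RF_ALIGNMENT_LOG2(12) (4KB alignment), while the non-power-of-two
 * size 0x1800 rounds up to RF_ALIGNMENT_LOG2(13) (8KB alignment).
 */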
|
2002-11-27 03:55:22 +00:00
|
|
|
|
2008-08-25 16:16:57 +00:00
|
|
|
void
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_set_start(struct resource *r, rman_res_t start)
|
2008-08-25 16:16:57 +00:00
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2008-08-25 16:16:57 +00:00
|
|
|
r->__r_i->r_start = start;
|
|
|
|
}
|
|
|
|
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_res_t
|
2002-11-27 03:55:22 +00:00
|
|
|
rman_get_start(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
return (r->__r_i->r_start);
|
2002-11-27 03:55:22 +00:00
|
|
|
}
|
|
|
|
|
2008-08-25 16:16:57 +00:00
|
|
|
void
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_set_end(struct resource *r, rman_res_t end)
|
2008-08-25 16:16:57 +00:00
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2008-08-25 16:16:57 +00:00
|
|
|
r->__r_i->r_end = end;
|
|
|
|
}
|
|
|
|
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_res_t
|
2002-11-27 03:55:22 +00:00
|
|
|
rman_get_end(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
return (r->__r_i->r_end);
|
2002-11-27 03:55:22 +00:00
|
|
|
}
|
|
|
|
|
2016-01-27 02:23:54 +00:00
|
|
|
rman_res_t
|
2002-11-27 03:55:22 +00:00
|
|
|
rman_get_size(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
return (r->__r_i->r_end - r->__r_i->r_start + 1);
|
2002-11-27 03:55:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
u_int
|
|
|
|
rman_get_flags(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
return (r->__r_i->r_flags);
|
2002-11-27 03:55:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rman_set_virtual(struct resource *r, void *v)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
r->__r_i->r_virtual = v;
|
2002-11-27 03:55:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
|
|
|
rman_get_virtual(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
return (r->__r_i->r_virtual);
|
2002-11-27 03:55:22 +00:00
|
|
|
}
|
|
|
|
|
2018-12-17 17:11:00 +00:00
|
|
|
void
|
|
|
|
rman_set_irq_cookie(struct resource *r, void *c)
|
|
|
|
{
|
|
|
|
|
|
|
|
r->__r_i->r_irq_cookie = c;
|
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
|
|
|
rman_get_irq_cookie(struct resource *r)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (r->__r_i->r_irq_cookie);
|
|
|
|
}
|
|
|
|
|
2002-11-27 03:55:22 +00:00
|
|
|
void
|
|
|
|
rman_set_bustag(struct resource *r, bus_space_tag_t t)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2002-11-27 03:55:22 +00:00
|
|
|
r->r_bustag = t;
|
|
|
|
}
|
|
|
|
|
|
|
|
bus_space_tag_t
|
|
|
|
rman_get_bustag(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2002-11-27 03:55:22 +00:00
|
|
|
return (r->r_bustag);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2002-11-27 03:55:22 +00:00
|
|
|
r->r_bushandle = h;
|
|
|
|
}
|
|
|
|
|
|
|
|
bus_space_handle_t
|
|
|
|
rman_get_bushandle(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2002-11-27 03:55:22 +00:00
|
|
|
return (r->r_bushandle);
|
|
|
|
}
|
|
|
|
|
2016-05-20 17:57:47 +00:00
|
|
|
void
|
|
|
|
rman_set_mapping(struct resource *r, struct resource_map *map)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(rman_get_size(r) == map->r_size,
|
|
|
|
("rman_set_mapping: size mismatch"));
|
|
|
|
rman_set_bustag(r, map->r_bustag);
|
|
|
|
rman_set_bushandle(r, map->r_bushandle);
|
|
|
|
rman_set_virtual(r, map->r_vaddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rman_get_mapping(struct resource *r, struct resource_map *map)
|
|
|
|
{
|
|
|
|
|
|
|
|
map->r_bustag = rman_get_bustag(r);
|
|
|
|
map->r_bushandle = rman_get_bushandle(r);
|
|
|
|
map->r_size = rman_get_size(r);
|
|
|
|
map->r_vaddr = rman_get_virtual(r);
|
|
|
|
}
|
|
|
|
|
2002-11-27 03:55:22 +00:00
|
|
|
void
|
|
|
|
rman_set_rid(struct resource *r, int rid)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
r->__r_i->r_rid = rid;
|
2002-11-27 03:55:22 +00:00
|
|
|
}
|
|
|
|
|
2008-08-25 16:16:57 +00:00
|
|
|
int
|
|
|
|
rman_get_rid(struct resource *r)
|
2004-07-01 16:22:10 +00:00
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2008-08-25 16:16:57 +00:00
|
|
|
return (r->__r_i->r_rid);
|
2004-07-01 16:22:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2016-05-04 23:31:52 +00:00
|
|
|
rman_set_device(struct resource *r, device_t dev)
|
2002-11-27 03:55:22 +00:00
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2008-08-25 16:16:57 +00:00
|
|
|
r->__r_i->r_dev = dev;
|
2002-11-27 03:55:22 +00:00
|
|
|
}
|
2003-02-12 07:00:59 +00:00
|
|
|
|
2016-05-04 23:31:52 +00:00
|
|
|
device_t
|
2003-02-12 07:00:59 +00:00
|
|
|
rman_get_device(struct resource *r)
|
|
|
|
{
|
2014-07-07 22:02:39 +00:00
|
|
|
|
2005-09-24 20:07:03 +00:00
|
|
|
return (r->__r_i->r_dev);
|
2003-02-12 07:00:59 +00:00
|
|
|
}
|
2005-03-24 18:13:11 +00:00
|
|
|
|
2005-09-25 20:10:10 +00:00
|
|
|
int
|
|
|
|
rman_is_region_manager(struct resource *r, struct rman *rm)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (r->__r_i->r_rm == rm);
|
|
|
|
}
|
|
|
|
|
2005-03-24 18:13:11 +00:00
|
|
|
/*
|
|
|
|
* Sysctl interface for scanning the resource lists.
|
|
|
|
*
|
|
|
|
* We take two input parameters (besides the leading bus generation
|
|
|
|
* check value): the index into the list of resource managers, and
* the resource offset into the list.
|
|
|
|
*/
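/*
 * Example (illustrative): an OID suffix of { generation, 1, -1 }
 * returns the u_rman describing the second resource manager, while
 * { generation, 1, 0 } returns its first resource as a u_resource.
 */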
|
|
|
|
static int
|
|
|
|
sysctl_rman(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
int *name = (int *)arg1;
|
|
|
|
u_int namelen = arg2;
|
|
|
|
int rman_idx, res_idx;
|
|
|
|
struct rman *rm;
|
2005-09-24 20:07:03 +00:00
|
|
|
struct resource_i *res;
|
2009-05-19 14:08:21 +00:00
|
|
|
struct resource_i *sres;
|
2005-03-24 18:13:11 +00:00
|
|
|
struct u_rman urm;
|
|
|
|
struct u_resource ures;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (namelen != 3)
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
if (bus_data_generation_check(name[0]))
|
|
|
|
return (EINVAL);
|
|
|
|
rman_idx = name[1];
|
|
|
|
res_idx = name[2];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the indexed resource manager
|
|
|
|
*/
|
2005-11-17 08:56:21 +00:00
|
|
|
mtx_lock(&rman_mtx);
|
2005-03-24 18:13:11 +00:00
|
|
|
TAILQ_FOREACH(rm, &rman_head, rm_link) {
|
|
|
|
if (rman_idx-- == 0)
|
|
|
|
break;
|
|
|
|
}
|
2005-11-17 08:56:21 +00:00
|
|
|
mtx_unlock(&rman_mtx);
|
2005-03-24 18:13:11 +00:00
|
|
|
if (rm == NULL)
|
|
|
|
return (ENOENT);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the resource index is -1, we want details on the
|
|
|
|
* resource manager.
|
|
|
|
*/
|
|
|
|
if (res_idx == -1) {
|
2005-05-06 02:50:00 +00:00
|
|
|
bzero(&urm, sizeof(urm));
|
2005-03-24 18:13:11 +00:00
|
|
|
urm.rm_handle = (uintptr_t)rm;
|
2008-10-22 18:20:45 +00:00
|
|
|
if (rm->rm_descr != NULL)
|
|
|
|
strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
|
2005-03-24 18:13:11 +00:00
|
|
|
urm.rm_start = rm->rm_start;
|
|
|
|
urm.rm_size = rm->rm_end - rm->rm_start + 1;
|
|
|
|
urm.rm_type = rm->rm_type;
|
|
|
|
|
|
|
|
error = SYSCTL_OUT(req, &urm, sizeof(urm));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the indexed resource and return it.
|
|
|
|
*/
|
2005-11-17 08:56:21 +00:00
|
|
|
mtx_lock(rm->rm_mtx);
|
2005-03-24 18:13:11 +00:00
|
|
|
TAILQ_FOREACH(res, &rm->rm_list, r_link) {
|
2009-05-19 14:08:21 +00:00
|
|
|
if (res->r_sharehead != NULL) {
|
|
|
|
LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
|
|
|
|
if (res_idx-- == 0) {
|
|
|
|
res = sres;
|
|
|
|
goto found;
|
2005-03-24 18:13:11 +00:00
|
|
|
}
|
|
|
|
}
|
2009-05-19 14:08:21 +00:00
|
|
|
else if (res_idx-- == 0)
|
|
|
|
goto found;
|
2005-03-24 18:13:11 +00:00
|
|
|
}
|
2005-11-17 08:56:21 +00:00
|
|
|
mtx_unlock(rm->rm_mtx);
|
2005-03-24 18:13:11 +00:00
|
|
|
return (ENOENT);
|
2009-05-19 14:08:21 +00:00
|
|
|
|
|
|
|
found:
|
|
|
|
bzero(&ures, sizeof(ures));
|
|
|
|
ures.r_handle = (uintptr_t)res;
|
|
|
|
ures.r_parent = (uintptr_t)res->r_rm;
|
|
|
|
ures.r_device = (uintptr_t)res->r_dev;
|
|
|
|
if (res->r_dev != NULL) {
|
|
|
|
if (device_get_name(res->r_dev) != NULL) {
|
|
|
|
snprintf(ures.r_devname, RM_TEXTLEN,
|
|
|
|
"%s%d",
|
|
|
|
device_get_name(res->r_dev),
|
|
|
|
device_get_unit(res->r_dev));
|
|
|
|
} else {
|
|
|
|
strlcpy(ures.r_devname, "nomatch",
|
|
|
|
RM_TEXTLEN);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ures.r_devname[0] = '\0';
|
|
|
|
}
|
|
|
|
ures.r_start = res->r_start;
|
|
|
|
ures.r_size = res->r_end - res->r_start + 1;
|
|
|
|
ures.r_flags = res->r_flags;
|
|
|
|
|
|
|
|
mtx_unlock(rm->rm_mtx);
|
|
|
|
error = SYSCTL_OUT(req, &ures, sizeof(ures));
|
|
|
|
return (error);
|
2005-03-24 18:13:11 +00:00
|
|
|
}
|
|
|
|
|
2020-02-26 14:26:36 +00:00
|
|
|
static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD | CTLFLAG_MPSAFE,
|
|
|
|
sysctl_rman,
|
2005-03-24 18:13:11 +00:00
|
|
|
"kernel resource manager");
|
2007-04-16 21:09:03 +00:00
|
|
|
|
|
|
|
#ifdef DDB
|
2011-04-13 19:10:56 +00:00
|
|
|
static void
|
|
|
|
dump_rman_header(struct rman *rm)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (db_pager_quit)
|
|
|
|
return;
|
2016-03-18 01:28:41 +00:00
|
|
|
db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
|
|
|
|
rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
|
2011-04-13 19:10:56 +00:00
|
|
|
}
|
|
|
|
|
2007-04-16 21:09:03 +00:00
|
|
|
static void
|
|
|
|
dump_rman(struct rman *rm)
|
|
|
|
{
|
|
|
|
struct resource_i *r;
|
|
|
|
const char *devname;
|
|
|
|
|
|
|
|
if (db_pager_quit)
|
|
|
|
return;
|
|
|
|
TAILQ_FOREACH(r, &rm->rm_list, r_link) {
|
|
|
|
if (r->r_dev != NULL) {
|
|
|
|
devname = device_get_nameunit(r->r_dev);
|
|
|
|
if (devname == NULL)
|
|
|
|
devname = "nomatch";
|
|
|
|
} else
|
|
|
|
devname = NULL;
|
2016-03-18 01:28:41 +00:00
|
|
|
db_printf(" 0x%jx-0x%jx (RID=%d) ",
|
2015-11-05 23:12:23 +00:00
|
|
|
r->r_start, r->r_end, r->r_rid);
|
2007-04-16 21:09:03 +00:00
|
|
|
if (devname != NULL)
|
|
|
|
db_printf("(%s)\n", devname);
|
|
|
|
else
|
|
|
|
db_printf("----\n");
|
|
|
|
if (db_pager_quit)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
DB_SHOW_COMMAND(rman, db_show_rman)
|
|
|
|
{
|
|
|
|
|
2011-04-13 19:10:56 +00:00
|
|
|
if (have_addr) {
|
|
|
|
dump_rman_header((struct rman *)addr);
|
2007-04-16 21:09:03 +00:00
|
|
|
dump_rman((struct rman *)addr);
|
2011-04-13 19:10:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
DB_SHOW_COMMAND(rmans, db_show_rmans)
|
|
|
|
{
|
|
|
|
struct rman *rm;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(rm, &rman_head, rm_link) {
|
|
|
|
dump_rman_header(rm);
|
|
|
|
}
|
2007-04-16 21:09:03 +00:00
|
|
|
}
|
|
|
|
|
2008-09-15 22:45:14 +00:00
|
|
|
DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
|
2007-04-16 21:09:03 +00:00
|
|
|
{
|
|
|
|
struct rman *rm;
|
|
|
|
|
2011-04-13 19:10:56 +00:00
|
|
|
TAILQ_FOREACH(rm, &rman_head, rm_link) {
|
|
|
|
dump_rman_header(rm);
|
2007-04-16 21:09:03 +00:00
|
|
|
dump_rman(rm);
|
2011-04-13 19:10:56 +00:00
|
|
|
}
|
2007-04-16 21:09:03 +00:00
|
|
|
}
|
2008-09-15 22:45:14 +00:00
|
|
|
DB_SHOW_ALIAS(allrman, db_show_all_rman);
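/*
 * Illustrative ddb usage (the address is hypothetical):
 *
 *	show rman 0xfffff80003bfa000	- one rman's header and regions
 *	show rmans			- headers of all resource managers
 *	show all rman (or allrman)	- headers and regions of them all
 */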
|
2007-04-16 21:09:03 +00:00
|
|
|
#endif
|