MFC r266814
Initialize r_flags the same way in all cases using a sanitized copy of flags that has several bits cleared. The RF_WANTED and RF_FIRSTSHARE bits are invalid in this context, and we want to defer setting RF_ACTIVE in r_flags until later. This should make rman_get_flags() return the correct answer in all cases. Add a KASSERT() to catch callers which incorrectly pass the RF_WANTED or RF_FIRSTSHARE flags. Do a strict equality check on the share type bits of flags. In particular, do an equality check on RF_PREFETCHABLE. The previous code would allow one type of mismatch of RF_PREFETCHABLE but disallow the other type of mismatch. Also, ignore the RF_ALIGNMENT_MASK bits since alignment validity should be handled by the amask check. This field contains an integer value, but previous code did a strange bitwise comparison on it. Leave the original value of flags unmolested as a minor debug aid. Change the start+amask overflow check to a KASSERT() since it is just meant to catch a highly unlikely programming error in the caller. Reviewed by: jhb
This commit is contained in:
parent
a33844e926
commit
eca942517b
@ -435,12 +435,14 @@ rman_adjust_resource(struct resource *rr, u_long start, u_long end)
|
||||
return (0);
|
||||
}
|
||||
|
||||
#define SHARE_TYPE(f) (f & (RF_SHAREABLE | RF_TIMESHARE | RF_PREFETCHABLE))
|
||||
|
||||
struct resource *
|
||||
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
|
||||
u_long count, u_long bound, u_int flags,
|
||||
struct device *dev)
|
||||
{
|
||||
u_int want_activate;
|
||||
u_int new_rflags;
|
||||
struct resource_i *r, *s, *rv;
|
||||
u_long rstart, rend, amask, bmask;
|
||||
|
||||
@ -450,8 +452,10 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
|
||||
"length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
|
||||
count, flags,
|
||||
dev == NULL ? "<null>" : device_get_nameunit(dev)));
|
||||
want_activate = (flags & RF_ACTIVE);
|
||||
flags &= ~RF_ACTIVE;
|
||||
KASSERT((flags & (RF_WANTED | RF_FIRSTSHARE)) == 0,
|
||||
("invalid flags %#x", flags));
|
||||
new_rflags = (flags & ~(RF_ACTIVE | RF_WANTED | RF_FIRSTSHARE)) |
|
||||
RF_ALLOCATED;
|
||||
|
||||
mtx_lock(rm->rm_mtx);
|
||||
|
||||
@ -466,10 +470,8 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
|
||||
}
|
||||
|
||||
amask = (1ul << RF_ALIGNMENT(flags)) - 1;
|
||||
if (start > ULONG_MAX - amask) {
|
||||
DPRINTF(("start+amask would wrap around\n"));
|
||||
goto out;
|
||||
}
|
||||
KASSERT(start <= ULONG_MAX - amask,
|
||||
("start (%#lx) + amask (%#lx) would wrap around", start, amask));
|
||||
|
||||
/* If bound is 0, bmask will also be 0 */
|
||||
bmask = ~(bound - 1);
|
||||
@ -522,7 +524,7 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
|
||||
if ((s->r_end - s->r_start + 1) == count) {
|
||||
DPRINTF(("candidate region is entire chunk\n"));
|
||||
rv = s;
|
||||
rv->r_flags |= RF_ALLOCATED | flags;
|
||||
rv->r_flags = new_rflags;
|
||||
rv->r_dev = dev;
|
||||
goto out;
|
||||
}
|
||||
@ -542,7 +544,7 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
|
||||
goto out;
|
||||
rv->r_start = rstart;
|
||||
rv->r_end = rstart + count - 1;
|
||||
rv->r_flags = flags | RF_ALLOCATED;
|
||||
rv->r_flags = new_rflags;
|
||||
rv->r_dev = dev;
|
||||
rv->r_rm = rm;
|
||||
|
||||
@ -603,7 +605,7 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
|
||||
goto out;
|
||||
|
||||
for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
|
||||
if ((s->r_flags & flags) == flags &&
|
||||
if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
|
||||
s->r_start >= start &&
|
||||
(s->r_end - s->r_start + 1) == count &&
|
||||
(s->r_start & amask) == 0 &&
|
||||
@ -613,8 +615,7 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
|
||||
goto out;
|
||||
rv->r_start = s->r_start;
|
||||
rv->r_end = s->r_end;
|
||||
rv->r_flags = s->r_flags &
|
||||
(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
|
||||
rv->r_flags = new_rflags;
|
||||
rv->r_dev = dev;
|
||||
rv->r_rm = rm;
|
||||
if (s->r_sharehead == NULL) {
|
||||
@ -641,13 +642,12 @@ rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
|
||||
*/
|
||||
out:
|
||||
/*
|
||||
* If the user specified RF_ACTIVE in the initial flags,
|
||||
* which is reflected in `want_activate', we attempt to atomically
|
||||
* If the user specified RF_ACTIVE in flags, we attempt to atomically
|
||||
* activate the resource. If this fails, we release the resource
|
||||
* and indicate overall failure. (This behavior probably doesn't
|
||||
* make sense for RF_TIMESHARE-type resources.)
|
||||
*/
|
||||
if (rv && want_activate) {
|
||||
if (rv && (flags & RF_ACTIVE) != 0) {
|
||||
struct resource_i *whohas;
|
||||
if (int_rman_activate_resource(rm, rv, &whohas)) {
|
||||
int_rman_release_resource(rm, rv);
|
||||
|
Loading…
x
Reference in New Issue
Block a user