From 324e9ed2a4009953fc622809bb021d278af57666 Mon Sep 17 00:00:00 2001
From: Bruce Evans <bde@FreeBSD.org>
Date: Sat, 27 Jan 1996 00:13:33 +0000
Subject: [PATCH] Added a `boundary' arg to vm_page_alloc_contig().  Previously
 the only way to avoid crossing a 64K DMA boundary was to specify an alignment
 no smaller than the size even when the alignment didn't matter, and for sizes
 larger than a page, this reduced the chance of finding enough contiguous
 pages.  E.g., allocations of 8K not crossing a 64K boundary previously had to
 be allocated on 8K boundaries; now they can be allocated on any 4K boundary
 except (64 * n + 60)K.

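The boundary test added below is the usual power-of-two span check: a run
of pages crosses a `boundary'-aligned line iff the first and last bytes of
the run differ in the bits above the boundary mask.  A minimal sketch of
the idea, using the patch's own variables (not part of the change itself):

	/*
	 * True if [phys, phys + size) fits in one `boundary'-sized region.
	 * boundary == 0 disables the check, since ~(boundary - 1) is then 0.
	 */
	(((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0)

For an 8K run starting at (64 * n + 60)K the first byte lies below the next
64K line and the last byte above it, so the test fails; it passes at every
other 4K-aligned start.
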
Fixed bugs in vm_page_alloc_contig():
- the last page wasn't allocated for sizes that weren't a multiple of a page.
- failures of kmem_alloc_pageable() weren't handled.

Mutated vm_page_alloc_contig() to create a more convenient interface
named contigmalloc().  This is the same as the one in 1.1.5 except
it has `low' and `high' args, and the `alignment' and `boundary'
args are multipliers instead of masks.
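
As a hypothetical usage example (not part of this change): a driver that
needs an 8K buffer in ISA DMA'able memory (below 16M), page aligned and
not crossing a 64K boundary, might call the new interface as

	void *buf;

	buf = contigmalloc(8 * 1024, M_DEVBUF, M_NOWAIT,
			   0ul, 0xfffffful, PAGE_SIZE, 64 * 1024ul);
	if (buf == NULL)
		return (ENOMEM);

Passing 0 for `boundary' (as the compatibility wrapper for
vm_page_alloc_contig() does) disables the boundary restriction, and an
`alignment' of 1 accepts any page boundary.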
---
 sys/sys/malloc.h | 11 +++++++---
 sys/vm/vm_page.c | 52 ++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 50 insertions(+), 13 deletions(-)

diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
index f69d49a491df..5ef233e548b5 100644
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)malloc.h	8.3 (Berkeley) 1/12/94
- * $Id: malloc.h,v 1.9 1995/09/14 16:25:06 wollman Exp $
+ * $Id: malloc.h,v 1.10 1995/12/02 20:40:12 phk Exp $
  */
 
 #ifndef _SYS_MALLOC_H_
@@ -328,7 +328,12 @@ extern struct kmemstats kmemstats[];
 extern struct kmemusage *kmemusage;
 extern char *kmembase;
 extern struct kmembuckets bucket[];
-extern void *malloc __P((unsigned long size, int type, int flags));
-extern void free __P((void *addr, int type));
+
+void	*contigmalloc __P((unsigned long size, int type, int flags,
+			   unsigned long low, unsigned long high,
+			   unsigned long alignment, unsigned long boundary));
+void	free __P((void *addr, int type));
+void	*malloc __P((unsigned long size, int type, int flags));
 #endif /* KERNEL */
+
 #endif /* !_SYS_MALLOC_H_ */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 288f1406fb9b..7ef6e9f4cdc6 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
- *	$Id: vm_page.c,v 1.45 1996/01/04 21:13:23 wollman Exp $
+ *	$Id: vm_page.c,v 1.46 1996/01/19 04:00:10 dyson Exp $
  */
 
 /*
@@ -71,6 +71,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/malloc.h>
 #include <sys/proc.h>
 #include <sys/queue.h>
 #include <sys/vmmeter.h>
@@ -686,31 +687,47 @@ vm_page_alloc(object, pindex, page_req)
 	return (m);
 }
 
-vm_offset_t
-vm_page_alloc_contig(size, low, high, alignment)
-	vm_offset_t size;
-	vm_offset_t low;
-	vm_offset_t high;
-	vm_offset_t alignment;
+/*
+ * This interface is for merging with malloc() someday.
+ * Even if we never implement compaction so that contiguous allocation
+ * works after initialization time, malloc()'s data structures are good
+ * for statistics and for allocations of less than a page.
+ */
+void *
+contigmalloc(size, type, flags, low, high, alignment, boundary)
+	unsigned long size;	/* should be size_t here and for malloc() */
+	int type;
+	int flags;
+	unsigned long low;
+	unsigned long high;
+	unsigned long alignment;
+	unsigned long boundary;
 {
 	int i, s, start;
 	vm_offset_t addr, phys, tmp_addr;
 	vm_page_t pga = vm_page_array;
 
+	size = round_page(size);
+	if (size == 0)
+		panic("vm_page_alloc_contig: size must not be 0");
 	if ((alignment & (alignment - 1)) != 0)
 		panic("vm_page_alloc_contig: alignment must be a power of 2");
+	if ((boundary & (boundary - 1)) != 0)
+		panic("vm_page_alloc_contig: boundary must be a power of 2");
 
 	start = 0;
 	s = splhigh();
 again:
 	/*
-	 * Find first page in array that is free, within range, and aligned.
+	 * Find first page in array that is free, within range, aligned, and
+	 * such that the boundary won't be crossed.
 	 */
 	for (i = start; i < cnt.v_page_count; i++) {
 		phys = VM_PAGE_TO_PHYS(&pga[i]);
 		if ((pga[i].queue == PQ_FREE) &&
 		    (phys >= low) && (phys < high) &&
-		    ((phys & (alignment - 1)) == 0))
+		    ((phys & (alignment - 1)) == 0) &&
+		    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
 			break;
 	}
 
@@ -742,6 +759,10 @@ vm_page_alloc_contig(size, low, high, alignment)
 	 * return kernel VM pointer.
 	 */
 	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
+	if (addr == 0) {
+		splx(s);
+		return (NULL);
+	}
 
 	for (i = start; i < (start + size / PAGE_SIZE); i++) {
 		vm_page_t m = &pga[i];
@@ -763,7 +784,18 @@ vm_page_alloc_contig(size, low, high, alignment)
 	}
 
 	splx(s);
-	return (addr);
+	return ((void *)addr);
+}
+
+vm_offset_t
+vm_page_alloc_contig(size, low, high, alignment)
+	vm_offset_t size;
+	vm_offset_t low;
+	vm_offset_t high;
+	vm_offset_t alignment;
+{
+	return ((vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, low, high,
+					  alignment, 0ul));
 }
 
 /*