/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991-1997 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <signal.h>
#include <sys/fbio.h>
#include <sys/kbio.h>
#include <sys/endian.h>
#include "vgl.h"

static int VGLBlank;
static byte VGLBorderColor;
static byte VGLSavePaletteRed[256];
static byte VGLSavePaletteGreen[256];
static byte VGLSavePaletteBlue[256];

#define ABS(a)		(((a)<0) ? -(a) : (a))
#define SGN(a)		(((a)<0) ? -1 : 1)
#define min(x, y)	(((x) < (y)) ? (x) : (y))
#define max(x, y)	(((x) > (y)) ? (x) : (y))

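/*
 * Plot a single pixel.  Writes to the displayed bitmap go to the in-core
 * shadow VGLVDisplay first and are then repeated on the screen, except
 * when VGLMouseFreezeXY() reports that the pixel lies under the mouse
 * cursor.  The fixed-size memcpy()s below reduce to single 1-4 byte
 * stores after inlining.
 */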
void
VGLSetXY(VGLBitmap *object, int x, int y, u_long color)
{
  int offset, soffset, undermouse;

  VGLCheckSwitch();
  if (x>=0 && x<object->VXsize && y>=0 && y<object->VYsize) {
    if (object == VGLDisplay) {
      undermouse = VGLMouseFreezeXY(x, y);
      VGLSetXY(&VGLVDisplay, x, y, color);
    } else if (object->Type != MEMBUF)
      return;		/* invalid */
    else
      undermouse = 0;
    if (!undermouse) {
      offset = (y * object->VXsize + x) * object->PixelBytes;
      switch (object->Type) {
      case VIDBUF8S:
      case VIDBUF16S:
      case VIDBUF32S:
        offset = VGLSetSegment(offset);
        /* FALLTHROUGH */
      case MEMBUF:
      case VIDBUF8:
      case VIDBUF16:
      case VIDBUF24:
      case VIDBUF32:
        color = htole32(color);
        switch (object->PixelBytes) {
        case 1:
          memcpy(&object->Bitmap[offset], &color, 1);
          break;
        case 2:
          memcpy(&object->Bitmap[offset], &color, 2);
          break;
        case 3:
          memcpy(&object->Bitmap[offset], &color, 3);
          break;
        case 4:
          memcpy(&object->Bitmap[offset], &color, 4);
          break;
        }
        break;
      case VIDBUF24S:
        soffset = VGLSetSegment(offset);
        color = htole32(color);
        switch (VGLAdpInfo.va_window_size - soffset) {
        case 1:
          memcpy(&object->Bitmap[soffset], &color, 1);
          soffset = VGLSetSegment(offset + 1);
          memcpy(&object->Bitmap[soffset], (byte *)&color + 1, 2);
          break;
        case 2:
          memcpy(&object->Bitmap[soffset], &color, 2);
          soffset = VGLSetSegment(offset + 2);
          memcpy(&object->Bitmap[soffset], (byte *)&color + 2, 1);
          break;
        default:
          memcpy(&object->Bitmap[soffset], &color, 3);
          break;
        }
        break;
      case VIDBUF8X:
        outb(0x3c4, 0x02);
        outb(0x3c5, 0x01 << (x&0x3));
        object->Bitmap[(unsigned)(VGLAdpInfo.va_line_width*y)+(x/4)] = ((byte)color);
        break;
      case VIDBUF4S:
        offset = VGLSetSegment(y*VGLAdpInfo.va_line_width + x/8);
        goto set_planar;
      case VIDBUF4:
        offset = y*VGLAdpInfo.va_line_width + x/8;
set_planar:
        outb(0x3c4, 0x02); outb(0x3c5, 0x0f);
        outb(0x3ce, 0x00); outb(0x3cf, (byte)color & 0x0f);	/* set/reset */
        outb(0x3ce, 0x01); outb(0x3cf, 0x0f);		/* set/reset enable */
        outb(0x3ce, 0x08); outb(0x3cf, 0x80 >> (x%8));	/* bit mask */
        object->Bitmap[offset] |= (byte)color;
      }
    }
    if (object == VGLDisplay)
      VGLMouseUnFreeze();
  }
}

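/*
 * Read back a single pixel.  Reads of the displayed bitmap are
 * redirected to the VGLVDisplay shadow, so the (possibly slow or
 * planar) frame buffer is never read directly; apart from that, only
 * MEMBUFs can be read.
 */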
u_long
VGLGetXY(VGLBitmap *object, int x, int y)
{
  u_long color;
  int offset;

  VGLCheckSwitch();
  if (x<0 || x>=object->VXsize || y<0 || y>=object->VYsize)
    return 0;
  if (object == VGLDisplay)
    object = &VGLVDisplay;
  else if (object->Type != MEMBUF)
    return 0;		/* invalid */
  offset = (y * object->VXsize + x) * object->PixelBytes;
  switch (object->PixelBytes) {
  case 1:
    memcpy(&color, &object->Bitmap[offset], 1);
    return le32toh(color) & 0xff;
  case 2:
    memcpy(&color, &object->Bitmap[offset], 2);
    return le32toh(color) & 0xffff;
  case 3:
    memcpy(&color, &object->Bitmap[offset], 3);
    return le32toh(color) & 0xffffff;
  case 4:
    memcpy(&color, &object->Bitmap[offset], 4);
    return le32toh(color);
  }
  return 0;		/* invalid */
}

/*
 * Symmetric Double Step Line Algorithm by Brian Wyvill from
 * "Graphics Gems", Academic Press, 1990.
 */

#define SL_SWAP(a,b)		{a^=b; b^=a; a^=b;}
#define SL_ABSOLUTE(i,j,k)	( (i-j)*(k = ( (i-j)<0 ? -1 : 1)))

void
plot(VGLBitmap * object, int x, int y, int flag, u_long color)
{
  /* non-zero flag indicates the pixels need swapping back. */
  if (flag)
    VGLSetXY(object, y, x, color);
  else
    VGLSetXY(object, x, y, color);
}

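/*
 * Draw a line with the double-step algorithm above: pixels are
 * generated from both ends of the line toward the middle, two per end
 * per iteration, with the 2-pixel pattern picked by the sign of the
 * decision variable D.
 */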
void
VGLLine(VGLBitmap *object, int x1, int y1, int x2, int y2, u_long color)
{
  int dx, dy, incr1, incr2, D, x, y, xend, c, pixels_left;
  int sign_x, sign_y, step, reverse, i;

  dx = SL_ABSOLUTE(x2, x1, sign_x);
  dy = SL_ABSOLUTE(y2, y1, sign_y);
  /* decide increment sign by the slope sign */
  if (sign_x == sign_y)
    step = 1;
  else
    step = -1;

  if (dy > dx) {	/* chooses axis of greatest movement (make dx) */
    SL_SWAP(x1, y1);
    SL_SWAP(x2, y2);
    SL_SWAP(dx, dy);
    reverse = 1;
  } else
    reverse = 0;
  /* note error check for dx==0 should be included here */
  if (x1 > x2) {	/* start from the smaller coordinate */
    x = x2;
    y = y2;
    /* x1 = x1; y1 = y1; */
  } else {
    x = x1;
    y = y1;
    x1 = x2;
    y1 = y2;
  }

  /* Note dx=n implies 0 - n or (dx+1) pixels to be set */
  /* Go round loop dx/4 times then plot last 0,1,2 or 3 pixels */
  /* In fact (dx-1)/4 as 2 pixels are already plotted */
  xend = (dx - 1) / 4;
  pixels_left = (dx - 1) % 4;	/* number of pixels left over at the end */
  plot(object, x, y, reverse, color);
  if (pixels_left < 0)
    return;	/* plot only one pixel for zero length vectors */
  plot(object, x1, y1, reverse, color);	/* plot first two points */
  incr2 = 4 * dy - 2 * dx;
  if (incr2 < 0) {	/* slope less than 1/2 */
    c = 2 * dy;
    incr1 = 2 * c;
    D = incr1 - dx;

    for (i = 0; i < xend; i++) {	/* plotting loop */
      ++x;
      --x1;
      if (D < 0) {
        /* pattern 1 forwards */
        plot(object, x, y, reverse, color);
        plot(object, ++x, y, reverse, color);
        /* pattern 1 backwards */
        plot(object, x1, y1, reverse, color);
        plot(object, --x1, y1, reverse, color);
        D += incr1;
      } else {
        if (D < c) {
          /* pattern 2 forwards */
          plot(object, x, y, reverse, color);
          plot(object, ++x, y += step, reverse, color);
          /* pattern 2 backwards */
          plot(object, x1, y1, reverse, color);
          plot(object, --x1, y1 -= step, reverse, color);
        } else {
          /* pattern 3 forwards */
          plot(object, x, y += step, reverse, color);
          plot(object, ++x, y, reverse, color);
          /* pattern 3 backwards */
          plot(object, x1, y1 -= step, reverse, color);
          plot(object, --x1, y1, reverse, color);
        }
        D += incr2;
      }
    }	/* end for */

    /* plot last pattern */
    if (pixels_left) {
      if (D < 0) {
        plot(object, ++x, y, reverse, color);	/* pattern 1 */
        if (pixels_left > 1)
          plot(object, ++x, y, reverse, color);
        if (pixels_left > 2)
          plot(object, --x1, y1, reverse, color);
      } else {
        if (D < c) {
          plot(object, ++x, y, reverse, color);	/* pattern 2 */
          if (pixels_left > 1)
            plot(object, ++x, y += step, reverse, color);
          if (pixels_left > 2)
            plot(object, --x1, y1, reverse, color);
        } else {
          /* pattern 3 */
          plot(object, ++x, y += step, reverse, color);
          if (pixels_left > 1)
            plot(object, ++x, y, reverse, color);
          if (pixels_left > 2)
            plot(object, --x1, y1 -= step, reverse, color);
        }
      }
    }	/* end if pixels_left */
  }
  /* end slope < 1/2 */
  else {	/* slope greater than 1/2 */
    c = 2 * (dy - dx);
    incr1 = 2 * c;
    D = incr1 + dx;
    for (i = 0; i < xend; i++) {
      ++x;
      --x1;
      if (D > 0) {
        /* pattern 4 forwards */
        plot(object, x, y += step, reverse, color);
        plot(object, ++x, y += step, reverse, color);
        /* pattern 4 backwards */
        plot(object, x1, y1 -= step, reverse, color);
        plot(object, --x1, y1 -= step, reverse, color);
        D += incr1;
      } else {
        if (D < c) {
          /* pattern 2 forwards */
          plot(object, x, y, reverse, color);
          plot(object, ++x, y += step, reverse, color);
          /* pattern 2 backwards */
          plot(object, x1, y1, reverse, color);
          plot(object, --x1, y1 -= step, reverse, color);
        } else {
          /* pattern 3 forwards */
          plot(object, x, y += step, reverse, color);
          plot(object, ++x, y, reverse, color);
          /* pattern 3 backwards */
          plot(object, x1, y1 -= step, reverse, color);
          plot(object, --x1, y1, reverse, color);
        }
        D += incr2;
      }
    }	/* end for */

    /* plot last pattern */
    if (pixels_left) {
      if (D > 0) {
        plot(object, ++x, y += step, reverse, color);	/* pattern 4 */
        if (pixels_left > 1)
          plot(object, ++x, y += step, reverse, color);
        if (pixels_left > 2)
          plot(object, --x1, y1 -= step, reverse, color);
      } else {
        if (D < c) {
          plot(object, ++x, y, reverse, color);	/* pattern 2 */
          if (pixels_left > 1)
            plot(object, ++x, y += step, reverse, color);
          if (pixels_left > 2)
            plot(object, --x1, y1, reverse, color);
        } else {
          /* pattern 3 */
          plot(object, ++x, y += step, reverse, color);
          if (pixels_left > 1)
            plot(object, ++x, y, reverse, color);
          if (pixels_left > 2) {
            if (D > c)	/* step 3 */
              plot(object, --x1, y1 -= step, reverse, color);
            else	/* step 2 */
              plot(object, --x1, y1, reverse, color);
          }
        }
      }
    }
  }
}

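/* Draw the 1-pixel-wide outline of a box. */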
void
VGLBox(VGLBitmap *object, int x1, int y1, int x2, int y2, u_long color)
{
  VGLLine(object, x1, y1, x2, y1, color);
  VGLLine(object, x2, y1, x2, y2, color);
  VGLLine(object, x2, y2, x1, y2, color);
  VGLLine(object, x1, y2, x1, y1, color);
}

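/* Fill a box by drawing one horizontal line per scan line. */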
void
VGLFilledBox(VGLBitmap *object, int x1, int y1, int x2, int y2, u_long color)
{
  int y;

  for (y=y1; y<=y2; y++) VGLLine(object, x1, y, x2, y, color);
}

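/*
 * Plot the 4 symmetric points (xc +/- x, yc +/- y) of an ellipse,
 * avoiding duplicate plots of pixels lying on an axis (x or y == 0).
 */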
static inline void
set4pixels(VGLBitmap *object, int x, int y, int xc, int yc, u_long color)
{
  if (x!=0) {
    VGLSetXY(object, xc+x, yc+y, color);
    VGLSetXY(object, xc-x, yc+y, color);
    if (y!=0) {
      VGLSetXY(object, xc+x, yc-y, color);
      VGLSetXY(object, xc-x, yc-y, color);
    }
  }
  else {
    VGLSetXY(object, xc, yc+y, color);
    if (y!=0)
      VGLSetXY(object, xc, yc-y, color);
  }
}

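/*
 * Midpoint ellipse algorithm with decision variable d: the first loop
 * steps x while the tangent slope magnitude is below 1 (dx < dy), the
 * second steps y down to the axis; the other quadrants come from
 * set4pixels() symmetry.
 */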
void
VGLEllipse(VGLBitmap *object, int xc, int yc, int a, int b, u_long color)
{
  int x = 0, y = b, asq = a*a, asq2 = a*a*2, bsq = b*b;
  int bsq2 = b*b*2, d = bsq-asq*b+asq/4, dx = 0, dy = asq2*b;

  while (dx<dy) {
    set4pixels(object, x, y, xc, yc, color);
    if (d>0) {
      y--; dy-=asq2; d-=dy;
    }
    x++; dx+=bsq2; d+=bsq+dx;
  }
  d+=(3*(asq-bsq)/2-(dx+dy))/2;
  while (y>=0) {
    set4pixels(object, x, y, xc, yc, color);
    if (d<0) {
      x++; dx+=bsq2; d+=dx;
    }
    y--; dy-=asq2; d+=asq-dy;
  }
}

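/*
 * Draw the 2 symmetric horizontal spans of a filled ellipse, skipping
 * the mirrored span when it would coincide (y == 0).
 */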
static inline void
set2lines(VGLBitmap *object, int x, int y, int xc, int yc, u_long color)
{
  if (x!=0) {
    VGLLine(object, xc+x, yc+y, xc-x, yc+y, color);
    if (y!=0)
      VGLLine(object, xc+x, yc-y, xc-x, yc-y, color);
  }
  else {
    VGLLine(object, xc, yc+y, xc, yc-y, color);
  }
}

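/* Same midpoint walk as VGLEllipse(), but filling spans via set2lines(). */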
void
VGLFilledEllipse(VGLBitmap *object, int xc, int yc, int a, int b, u_long color)
{
  int x = 0, y = b, asq = a*a, asq2 = a*a*2, bsq = b*b;
  int bsq2 = b*b*2, d = bsq-asq*b+asq/4, dx = 0, dy = asq2*b;

  while (dx<dy) {
    set2lines(object, x, y, xc, yc, color);
    if (d>0) {
      y--; dy-=asq2; d-=dy;
    }
    x++; dx+=bsq2; d+=bsq+dx;
  }
  d+=(3*(asq-bsq)/2-(dx+dy))/2;
  while (y>=0) {
    set2lines(object, x, y, xc, yc, color);
    if (d<0) {
      x++; dx+=bsq2; d+=dx;
    }
    y--; dy-=asq2; d+=asq-dy;
  }
}

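/*
 * Clear a whole bitmap.  For packed modes this builds one scan line of
 * the fill color in a MEMBUF and copies it to every row, so the inner
 * loop runs inside the copy routine; the planar modes keep the old
 * method, which moves the slow i/o instructions out of the inner loops.
 */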
void
VGLClear(VGLBitmap *object, u_long color)
{
  VGLBitmap src;
  int i, len, mousemode, offset;

  VGLCheckSwitch();
  if (object == VGLDisplay) {
    VGLMouseFreeze();
    VGLClear(&VGLVDisplay, color);
  } else if (object->Type != MEMBUF)
    return;		/* invalid */
  switch (object->Type) {
  case MEMBUF:
  case VIDBUF8:
  case VIDBUF8S:
  case VIDBUF16:
  case VIDBUF16S:
  case VIDBUF24:
  case VIDBUF24S:
  case VIDBUF32:
  case VIDBUF32S:
    src.Type = MEMBUF;
    src.Xsize = object->Xsize;
    src.VXsize = object->VXsize;
    src.Ysize = 1;
    src.VYsize = 1;
    src.Xorigin = 0;
    src.Yorigin = 0;
    src.Bitmap = alloca(object->VXsize * object->PixelBytes);
    src.PixelBytes = object->PixelBytes;
    color = htole32(color);
    for (i = 0; i < object->VXsize; i++)
      bcopy(&color, src.Bitmap + i * object->PixelBytes, object->PixelBytes);
    for (i = 0; i < object->VYsize; i++)
      __VGLBitmapCopy(&src, 0, 0, object, 0, i, object->VXsize, -1);
    break;

  case VIDBUF8X:
    mousemode = __VGLMouseMode(VGL_MOUSEHIDE);
    /* XXX works only for Xsize % 4 = 0 */
    outb(0x3c6, 0xff);
    outb(0x3c4, 0x02); outb(0x3c5, 0x0f);
    memset(object->Bitmap, (byte)color, VGLAdpInfo.va_line_width*object->VYsize);
    __VGLMouseMode(mousemode);
    break;

  case VIDBUF4:
  case VIDBUF4S:
    mousemode = __VGLMouseMode(VGL_MOUSEHIDE);
    /* XXX works only for Xsize % 8 = 0 */
    outb(0x3c4, 0x02); outb(0x3c5, 0x0f);
    outb(0x3ce, 0x05); outb(0x3cf, 0x02);	/* mode 2 */
    outb(0x3ce, 0x01); outb(0x3cf, 0x00);	/* set/reset enable */
    outb(0x3ce, 0x08); outb(0x3cf, 0xff);	/* bit mask */
    for (offset = 0; offset < VGLAdpInfo.va_line_width*object->VYsize; ) {
      VGLSetSegment(offset);
      len = min(object->VXsize*object->VYsize - offset,
                VGLAdpInfo.va_window_size);
      memset(object->Bitmap, (byte)color, len);
      offset += len;
    }
    outb(0x3ce, 0x05); outb(0x3cf, 0x00);
    __VGLMouseMode(mousemode);
    break;
  }
  if (object == VGLDisplay)
    VGLMouseUnFreeze();
}

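/*
 * Pack 16-bit-per-channel r/g/b values into a native direct-color
 * pixel, with field widths taken from vi_pixel_fsizes[] (blue in the
 * least significant bits).
 */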
static inline u_long
VGLrgbToNative(uint16_t r, uint16_t g, uint16_t b)
{
  int nr, ng, nb;

  nr = VGLModeInfo.vi_pixel_fsizes[2];
  ng = VGLModeInfo.vi_pixel_fsizes[1];
  nb = VGLModeInfo.vi_pixel_fsizes[0];
  return (r >> (16 - nr) << (ng + nb)) | (g >> (16 - ng) << nb) |
         (b >> (16 - nb) << 0);
}

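/*
 * Expand a 3:3:2 color to 16:16:16 and repack it for the current mode;
 * e.g., c = 0xe0 (full red) gives r = 7 * 0xffff / 7 = 0xffff and
 * g = b = 0.
 */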
u_long
VGLrgb332ToNative(byte c)
{
  uint16_t r, g, b;

  /* 3:3:2 to 16:16:16 */
  r = ((c & 0xe0) >> 5) * 0xffff / 7;
  g = ((c & 0x1c) >> 2) * 0xffff / 7;
  b = ((c & 0x03) >> 0) * 0xffff / 3;

  return VGLrgbToNative(r, g, b);
}

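/*
 * The palette functions below program the VGA DAC directly through
 * ports 0x3c6-0x3c9; they are no-ops in direct-color modes.  The
 * inb(0x84) calls apparently just provide a small i/o delay between
 * DAC accesses.
 */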
void
VGLRestorePalette(void)
{
  int i;

  if (VGLModeInfo.vi_mem_model == V_INFO_MM_DIRECT)
    return;
  outb(0x3C6, 0xFF);
  inb(0x3DA);
  outb(0x3C8, 0x00);
  for (i=0; i<256; i++) {
    outb(0x3C9, VGLSavePaletteRed[i]);
    inb(0x84);
    outb(0x3C9, VGLSavePaletteGreen[i]);
    inb(0x84);
    outb(0x3C9, VGLSavePaletteBlue[i]);
    inb(0x84);
  }
  inb(0x3DA);
  outb(0x3C0, 0x20);
}

void
VGLSavePalette(void)
{
  int i;

  if (VGLModeInfo.vi_mem_model == V_INFO_MM_DIRECT)
    return;
  outb(0x3C6, 0xFF);
  inb(0x3DA);
  outb(0x3C7, 0x00);
  for (i=0; i<256; i++) {
    VGLSavePaletteRed[i] = inb(0x3C9);
    inb(0x84);
    VGLSavePaletteGreen[i] = inb(0x3C9);
    inb(0x84);
    VGLSavePaletteBlue[i] = inb(0x3C9);
    inb(0x84);
  }
  inb(0x3DA);
  outb(0x3C0, 0x20);
}

void
VGLSetPalette(byte *red, byte *green, byte *blue)
{
  int i;

  if (VGLModeInfo.vi_mem_model == V_INFO_MM_DIRECT)
    return;
  for (i=0; i<256; i++) {
    VGLSavePaletteRed[i] = red[i];
    VGLSavePaletteGreen[i] = green[i];
    VGLSavePaletteBlue[i] = blue[i];
  }
  VGLCheckSwitch();
  outb(0x3C6, 0xFF);
  inb(0x3DA);
  outb(0x3C8, 0x00);
  for (i=0; i<256; i++) {
    outb(0x3C9, VGLSavePaletteRed[i]);
    inb(0x84);
    outb(0x3C9, VGLSavePaletteGreen[i]);
    inb(0x84);
    outb(0x3C9, VGLSavePaletteBlue[i]);
    inb(0x84);
  }
  inb(0x3DA);
  outb(0x3C0, 0x20);
}

void
VGLSetPaletteIndex(byte color, byte red, byte green, byte blue)
{
  if (VGLModeInfo.vi_mem_model == V_INFO_MM_DIRECT)
    return;
  VGLSavePaletteRed[color] = red;
  VGLSavePaletteGreen[color] = green;
  VGLSavePaletteBlue[color] = blue;
  VGLCheckSwitch();
  outb(0x3C6, 0xFF);
  inb(0x3DA);
  outb(0x3C8, color);
  outb(0x3C9, red); outb(0x3C9, green); outb(0x3C9, blue);
  inb(0x3DA);
  outb(0x3C0, 0x20);
}

void
VGLRestoreBorder(void)
{
  VGLSetBorder(VGLBorderColor);
}

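/*
 * In direct-color modes the VGA i/o ports are not enabled by default,
 * so access is gained with KDENABIO and dropped again afterwards.
 */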
void
VGLSetBorder(byte color)
{
  if (VGLModeInfo.vi_mem_model == V_INFO_MM_DIRECT && ioctl(0, KDENABIO, 0))
    return;
  VGLCheckSwitch();
  inb(0x3DA);
  outb(0x3C0,0x11); outb(0x3C0, color);
  inb(0x3DA);
  outb(0x3C0, 0x20);
  VGLBorderColor = color;
  if (VGLModeInfo.vi_mem_model == V_INFO_MM_DIRECT)
    ioctl(0, KDDISABIO, 0);
}

void
VGLRestoreBlank(void)
{
  VGLBlankDisplay(VGLBlank);
}

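/* Setting bit 5 of sequencer register 1 (clocking mode) blanks the screen. */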
void
VGLBlankDisplay(int blank)
{
  byte val;

  if (VGLModeInfo.vi_mem_model == V_INFO_MM_DIRECT && ioctl(0, KDENABIO, 0))
    return;
  VGLCheckSwitch();
  outb(0x3C4, 0x01); val = inb(0x3C5); outb(0x3C4, 0x01);
  outb(0x3C5, ((blank) ? (val |= 0x20) : (val &= 0xDF)));
  VGLBlank = blank;
  if (VGLModeInfo.vi_mem_model == V_INFO_MM_DIRECT)
    ioctl(0, KDDISABIO, 0);
}