/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER:  This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 *
 * $FreeBSD$
 */

/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
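
/*
 * Illustrative userland usage (not part of this module): a minimal sketch,
 * assuming <aio.h>, an already-open descriptor "fd", and with error handling
 * omitted, of how the facility implemented below is driven through the
 * POSIX interface:
 *
 *	struct aiocb cb;
 *	char buf[512];
 *
 *	bzero(&cb, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	if (aio_read(&cb) == 0) {
 *		const struct aiocb *list[1] = { &cb };
 *		while (aio_error(&cb) == EINPROGRESS)	(still in flight)
 *			aio_suspend(list, 1, NULL);	(sleep until done)
 *		n = aio_return(&cb);			(reap the result)
 *	}
 */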

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <sys/aio.h>

#include <machine/limits.h>

#include "opt_vfs_aio.h"

#ifdef VFS_AIO

static long jobrefid;

#define JOBST_NULL		0x0
#define JOBST_JOBQPROC		0x1
#define JOBST_JOBQGLOBAL	0x2
#define JOBST_JOBRUNNING	0x3
#define JOBST_JOBFINISHED	0x4
#define JOBST_JOBQBUF		0x5
#define JOBST_JOBBFINISHED	0x6
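
/*
 * Rough job lifecycle tracked by the JOBST_* values above (a descriptive
 * summary, not authoritative): a queued request normally moves from
 * JOBST_JOBQGLOBAL (or JOBST_JOBQPROC when handed to a particular aio
 * daemon) to JOBST_JOBRUNNING while a daemon services it, and ends at
 * JOBST_JOBFINISHED on the per-process done queue.  Requests pushed through
 * the buf/physio path use JOBST_JOBQBUF while in flight and
 * JOBST_JOBBFINISHED once the physical I/O completes.  JOBST_NULL marks a
 * control block that is not on any queue.
 */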

#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif

static int max_aio_procs = MAX_AIO_PROCS;
static int num_aio_procs = 0;
static int target_aio_procs = TARGET_AIO_PROCS;
static int max_queue_count = MAX_AIO_QUEUE;
static int num_queue_count = 0;
static int num_buf_aio = 0;
static int num_aio_resv_start = 0;
static int aiod_timeout;
static int aiod_lifetime;

static int max_aio_per_proc = MAX_AIO_PER_PROC;
static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
static int max_buf_aio = MAX_BUF_AIO;

SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "AIO mgmt");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc,
	CTLFLAG_RW, &max_aio_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc,
	CTLFLAG_RW, &max_aio_queue_per_proc, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
	CTLFLAG_RW, &max_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
	CTLFLAG_RD, &num_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count,
	CTLFLAG_RD, &num_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue,
	CTLFLAG_RW, &max_queue_count, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs,
	CTLFLAG_RW, &target_aio_procs, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio,
	CTLFLAG_RW, &max_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio,
	CTLFLAG_RD, &num_buf_aio, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime,
	CTLFLAG_RW, &aiod_lifetime, 0, "");

SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout,
	CTLFLAG_RW, &aiod_timeout, 0, "");

/*
 * AIO process info
 */
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

struct aioproclist {
	int aioprocflags;			/* AIO proc flags */
	TAILQ_ENTRY(aioproclist) list;		/* List of processes */
	struct proc *aioproc;			/* The AIO thread */
	TAILQ_HEAD (,aiocblist) jobtorun;	/* suggested job to run */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
	int lioj_flags;
	int lioj_buffer_count;
	int lioj_buffer_finished_count;
	int lioj_queue_count;
	int lioj_queue_finished_count;
	struct sigevent lioj_signal;	/* signal on all I/O done */
	TAILQ_ENTRY (aio_liojob) lioj_list;
	struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
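
/*
 * The aio_liojob bookkeeping above backs lio_listio(2).  A minimal userland
 * sketch (not part of this file; descriptors and aiocb setup, including the
 * aio_lio_opcode fields, are assumed) of the "signal on all done" case that
 * LIOJ_SIGNAL tracks:
 *
 *	struct aiocb *list[2] = { &cb0, &cb1 };
 *	struct sigevent ev;
 *
 *	ev.sigev_notify = SIGEV_SIGNAL;
 *	ev.sigev_signo = SIGUSR1;
 *	lio_listio(LIO_NOWAIT, list, 2, &ev);
 *	(the signal is delivered once, after both requests have completed)
 */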

/*
 * per process aio data structure
 */
struct kaioinfo {
	int kaio_flags;			/* per process kaio flags */
	int kaio_maxactive_count;	/* maximum number of AIOs */
	int kaio_active_count;		/* number of currently used AIOs */
	int kaio_qallowed_count;	/* maximum size of AIO queue */
	int kaio_queue_count;		/* size of AIO queue */
	int kaio_ballowed_count;	/* maximum number of buffers */
	int kaio_queue_finished_count;	/* number of daemon jobs finished */
	int kaio_buffer_count;		/* number of physio buffers */
	int kaio_buffer_finished_count;	/* count of I/O done */
	struct proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD (,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD (,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD (,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD (,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD (,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */

static TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
static TAILQ_HEAD(,aiocblist) aio_jobs;			/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;		/* Phys I/O job list */
static TAILQ_HEAD(,aiocblist) aio_freejobs;		/* Pool of free jobs */

static void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void *);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(void);
static int	aio_aqueue(struct proc *p, struct aiocb *job, int type);
static void	aio_physwakeup(struct buf *bp);
static int	aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	aio_daemon(void *uproc);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

static vm_zone_t kaio_zone = 0, aiop_zone = 0, aiocb_zone = 0, aiol_zone = 0;
static vm_zone_t aiolio_zone = 0;

/*
 * Startup initialization
 */
void
aio_onceonly(void *na)
{
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);
	TAILQ_INIT(&aio_freejobs);
	kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
	aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
	aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
	aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
	aiolio_zone = zinit("AIOLIO", AIO_LISTIO_MAX * sizeof (struct
	    aio_liojob), 0, 0, 1);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
}

/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 */
void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		ki = zalloc(kaio_zone);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Free a job entry. Wait for completion if it is currently active, but don't
|
|
|
|
* delay forever. If we delay, we return a flag that says that we have to
|
|
|
|
* restart the queue scan.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
int
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_free_entry(struct aiocblist *aiocbe)
|
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct aioproclist *aiop;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct proc *p;
|
1997-11-29 01:33:10 +00:00
|
|
|
int error;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
int s;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
if (aiocbe->jobstate == JOBST_NULL)
|
|
|
|
panic("aio_free_entry: freeing already free job");
|
|
|
|
|
|
|
|
p = aiocbe->userproc;
|
|
|
|
ki = p->p_aioinfo;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj = aiocbe->lio;
|
1997-07-06 02:40:43 +00:00
|
|
|
if (ki == NULL)
|
|
|
|
panic("aio_free_entry: missing p->p_aioinfo");
|
|
|
|
|
|
|
|
if (aiocbe->jobstate == JOBST_JOBRUNNING) {
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE)
|
|
|
|
return 0;
|
|
|
|
aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
|
1997-10-09 04:14:41 +00:00
|
|
|
tsleep(aiocbe, PRIBIO|PCATCH, "jobwai", 0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
if (aiocbe->bp == NULL) {
|
|
|
|
if (ki->kaio_queue_count <= 0)
|
|
|
|
panic("aio_free_entry: process queue size <= 0");
|
|
|
|
if (num_queue_count <= 0)
|
|
|
|
panic("aio_free_entry: system wide queue size <= 0");
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_queue_count--;
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_DONE)
|
|
|
|
lj->lioj_queue_finished_count--;
|
|
|
|
}
|
|
|
|
ki->kaio_queue_count--;
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_DONE)
|
|
|
|
ki->kaio_queue_finished_count--;
|
|
|
|
num_queue_count--;
|
1997-11-29 01:33:10 +00:00
|
|
|
} else {
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_buffer_count--;
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_DONE)
|
|
|
|
lj->lioj_buffer_finished_count--;
|
|
|
|
}
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_DONE)
|
|
|
|
ki->kaio_buffer_finished_count--;
|
|
|
|
ki->kaio_buffer_count--;
|
|
|
|
num_buf_aio--;
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
/* aiocbe is going away, we need to destroy any knotes */
|
|
|
|
knote_remove(p, &aiocbe->klist);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
|
|
|
|
&& ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags &= ~KAIO_WAKEUP;
|
|
|
|
wakeup(p);
|
|
|
|
}
|
1997-11-30 04:36:31 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (aiocbe->jobstate == JOBST_JOBQBUF) {
|
1997-11-29 01:33:10 +00:00
|
|
|
if ((error = aio_fphysio(p, aiocbe, 1)) != 0)
|
|
|
|
return error;
|
|
|
|
if (aiocbe->jobstate != JOBST_JOBBFINISHED)
|
|
|
|
panic("aio_free_entry: invalid physio finish-up state");
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove a lot of overly verbose debugging statements.
1997-12-01 07:01:45 +00:00
|
|
|
s = splbio();
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
|
1997-12-01 07:01:45 +00:00
|
|
|
splx(s);
|
2000-01-14 02:53:29 +00:00
|
|
|
} else if (aiocbe->jobstate == JOBST_JOBQPROC) {
|
1997-07-06 02:40:43 +00:00
|
|
|
aiop = aiocbe->jobaioproc;
|
|
|
|
TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
|
2000-01-14 02:53:29 +00:00
|
|
|
} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL)
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&aio_jobs, aiocbe, list);
|
2000-01-14 02:53:29 +00:00
|
|
|
else if (aiocbe->jobstate == JOBST_JOBFINISHED)
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
|
2000-01-14 02:53:29 +00:00
|
|
|
else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
|
1997-12-01 07:01:45 +00:00
|
|
|
s = splbio();
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
|
1997-12-01 07:01:45 +00:00
|
|
|
splx(s);
|
1997-11-30 04:36:31 +00:00
|
|
|
if (aiocbe->bp) {
|
|
|
|
vunmapbuf(aiocbe->bp);
|
1999-01-21 08:29:12 +00:00
|
|
|
relpbuf(aiocbe->bp, NULL);
|
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->bp = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
|
|
|
|
TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
|
|
|
|
zfree(aiolio_zone, lj);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
|
|
|
|
aiocbe->jobstate = JOBST_NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
2000-07-28 23:10:10 +00:00
|
|
|
#endif /* VFS_AIO */
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Rundown the jobs for a given process.
|
|
|
|
*/
|
|
|
|
void
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_proc_rundown(struct proc *p)
|
|
|
|
{
|
2000-07-28 23:10:10 +00:00
|
|
|
#ifndef VFS_AIO
|
|
|
|
return;
|
|
|
|
#else
|
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj, *ljn;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocblist *aiocbe, *aiocbn;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct filedesc *fdp;
|
|
|
|
struct socket *so;
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
|
|
|
return;
|
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
|
2000-01-14 02:53:29 +00:00
|
|
|
while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
|
|
|
|
ki->kaio_buffer_finished_count)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags |= KAIO_RUNDOWN;
|
1997-11-30 04:36:31 +00:00
|
|
|
if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
|
1997-10-09 04:14:41 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/*
|
|
|
|
* Move any aio ops that are waiting on socket I/O to the normal job
|
|
|
|
* queues so they are cleaned up with any others.
|
|
|
|
*/
|
|
|
|
fdp = p->p_fd;
|
|
|
|
|
|
|
|
s = splnet();
|
|
|
|
for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
|
|
|
|
aiocbn) {
|
|
|
|
aiocbn = TAILQ_NEXT(aiocbe, plist);
|
|
|
|
fp = fdp->fd_ofiles[aiocbe->uaiocb.aio_fildes];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Under some circumstances, the aio_fildes and the file
|
|
|
|
* structure don't match. This would leave aiocbe's in the
|
|
|
|
* TAILQ associated with the socket and cause a panic later.
|
|
|
|
*
|
|
|
|
* Detect and fix.
|
|
|
|
*/
|
|
|
|
if ((fp == NULL) || (fp != aiocbe->fd_file))
|
|
|
|
fp = aiocbe->fd_file;
|
|
|
|
if (fp) {
|
|
|
|
so = (struct socket *)fp->f_data;
|
|
|
|
TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
|
|
|
|
if (TAILQ_EMPTY(&so->so_aiojobq)) {
|
|
|
|
so->so_snd.sb_flags &= ~SB_AIO;
|
|
|
|
so->so_rcv.sb_flags &= ~SB_AIO;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
|
|
|
|
TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
restart1:
|
2000-01-14 02:53:29 +00:00
|
|
|
for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbn = TAILQ_NEXT(aiocbe, plist);
|
|
|
|
if (aio_free_entry(aiocbe))
|
|
|
|
goto restart1;
|
|
|
|
}
|
|
|
|
|
|
|
|
restart2:
|
2000-01-14 02:53:29 +00:00
|
|
|
for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
|
|
|
|
aiocbn) {
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbn = TAILQ_NEXT(aiocbe, plist);
|
|
|
|
if (aio_free_entry(aiocbe))
|
|
|
|
goto restart2;
|
|
|
|
}
|
1997-11-30 04:36:31 +00:00
|
|
|
|
1997-11-30 21:47:36 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Note the use of lots of splbio here, trying to avoid splbio for long chains
|
|
|
|
* of I/O. Probably unnecessary.
|
1997-11-30 21:47:36 +00:00
|
|
|
*/
|
1997-11-30 04:36:31 +00:00
|
|
|
restart3:
|
|
|
|
s = splbio();
|
|
|
|
while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
|
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2000-01-14 02:53:29 +00:00
|
|
|
tsleep(p, PRIBIO, "aioprn", 0);
|
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
|
|
|
goto restart3;
|
|
|
|
}
|
1997-11-30 21:47:36 +00:00
|
|
|
splx(s);
|
1997-11-30 04:36:31 +00:00
|
|
|
|
|
|
|
restart4:
|
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
|
1997-11-30 04:36:31 +00:00
|
|
|
aiocbn = TAILQ_NEXT(aiocbe, plist);
|
|
|
|
if (aio_free_entry(aiocbe)) {
|
|
|
|
splx(s);
|
|
|
|
goto restart4;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
|
|
|
|
ljn = TAILQ_NEXT(lj, lioj_list);
|
|
|
|
if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
|
|
|
|
0)) {
|
|
|
|
TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
|
|
|
|
zfree(aiolio_zone, lj);
|
|
|
|
} else {
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
|
|
|
|
"QF:%d\n", lj->lioj_buffer_count,
|
|
|
|
lj->lioj_buffer_finished_count,
|
|
|
|
lj->lioj_queue_count,
|
|
|
|
lj->lioj_queue_finished_count);
|
1997-12-01 07:01:45 +00:00
|
|
|
#endif
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
zfree(kaio_zone, ki);
|
1997-10-09 04:14:41 +00:00
|
|
|
p->p_aioinfo = NULL;
|
2000-07-28 23:10:10 +00:00
|
|
|
#endif /* VFS_AIO */
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
2000-07-28 23:10:10 +00:00
|
|
|
#ifdef VFS_AIO
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Select a job to run (called by an AIO daemon).
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static struct aiocblist *
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_selectjob(struct aioproclist *aiop)
|
|
|
|
{
|
2000-01-14 02:53:29 +00:00
|
|
|
int s;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocblist *aiocbe;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct proc *userp;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
aiocbe = TAILQ_FIRST(&aiop->jobtorun);
|
|
|
|
if (aiocbe) {
|
|
|
|
TAILQ_REMOVE(&aiop->jobtorun, aiocbe, list);
|
|
|
|
return aiocbe;
|
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
|
|
|
for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
|
|
|
|
TAILQ_NEXT(aiocbe, list)) {
|
1997-07-06 02:40:43 +00:00
|
|
|
userp = aiocbe->userproc;
|
|
|
|
ki = userp->p_aioinfo;
|
|
|
|
|
|
|
|
if (ki->kaio_active_count < ki->kaio_maxactive_count) {
|
|
|
|
TAILQ_REMOVE(&aio_jobs, aiocbe, list);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
return aiocbe;
|
|
|
|
}
|
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* The AIO processing activity. This is the code that does the I/O request for
|
|
|
|
* the non-physio version of the operations. The normal vn operations are used,
|
|
|
|
* and this code should work in all instances for every type of file, including
|
|
|
|
* pipes, sockets, fifos, and regular files.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
void
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_process(struct aiocblist *aiocbe)
|
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
struct filedesc *fdp;
|
1997-11-29 01:33:10 +00:00
|
|
|
struct proc *userp, *mycp;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocb *cb;
|
|
|
|
struct file *fp;
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
|
|
|
unsigned int fd;
|
|
|
|
int cnt;
|
|
|
|
int error;
|
1997-10-09 04:14:41 +00:00
|
|
|
off_t offset;
|
1997-11-29 01:33:10 +00:00
|
|
|
int oublock_st, oublock_end;
|
|
|
|
int inblock_st, inblock_end;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
userp = aiocbe->userproc;
|
|
|
|
cb = &aiocbe->uaiocb;
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
mycp = curproc;
|
|
|
|
|
|
|
|
fdp = mycp->p_fd;
|
1997-07-06 02:40:43 +00:00
|
|
|
fd = cb->aio_fildes;
|
|
|
|
fp = fdp->fd_ofiles[fd];
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((fp == NULL) || (fp != aiocbe->fd_file)) {
|
|
|
|
cb->_aiocb_private.error = EBADF;
|
|
|
|
cb->_aiocb_private.status = -1;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
aiov.iov_base = (void *)cb->aio_buf;
|
1997-07-06 02:40:43 +00:00
|
|
|
aiov.iov_len = cb->aio_nbytes;
|
|
|
|
|
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
1997-10-09 04:14:41 +00:00
|
|
|
auio.uio_offset = offset = cb->aio_offset;
|
1997-07-06 02:40:43 +00:00
|
|
|
auio.uio_resid = cb->aio_nbytes;
|
|
|
|
cnt = cb->aio_nbytes;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
1997-11-29 01:33:10 +00:00
|
|
|
auio.uio_procp = mycp;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
inblock_st = mycp->p_stats->p_ru.ru_inblock;
|
|
|
|
oublock_st = mycp->p_stats->p_ru.ru_oublock;
|
1997-07-06 02:40:43 +00:00
|
|
|
if (cb->aio_lio_opcode == LIO_READ) {
|
|
|
|
auio.uio_rw = UIO_READ;
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
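For reference, a sketch (not the authoritative <sys/file.h> definition) of the fo_foo() wrapper style the commit message above describes: take and drop a reference around the indirect fileops call; the argument order assumed here matches the call sites below.
/*
 * Sketch of the fo_read() wrapper referred to above: hold a reference on
 * the file for the duration of the indirect call so the struct file cannot
 * be freed underneath it.  The real definition lives in <sys/file.h>.
 */
static __inline int
fo_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags,
    struct proc *p)
{
	int error;

	fhold(fp);					/* f_count++ */
	error = (*fp->f_ops->fo_read)(fp, uio, cred, flags, p);
	fdrop(fp, p);					/* may free fp */
	return (error);
}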
|
|
|
error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, mycp);
|
1997-07-06 02:40:43 +00:00
|
|
|
} else {
|
|
|
|
auio.uio_rw = UIO_WRITE;
|
1999-09-19 17:00:25 +00:00
|
|
|
error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, mycp);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
inblock_end = mycp->p_stats->p_ru.ru_inblock;
|
|
|
|
oublock_end = mycp->p_stats->p_ru.ru_oublock;
|
|
|
|
|
|
|
|
aiocbe->inputcharge = inblock_end - inblock_st;
|
|
|
|
aiocbe->outputcharge = oublock_end - oublock_st;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((error) && (auio.uio_resid != cnt)) {
|
|
|
|
if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
|
|
|
|
error = 0;
|
|
|
|
if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE))
|
|
|
|
psignal(userp, SIGPIPE);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
cnt -= auio.uio_resid;
|
|
|
|
cb->_aiocb_private.error = error;
|
|
|
|
cb->_aiocb_private.status = cnt;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
return;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1997-11-30 04:36:31 +00:00
|
|
|
* The AIO daemon, most of the actual work is done in aio_process,
|
|
|
|
* but the setup (and address space mgmt) is done in this routine.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static void
|
1999-07-01 13:21:46 +00:00
|
|
|
aio_daemon(void *uproc)
|
1997-07-06 02:40:43 +00:00
|
|
|
{
|
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct aio_liojob *lj;
|
|
|
|
struct aiocb *cb;
|
|
|
|
struct aiocblist *aiocbe;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aioproclist *aiop;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct proc *curcp, *mycp, *userp;
|
|
|
|
struct vmspace *myvm, *tmpvm;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-09-11 04:06:48 +00:00
|
|
|
mtx_enter(&Giant, MTX_DEF);
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
1997-11-29 01:33:10 +00:00
|
|
|
* Local copies of curproc (mycp) and vmspace (myvm)
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
1997-11-29 01:33:10 +00:00
|
|
|
mycp = curproc;
|
|
|
|
myvm = mycp->p_vmspace;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
if (mycp->p_textvp) {
|
|
|
|
vrele(mycp->p_textvp);
|
|
|
|
mycp->p_textvp = NULL;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Allocate and ready the aio control info. There is one aiop structure
|
|
|
|
* per daemon.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
aiop = zalloc(aiop_zone);
|
|
|
|
aiop->aioproc = mycp;
|
|
|
|
aiop->aioprocflags |= AIOP_FREE;
|
|
|
|
TAILQ_INIT(&aiop->jobtorun);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Place thread (lightweight process) onto the AIO free thread list.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (TAILQ_EMPTY(&aio_freeproc))
|
|
|
|
wakeup(&aio_freeproc);
|
|
|
|
TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
|
|
|
|
|
|
|
/* Make up a name for the daemon. */
|
1997-11-29 01:33:10 +00:00
|
|
|
strcpy(mycp->p_comm, "aiod");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get rid of our current filedescriptors. AIOD's don't need any
|
|
|
|
* filedescriptors, except as temporarily inherited from the client.
|
2000-01-14 02:53:29 +00:00
|
|
|
* Credentials are also cloned, and made equivalent to "root".
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
fdfree(mycp);
|
|
|
|
mycp->p_fd = NULL;
|
|
|
|
mycp->p_ucred = crcopy(mycp->p_ucred);
|
|
|
|
mycp->p_ucred->cr_uid = 0;
|
2000-09-05 22:11:13 +00:00
|
|
|
uifree(mycp->p_ucred->cr_uidinfo);
|
|
|
|
mycp->p_ucred->cr_uidinfo = uifind(0);
|
1997-11-29 01:33:10 +00:00
|
|
|
mycp->p_ucred->cr_ngroups = 1;
|
|
|
|
mycp->p_ucred->cr_groups[0] = 1;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* The daemon resides in its own pgrp. */
|
1997-11-29 01:33:10 +00:00
|
|
|
enterpgrp(mycp, mycp->p_pid, 1);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Mark special process type. */
|
|
|
|
mycp->p_flag |= P_SYSTEM | P_KTHREADP;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* Wakeup parent process. (Parent sleeps to keep from blasting away
|
|
|
|
* creating too many daemons.)
|
|
|
|
*/
|
|
|
|
wakeup(mycp);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (;;) {
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* curcp is the current daemon process context.
|
|
|
|
* userp is the current user process context.
|
|
|
|
*/
|
|
|
|
curcp = mycp;
|
1997-10-11 01:07:03 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* Take the daemon off the free queue.
|
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
if (aiop->aioprocflags & AIOP_FREE) {
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
|
|
|
|
aiop->aioprocflags &= ~AIOP_FREE;
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
aiop->aioprocflags &= ~AIOP_SCHED;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Check for jobs.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
1999-01-27 21:50:00 +00:00
|
|
|
while ((aiocbe = aio_selectjob(aiop)) != NULL) {
|
1997-07-06 02:40:43 +00:00
|
|
|
cb = &aiocbe->uaiocb;
|
|
|
|
userp = aiocbe->userproc;
|
|
|
|
|
|
|
|
aiocbe->jobstate = JOBST_JOBRUNNING;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Connect to process address space for user program.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (userp != curcp) {
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Save the current address space that we are
|
|
|
|
* connected to.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
tmpvm = mycp->p_vmspace;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Point to the new user address space, and
|
|
|
|
* take a reference to it.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
mycp->p_vmspace = userp->p_vmspace;
|
1997-11-30 04:36:31 +00:00
|
|
|
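/* Take a reference so the user's vmspace stays around while we borrow it. */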
mycp->p_vmspace->vm_refcnt++;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
/* Activate the new mapping. */
|
1997-11-29 01:33:10 +00:00
|
|
|
pmap_activate(mycp);
|
2000-01-14 02:53:29 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If the old address space wasn't the daemon's
|
|
|
|
* own address space, then we need to remove the
|
|
|
|
* daemon's reference from the other process
|
|
|
|
* that it was acting on behalf of.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
if (tmpvm != myvm) {
|
|
|
|
vmspace_free(tmpvm);
|
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Disassociate from the previous client's file
|
|
|
|
* descriptors, and associate with the new client's
|
|
|
|
* descriptors. Note that the daemon doesn't
|
|
|
|
* need to worry about its original descriptors,
|
|
|
|
* because they were originally freed.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (mycp->p_fd)
|
|
|
|
fdfree(mycp);
|
|
|
|
mycp->p_fd = fdshare(userp);
|
|
|
|
curcp = userp;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
ki = userp->p_aioinfo;
|
1997-11-30 04:36:31 +00:00
|
|
|
lj = aiocbe->lio;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Account for currently active jobs. */
|
1997-07-06 02:40:43 +00:00
|
|
|
ki->kaio_active_count++;
|
1997-12-01 07:01:45 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Do the I/O function. */
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->jobaioproc = aiop;
|
|
|
|
aio_process(aiocbe);
|
1997-11-30 04:36:31 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Decrement the active job count. */
|
1997-11-30 04:36:31 +00:00
|
|
|
ki->kaio_active_count--;
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Increment the completion count for wakeup/signal
|
|
|
|
* comparisons.
|
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
|
|
|
aiocbe->jobflags |= AIOCBLIST_DONE;
|
|
|
|
ki->kaio_queue_finished_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_queue_finished_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
|
|
|
|
& KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags &= ~KAIO_WAKEUP;
|
|
|
|
wakeup(userp);
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj && (lj->lioj_flags &
|
|
|
|
(LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
|
|
|
|
if ((lj->lioj_queue_finished_count ==
|
|
|
|
lj->lioj_queue_count) &&
|
|
|
|
(lj->lioj_buffer_finished_count ==
|
|
|
|
lj->lioj_buffer_count)) {
|
|
|
|
psignal(userp,
|
|
|
|
lj->lioj_signal.sigev_signo);
|
|
|
|
lj->lioj_flags |=
|
|
|
|
LIOJ_SIGNAL_POSTED;
|
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBFINISHED;
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If the I/O request should be automatically rundown,
|
|
|
|
* do the needed cleanup. Otherwise, place the queue
|
|
|
|
* entry for the just finished I/O request into the done
|
|
|
|
* queue for the associated client.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1997-07-06 02:40:43 +00:00
|
|
|
if (aiocbe->jobflags & AIOCBLIST_ASYNCFREE) {
|
|
|
|
aiocbe->jobflags &= ~AIOCBLIST_ASYNCFREE;
|
|
|
|
TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
|
|
|
|
} else {
|
2000-01-14 02:53:29 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe,
|
|
|
|
plist);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
2000-04-16 18:53:38 +00:00
|
|
|
KNOTE(&aiocbe->klist, 0);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
|
|
|
|
wakeup(aiocbe);
|
|
|
|
aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
|
|
|
|
psignal(userp, cb->aio_sigevent.sigev_signo);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Disconnect from user address space.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (curcp != mycp) {
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get the user address space to disconnect from. */
|
1997-11-29 01:33:10 +00:00
|
|
|
tmpvm = mycp->p_vmspace;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
/* Get original address space for daemon. */
|
1997-11-29 01:33:10 +00:00
|
|
|
mycp->p_vmspace = myvm;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
/* Activate the daemon's address space. */
|
1997-11-29 01:33:10 +00:00
|
|
|
pmap_activate(mycp);
|
2000-01-14 02:53:29 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (tmpvm == myvm) {
|
|
|
|
printf("AIOD: vmspace problem -- %d\n",
|
|
|
|
mycp->p_pid);
|
|
|
|
}
|
1997-12-01 07:01:45 +00:00
|
|
|
#endif
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Remove our vmspace reference. */
|
1997-07-06 02:40:43 +00:00
|
|
|
vmspace_free(tmpvm);
|
2000-01-14 02:53:29 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Disassociate from the user process's file
|
|
|
|
* descriptors.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (mycp->p_fd)
|
|
|
|
fdfree(mycp);
|
|
|
|
mycp->p_fd = NULL;
|
|
|
|
curcp = mycp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are the first to be put onto the free queue, wake up
|
|
|
|
* anyone waiting for a daemon.
|
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&aio_activeproc, aiop, list);
|
|
|
|
if (TAILQ_EMPTY(&aio_freeproc))
|
|
|
|
wakeup(&aio_freeproc);
|
|
|
|
TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
|
|
|
|
aiop->aioprocflags |= AIOP_FREE;
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If the daemon is inactive for a long time, allow it to exit,
|
|
|
|
* thereby freeing resources.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((aiop->aioprocflags & AIOP_SCHED) == 0) && tsleep(mycp,
|
|
|
|
PRIBIO, "aiordy", aiod_lifetime)) {
|
|
|
|
s = splnet();
|
1997-11-29 01:33:10 +00:00
|
|
|
if ((TAILQ_FIRST(&aio_jobs) == NULL) &&
|
2000-01-14 02:53:29 +00:00
|
|
|
(TAILQ_FIRST(&aiop->jobtorun) == NULL)) {
|
1997-11-30 04:36:31 +00:00
|
|
|
if ((aiop->aioprocflags & AIOP_FREE) &&
|
2000-01-14 02:53:29 +00:00
|
|
|
(num_aio_procs > target_aio_procs)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-11-29 01:33:10 +00:00
|
|
|
zfree(aiop_zone, aiop);
|
1997-11-30 04:36:31 +00:00
|
|
|
num_aio_procs--;
|
2000-01-14 02:53:29 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (mycp->p_vmspace->vm_refcnt <= 1) {
|
|
|
|
printf("AIOD: bad vm refcnt for"
|
|
|
|
" exiting daemon: %d\n",
|
|
|
|
mycp->p_vmspace->vm_refcnt);
|
|
|
|
}
|
1997-12-01 07:01:45 +00:00
|
|
|
#endif
|
1997-11-29 01:33:10 +00:00
|
|
|
exit1(mycp, 0);
|
|
|
|
}
|
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
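The completion-notification path handled in the loop above (the per-job SIGEV_SIGNAL case and the lio_listio() group signal posted via psignal() when the LIOJ_SIGNAL counts match) is driven from userland through the sigevent structures supplied with the requests. Below is a minimal, hypothetical userland sketch of a two-element lio_listio() submission with a group completion signal; SIGUSR1, "/dev/zero", and the 512-byte buffers are example values only.

/*
 * Userland sketch (not part of this file): submit two reads with
 * lio_listio(LIO_NOWAIT) and request a single SIGUSR1 when the whole
 * list has completed, matching the LIOJ_SIGNAL path above.
 */
#include <aio.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t lio_done;

static void
lio_handler(int sig)
{
	(void)sig;
	lio_done = 1;
}

int
main(void)
{
	static char buf0[512], buf1[512];	/* example sizes only */
	struct aiocb cb0, cb1, *list[2];
	struct sigevent sev;
	int fd;

	signal(SIGUSR1, lio_handler);

	fd = open("/dev/zero", O_RDONLY);	/* example file only */
	if (fd == -1)
		err(1, "open");

	memset(&cb0, 0, sizeof(cb0));
	cb0.aio_fildes = fd;
	cb0.aio_buf = buf0;
	cb0.aio_nbytes = sizeof(buf0);
	cb0.aio_offset = 0;
	cb0.aio_lio_opcode = LIO_READ;

	cb1 = cb0;
	cb1.aio_buf = buf1;
	cb1.aio_offset = sizeof(buf0);

	list[0] = &cb0;
	list[1] = &cb1;

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGUSR1;

	if (lio_listio(LIO_NOWAIT, list, 2, &sev) == -1)
		err(1, "lio_listio");

	/* A real program would block the signal and use sigsuspend(). */
	while (!lio_done)
		usleep(1000);

	printf("reads returned %zd and %zd\n",
	    aio_return(&cb0), aio_return(&cb1));
	close(fd);
	return (0);
}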
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
|
|
|
|
* AIO daemon modifies its environment itself.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static int
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_newproc()
|
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
int error;
|
1997-11-29 01:33:10 +00:00
|
|
|
struct proc *p, *np;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1999-04-28 01:04:33 +00:00
|
|
|
p = &proc0;
|
1999-06-30 15:33:41 +00:00
|
|
|
error = fork1(p, RFPROC|RFMEM|RFNOWAIT, &np);
|
1999-05-06 18:44:42 +00:00
|
|
|
if (error)
|
1997-07-06 02:40:43 +00:00
|
|
|
return error;
|
1999-04-28 01:04:33 +00:00
|
|
|
cpu_set_fork_handler(np, aio_daemon, curproc);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Wait until the daemon is started, but continue on anyway so we can
|
1997-11-29 01:33:10 +00:00
|
|
|
* handle error conditions.
|
|
|
|
*/
|
1997-11-30 04:36:31 +00:00
|
|
|
error = tsleep(np, PZERO, "aiosta", aiod_timeout);
|
|
|
|
num_aio_procs++;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
1997-11-30 04:36:31 +00:00
|
|
|
* Try the high-performance physio method for eligible VCHR devices. This
|
2000-01-14 02:53:29 +00:00
|
|
|
* routine doesn't require the use of any additional threads, and has low overhead.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
int
|
2000-01-14 02:53:29 +00:00
|
|
|
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct aiocb *cb;
|
|
|
|
struct file *fp;
|
|
|
|
struct buf *bp;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct filedesc *fdp;
|
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj;
|
1997-11-29 01:33:10 +00:00
|
|
|
int fd;
|
|
|
|
int s;
|
2000-04-16 18:53:38 +00:00
|
|
|
int cnt, notify;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
cb = &aiocbe->uaiocb;
|
1997-11-29 01:33:10 +00:00
|
|
|
fdp = p->p_fd;
|
|
|
|
fd = cb->aio_fildes;
|
|
|
|
fp = fdp->fd_ofiles[fd];
|
|
|
|
|
1999-11-07 13:09:09 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE)
|
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
1997-12-01 07:01:45 +00:00
|
|
|
|
2000-01-17 21:18:39 +00:00
|
|
|
/*
|
|
|
|
* If it's not a disk, we don't want to return a positive error.
|
|
|
|
* It causes the aio code to not fall through to try the thread
|
|
|
|
* way when you're talking to a regular file.
|
|
|
|
*/
|
|
|
|
if (!vn_isdisk(vp, &error)) {
|
|
|
|
if (error == ENOTBLK)
|
|
|
|
return (-1);
|
|
|
|
else
|
|
|
|
return (error);
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
1999-11-07 13:09:09 +00:00
|
|
|
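/* The transfer must be a multiple of the device's physical block size. */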
if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
|
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
1999-11-07 13:09:09 +00:00
|
|
|
if ((cb->aio_nbytes > MAXPHYS) && (num_buf_aio >= max_buf_aio))
|
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
1999-11-07 13:09:09 +00:00
|
|
|
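/* Honor the per-process limit on outstanding physio buffers. */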
if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
|
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
cnt = cb->aio_nbytes;
|
1999-11-07 13:09:09 +00:00
|
|
|
if (cnt > MAXPHYS)
|
|
|
|
return (-1);
|
1997-11-30 04:36:31 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Physical I/O is charged directly to the process, so we don't have to
|
|
|
|
* fake it.
|
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
|
|
|
aiocbe->inputcharge = 0;
|
|
|
|
aiocbe->outputcharge = 0;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
ki->kaio_buffer_count++;
|
1997-12-01 07:01:45 +00:00
|
|
|
|
|
|
|
lj = aiocbe->lio;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_buffer_count++;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Create and build a buffer header for a transfer. */
|
1999-01-21 08:29:12 +00:00
|
|
|
bp = (struct buf *)getpbuf(NULL);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Get a copy of the kva from the physical buffer.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
1999-05-06 20:00:34 +00:00
|
|
|
bp->b_caller1 = p;
|
1999-08-14 11:40:51 +00:00
|
|
|
bp->b_dev = vp->v_rdev;
|
1997-11-29 01:33:10 +00:00
|
|
|
error = bp->b_error = 0;
|
|
|
|
|
|
|
|
bp->b_bcount = cb->aio_nbytes;
|
|
|
|
bp->b_bufsize = cb->aio_nbytes;
|
2000-03-20 10:44:49 +00:00
|
|
|
bp->b_flags = B_PHYS;
|
1997-11-29 01:33:10 +00:00
|
|
|
bp->b_iodone = aio_physwakeup;
|
|
|
|
bp->b_saveaddr = bp->b_data;
|
2000-01-14 02:53:29 +00:00
|
|
|
bp->b_data = (void *)cb->aio_buf;
|
1997-11-29 01:33:10 +00:00
|
|
|
bp->b_blkno = btodb(cb->aio_offset);
|
|
|
|
|
1999-10-30 06:32:05 +00:00
|
|
|
if (cb->aio_lio_opcode == LIO_WRITE) {
|
2000-03-20 10:44:49 +00:00
|
|
|
bp->b_iocmd = BIO_WRITE;
|
1999-10-30 06:32:05 +00:00
|
|
|
if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_READ)) {
|
|
|
|
error = EFAULT;
|
|
|
|
goto doerror;
|
|
|
|
}
|
|
|
|
} else {
|
2000-03-20 10:44:49 +00:00
|
|
|
bp->b_iocmd = BIO_READ;
|
1999-10-30 06:32:05 +00:00
|
|
|
if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_WRITE)) {
|
|
|
|
error = EFAULT;
|
|
|
|
goto doerror;
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Bring buffer into kernel space. */
|
1997-11-29 01:33:10 +00:00
|
|
|
vmapbuf(bp);
|
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
s = splbio();
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->bp = bp;
|
|
|
|
bp->b_spc = (void *)aiocbe;
|
|
|
|
TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBQBUF;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
cb->_aiocb_private.status = cb->aio_nbytes;
|
|
|
|
num_buf_aio++;
|
1997-11-29 01:33:10 +00:00
|
|
|
bp->b_error = 0;
|
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
/* Perform transfer. */
|
2000-03-20 11:29:10 +00:00
|
|
|
DEV_STRATEGY(bp, 0);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
notify = 0;
|
	s = splbio();
	/*
	 * If we had an error invoking the request, or an error in processing
	 * the request before we have returned, we process it as an error in
	 * transfer.  Note that such an I/O error is not indicated immediately,
	 * but is returned using the aio_error mechanism.  In this case,
	 * aio_suspend will return immediately.
	 */
	if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) {
		struct aiocb *job = aiocbe->uuaiocb;

		aiocbe->uaiocb._aiocb_private.status = 0;
		suword(&job->_aiocb_private.status, 0);
		aiocbe->uaiocb._aiocb_private.error = bp->b_error;
		suword(&job->_aiocb_private.error, bp->b_error);

		ki->kaio_buffer_finished_count++;

		if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
			aiocbe->jobstate = JOBST_JOBBFINISHED;
			aiocbe->jobflags |= AIOCBLIST_DONE;
			TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
			TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
			TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
			notify = 1;
		}
	}
	splx(s);

	if (notify)
		KNOTE(&aiocbe->klist, 0);
	return 0;

doerror:
	ki->kaio_buffer_count--;
	if (lj)
		lj->lioj_buffer_count--;
	aiocbe->bp = NULL;
	relpbuf(bp, NULL);
	return error;
}
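/*
 * Illustrative userland sketch (not part of this kernel file): as the
 * comment above notes, an error in the physio transfer is not reported by
 * the submitting call itself; it surfaces later through aio_error() and
 * aio_return().  A caller that polls instead of blocking in aio_suspend()
 * might look roughly like this ("acb" is assumed to have been submitted
 * already):
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	ssize_t
 *	reap(struct aiocb *acb)
 *	{
 *		int err;
 *
 *		while ((err = aio_error(acb)) == EINPROGRESS)
 *			usleep(1000);
 *		if (err != 0) {
 *			errno = err;
 *			return (-1);
 *		}
 *		return (aio_return(acb));
 *	}
 *
 * aio_error() returns EINPROGRESS while the job is in flight, 0 on success,
 * or the error code recorded by the completion paths above.
 */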
/*
 * This waits/tests physio completion.
 */
int
aio_fphysio(struct proc *p, struct aiocblist *iocb, int flgwait)
{
	int s;
	struct buf *bp;
	int error;

	bp = iocb->bp;

	s = splbio();
	if (flgwait == 0) {
		if ((bp->b_flags & B_DONE) == 0) {
			splx(s);
			return EINPROGRESS;
		}
	}

	while ((bp->b_flags & B_DONE) == 0) {
		if (tsleep((caddr_t)bp, PRIBIO, "physstr", aiod_timeout)) {
			if ((bp->b_flags & B_DONE) == 0) {
				splx(s);
				return EINPROGRESS;
			} else
				break;
		}
	}
	splx(s);

	/* Release mapping into kernel space. */
	vunmapbuf(bp);
	iocb->bp = 0;

	error = 0;

	/* Check for an error. */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	relpbuf(bp, NULL);
	return (error);
}

#endif /* VFS_AIO */
/*
 * Wake up aio requests that may be serviceable now.
 */
void
aio_swake(struct socket *so, struct sockbuf *sb)
{
#ifndef VFS_AIO
	return;
#else
	struct aiocblist *cb, *cbn;
	struct proc *p;
	struct kaioinfo *ki = NULL;
	int opcode, wakecount = 0;
	struct aioproclist *aiop;

	if (sb == &so->so_snd) {
		opcode = LIO_WRITE;
		so->so_snd.sb_flags &= ~SB_AIO;
	} else {
		opcode = LIO_READ;
		so->so_rcv.sb_flags &= ~SB_AIO;
	}

	for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
		cbn = TAILQ_NEXT(cb, list);
		if (opcode == cb->uaiocb.aio_lio_opcode) {
			p = cb->userproc;
			ki = p->p_aioinfo;
			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
			TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
			wakecount++;
			if (cb->jobstate != JOBST_JOBQGLOBAL)
				panic("invalid queue value");
		}
	}

	while (wakecount--) {
		if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
			TAILQ_REMOVE(&aio_freeproc, aiop, list);
			TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
			aiop->aioprocflags &= ~AIOP_FREE;
			wakeup(aiop->aioproc);
		}
	}
#endif /* VFS_AIO */
}
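/*
 * Illustrative userland sketch (not part of this kernel file): the wakeup
 * above is what eventually services an asynchronous read that was queued on
 * a socket with no data available at submission time.  From the application
 * side this is ordinary POSIX AIO on a socket descriptor; "sock" is assumed
 * to be a connected socket.
 *
 *	#include <aio.h>
 *	#include <string.h>
 *
 *	static struct aiocb acb;
 *	static char sockbuf[4096];
 *
 *	int
 *	start_sock_read(int sock)
 *	{
 *		memset(&acb, 0, sizeof(acb));
 *		acb.aio_fildes = sock;
 *		acb.aio_buf = sockbuf;
 *		acb.aio_nbytes = sizeof(sockbuf);
 *		return (aio_read(&acb));
 *	}
 *
 * If the socket is not readable when this is queued, the job parks on
 * so_aiojobq until the socket layer calls aio_swake() above.
 */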
#ifdef VFS_AIO
/*
 * Queue a new AIO request.  Choosing either the threaded or direct physio
 * VCHR technique is done in this code.
 */
static int
_aio_aqueue(struct proc *p, struct aiocb *job, struct aio_liojob *lj, int type)
{
	struct filedesc *fdp;
	struct file *fp;
	unsigned int fd;
	struct socket *so;
	int s;
	int error = 0;
	int opcode;
	struct aiocblist *aiocbe;
	struct aioproclist *aiop;
	struct kaioinfo *ki;

	if ((aiocbe = TAILQ_FIRST(&aio_freejobs)) != NULL)
		TAILQ_REMOVE(&aio_freejobs, aiocbe, list);
	else
		aiocbe = zalloc(aiocb_zone);

	aiocbe->inputcharge = 0;
	aiocbe->outputcharge = 0;
	SLIST_INIT(&aiocbe->klist);

	suword(&job->_aiocb_private.status, -1);
	suword(&job->_aiocb_private.error, 0);
	suword(&job->_aiocb_private.kernelinfo, -1);

	error = copyin((caddr_t)job, (caddr_t)&aiocbe->uaiocb,
	    sizeof aiocbe->uaiocb);
	if (error) {
		suword(&job->_aiocb_private.error, error);

		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		return error;
	}

	/* Save userspace address of the job info. */
	aiocbe->uuaiocb = job;

	/* Get the opcode. */
	if (type != LIO_NOP)
		aiocbe->uaiocb.aio_lio_opcode = type;
	opcode = aiocbe->uaiocb.aio_lio_opcode;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = aiocbe->uaiocb.aio_fildes;
	if (fd >= fdp->fd_nfiles) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) ==
	    0))) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EBADF);
		return EBADF;
	}

	if (aiocbe->uaiocb.aio_offset == -1LL) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EINVAL);
		return EINVAL;
	}

	error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
	if (error) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0)
			suword(&job->_aiocb_private.error, EINVAL);
		return error;
	}

	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
	if (jobrefid == LONG_MAX)
		jobrefid = 1;
	else
		jobrefid++;

	if (opcode == LIO_NOP) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.error, 0);
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.kernelinfo, 0);
		}
		return 0;
	}

	if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
		TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
		if (type == 0) {
			suword(&job->_aiocb_private.status, 0);
			suword(&job->_aiocb_private.error, EINVAL);
		}
		return EINVAL;
	}

	/*
	 * XXX
	 * Figure out how to do this properly.  This currently won't
	 * work on the alpha, since we're passing in a pointer via
	 * aio_lio_opcode, which is an int.
	 */
	{
		struct kevent kev, *kevp;
		struct kqueue *kq;

		kevp = (struct kevent *)job->aio_lio_opcode;
		if (kevp == NULL)
			goto no_kqueue;

		error = copyin((caddr_t)kevp, (caddr_t)&kev, sizeof(kev));
		if (error)
			goto aqueue_fail;

		if ((u_int)kev.ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev.ident]) == NULL ||
		    (fp->f_type != DTYPE_KQUEUE)) {
			error = EBADF;
			goto aqueue_fail;
		}
		kq = (struct kqueue *)fp->f_data;
		kev.ident = (u_long)aiocbe;
		kev.filter = EVFILT_AIO;
		kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
		error = kqueue_register(kq, &kev, p);
aqueue_fail:
		if (error) {
			TAILQ_INSERT_HEAD(&aio_freejobs, aiocbe, list);
			if (type == 0)
				suword(&job->_aiocb_private.error, error);
			return (error);
		}
no_kqueue:
	}
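	/*
	 * Illustrative userland sketch (not part of this kernel file) of the
	 * interim kevent hand-off handled above: the caller stashes a pointer
	 * to a struct kevent, whose ident names a kqueue descriptor, in the
	 * aiocb's aio_lio_opcode field before submitting.  This only mirrors
	 * what the block above copies in and registers; per the XXX comment
	 * it is a temporary hack rather than a stable interface, and "fd" and
	 * "buf" here are placeholders.
	 *
	 *	struct kevent kev;
	 *	struct aiocb acb;
	 *	int kq = kqueue();
	 *
	 *	memset(&acb, 0, sizeof(acb));
	 *	acb.aio_fildes = fd;
	 *	acb.aio_buf = buf;
	 *	acb.aio_nbytes = sizeof(buf);
	 *	memset(&kev, 0, sizeof(kev));
	 *	kev.ident = kq;
	 *	acb.aio_lio_opcode = (int)(intptr_t)&kev;
	 *	aio_read(&acb);
	 *
	 * Completion is then collected with kevent(2) on kq.
	 */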
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.error, EINPROGRESS);
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->userproc = p;
|
|
|
|
aiocbe->jobflags = 0;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->lio = lj;
|
|
|
|
ki = p->p_aioinfo;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (fp->f_type == DTYPE_SOCKET) {
|
|
|
|
/*
|
|
|
|
* Alternate queueing for socket ops: Reach down into the
|
|
|
|
* descriptor to get the socket data. Then check to see if the
|
|
|
|
* socket is ready to be read or written (based on the requested
|
|
|
|
* operation).
|
|
|
|
*
|
|
|
|
* If it is not ready for io, then queue the aiocbe on the
|
|
|
|
* socket, and set the flags so we get a call when sbnotify()
|
|
|
|
* happens.
|
|
|
|
*/
|
|
|
|
so = (struct socket *)fp->f_data;
|
|
|
|
s = splnet();
|
|
|
|
if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
|
|
|
|
LIO_WRITE) && (!sowriteable(so)))) {
|
|
|
|
TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
|
|
|
|
if (opcode == LIO_READ)
|
|
|
|
so->so_rcv.sb_flags |= SB_AIO;
|
|
|
|
else
|
|
|
|
so->so_snd.sb_flags |= SB_AIO;
|
|
|
|
aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
|
|
|
|
ki->kaio_queue_count++;
|
|
|
|
num_queue_count++;
|
|
|
|
splx(s);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((error = aio_qphysio(p, aiocbe)) == 0)
|
1997-11-29 01:33:10 +00:00
|
|
|
return 0;
|
2000-01-14 02:53:29 +00:00
|
|
|
else if (error > 0) {
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.status, 0);
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = error;
|
|
|
|
suword(&job->_aiocb_private.error, error);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* No buffer for daemon I/O. */
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->bp = NULL;
|
|
|
|
|
|
|
|
ki->kaio_queue_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_queue_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBQGLOBAL;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
num_queue_count++;
|
1997-11-29 01:33:10 +00:00
|
|
|
error = 0;
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If we don't have a free AIO process, and we are below our quota, then
|
|
|
|
* start one. Otherwise, depend on the subsequent I/O completions to
|
|
|
|
* pick up this job. If we don't successfully create the new process
|
|
|
|
* (thread) due to resource issues, we return an error for now (EAGAIN),
|
|
|
|
* which is likely not the correct thing to do.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
retryproc:
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
|
|
|
|
aiop->aioprocflags &= ~AIOP_FREE;
|
|
|
|
wakeup(aiop->aioproc);
|
1997-11-29 01:33:10 +00:00
|
|
|
} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
|
2000-01-14 02:53:29 +00:00
|
|
|
((ki->kaio_active_count + num_aio_resv_start) <
|
|
|
|
ki->kaio_maxactive_count)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
num_aio_resv_start++;
|
|
|
|
if ((error = aio_newproc()) == 0) {
|
1997-11-30 04:36:31 +00:00
|
|
|
num_aio_resv_start--;
|
1997-12-01 18:41:08 +00:00
|
|
|
p->p_retval[0] = 0;
|
1997-11-29 01:33:10 +00:00
|
|
|
goto retryproc;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-11-30 04:36:31 +00:00
|
|
|
num_aio_resv_start--;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-11-29 01:33:10 +00:00
|
|
|
return error;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* This routine queues an AIO request, checking for quotas.
|
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
static int
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_aqueue(struct proc *p, struct aiocb *job, int type)
|
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (p->p_aioinfo == NULL)
|
1997-07-06 02:40:43 +00:00
|
|
|
aio_init_aioinfo(p);
|
|
|
|
|
|
|
|
if (num_queue_count >= max_queue_count)
|
|
|
|
return EAGAIN;
|
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
|
|
|
|
return EAGAIN;
|
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
return _aio_aqueue(p, job, NULL, type);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2000-07-28 23:10:10 +00:00
|
|
|
#endif /* VFS_AIO */
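/*
 * Illustrative userland sketch (not part of this file; the helper name and
 * buffer size are made up): roughly how a request reaches aio_aqueue() via
 * aio_read(2), and how the quota checks above surface to the caller --
 * aio_read(2) fails with errno set to EAGAIN when the global or per-process
 * queue quota is exhausted.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <string.h>
 *
 *	static struct aiocb acb;
 *	static char buf[8192];
 *
 *	int
 *	queue_read(int fd)
 *	{
 *		memset(&acb, 0, sizeof(acb));
 *		acb.aio_fildes = fd;
 *		acb.aio_buf = buf;
 *		acb.aio_nbytes = sizeof(buf);
 *		acb.aio_offset = 0;
 *		if (aio_read(&acb) == -1)
 *			return (errno);
 *		return (0);
 *	}
 */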
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Support the aio_return system call; as a side effect, kernel resources are
|
|
|
|
* released.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
int
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_return(struct proc *p, struct aio_return_args *uap)
|
|
|
|
{
|
2000-02-23 07:44:25 +00:00
|
|
|
#ifndef VFS_AIO
|
|
|
|
return ENOSYS;
|
|
|
|
#else
|
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
1998-10-25 17:44:59 +00:00
|
|
|
int jobref;
|
1997-11-30 04:36:31 +00:00
|
|
|
struct aiocblist *cb, *ncb;
|
1997-12-01 07:01:45 +00:00
|
|
|
struct aiocb *ujob;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (ki == NULL)
|
1997-07-06 02:40:43 +00:00
|
|
|
return EINVAL;
|
|
|
|
|
1997-12-01 07:01:45 +00:00
|
|
|
ujob = uap->aiocbp;
|
|
|
|
|
|
|
|
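/*
 * The queueing path stored a kernel-assigned job reference in the caller's
 * aiocb (_aiocb_private.kernelinfo); fetch it back from user space so the
 * matching job can be looked up on the done queues below.
 */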
jobref = fuword(&ujob->_aiocb_private.kernelinfo);
|
1997-11-29 01:33:10 +00:00
|
|
|
if (jobref == -1 || jobref == 0)
|
1997-07-06 02:40:43 +00:00
|
|
|
return EINVAL;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb,
|
|
|
|
plist)) {
|
|
|
|
if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
jobref) {
|
|
|
|
splx(s);
|
1997-12-01 07:01:45 +00:00
|
|
|
if (ujob == cb->uuaiocb) {
|
2000-01-14 02:53:29 +00:00
|
|
|
p->p_retval[0] =
|
|
|
|
cb->uaiocb._aiocb_private.status;
|
|
|
|
} else
|
1997-12-01 07:01:45 +00:00
|
|
|
p->p_retval[0] = EFAULT;
|
1997-11-29 01:33:10 +00:00
|
|
|
if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
|
2000-01-14 02:53:29 +00:00
|
|
|
curproc->p_stats->p_ru.ru_oublock +=
|
|
|
|
cb->outputcharge;
|
1997-11-29 01:33:10 +00:00
|
|
|
cb->outputcharge = 0;
|
|
|
|
} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
|
2000-01-14 02:53:29 +00:00
|
|
|
curproc->p_stats->p_ru.ru_inblock +=
|
|
|
|
cb->inputcharge;
|
1997-11-29 01:33:10 +00:00
|
|
|
cb->inputcharge = 0;
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
aio_free_entry(cb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
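/*
 * Not found on the daemon-completed queue; scan the physio (buffer) done
 * queue for VCHR-backed jobs, under splbio() since that queue is filled
 * from bio completion context.
 */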
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
|
1997-11-30 04:36:31 +00:00
|
|
|
ncb = TAILQ_NEXT(cb, plist);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
|
|
|
|
== jobref) {
|
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
1997-12-01 07:01:45 +00:00
|
|
|
if (ujob == cb->uuaiocb) {
|
2000-01-14 02:53:29 +00:00
|
|
|
p->p_retval[0] =
|
|
|
|
cb->uaiocb._aiocb_private.status;
|
|
|
|
} else
|
1997-12-01 07:01:45 +00:00
|
|
|
p->p_retval[0] = EFAULT;
|
1997-11-30 04:36:31 +00:00
|
|
|
aio_free_entry(cb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
return (EINVAL);
|
2000-02-23 07:44:25 +00:00
|
|
|
#endif /* VFS_AIO */
|
1997-07-06 02:40:43 +00:00
|
|
|
}
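/*
 * Illustrative userland sketch (not part of this file; the helper name is
 * made up): the usual completion pattern against the code above.  aio_error(2)
 * is polled until the request leaves EINPROGRESS, then aio_return(2) is
 * called exactly once; it reports the final status and lets the kernel
 * release the job entry, as aio_free_entry() does above.
 *
 *	#include <sys/types.h>
 *	#include <aio.h>
 *	#include <errno.h>
 *
 *	ssize_t
 *	reap(struct aiocb *acb)
 *	{
 *		int err;
 *
 *		while ((err = aio_error(acb)) == EINPROGRESS)
 *			;
 *		if (err != 0) {
 *			errno = err;
 *			return (-1);
 *		}
 *		return (aio_return(acb));
 *	}
 */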
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Allow a process to wake up when any of the I/O requests are completed.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
int
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_suspend(struct proc *p, struct aio_suspend_args *uap)
|
|
|
|
{
|
2000-02-23 07:44:25 +00:00
|
|
|
#ifndef VFS_AIO
|
|
|
|
return ENOSYS;
|
|
|
|
#else
|
1997-11-07 08:53:44 +00:00
|
|
|
struct timeval atv;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct timespec ts;
|
|
|
|
struct aiocb *const *cbptr, *cbp;
|
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct aiocblist *cb;
|
|
|
|
int i;
|
1997-11-30 04:36:31 +00:00
|
|
|
int njoblist;
|
1997-07-06 02:40:43 +00:00
|
|
|
int error, s, timo;
|
1997-12-01 07:01:45 +00:00
|
|
|
int *ijoblist;
|
|
|
|
struct aiocb **ujoblist;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
if (uap->nent >= AIO_LISTIO_MAX)
|
|
|
|
return EINVAL;
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
timo = 0;
|
|
|
|
if (uap->timeout) {
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get timespec struct. */
|
|
|
|
if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
|
1997-07-06 02:40:43 +00:00
|
|
|
return error;
|
|
|
|
|
|
|
|
if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
|
|
|
|
return (EINVAL);
|
|
|
|
|
1998-12-15 17:38:33 +00:00
|
|
|
TIMESPEC_TO_TIMEVAL(&atv, &ts);
|
1997-07-06 02:40:43 +00:00
|
|
|
if (itimerfix(&atv))
|
|
|
|
return (EINVAL);
|
1998-03-30 09:56:58 +00:00
|
|
|
timo = tvtohz(&atv);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
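/*
 * Illustrative caller's view (not part of this file; variable names are made
 * up): the timeout converted to clock ticks above arrives as a relative
 * timespec.  A 500 ms bound, for example:
 *
 *	struct timespec to = { 0, 500000000 };
 *	const struct aiocb *list[1] = { &acb };
 *
 *	if (aio_suspend(list, 1, &to) == -1 && errno == EAGAIN)
 *		...;	the wait timed out before any listed request finished
 */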
|
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
|
|
|
return EAGAIN;
|
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
njoblist = 0;
|
1997-12-01 07:01:45 +00:00
|
|
|
ijoblist = zalloc(aiol_zone);
|
|
|
|
ujoblist = zalloc(aiol_zone);
|
1997-07-06 02:40:43 +00:00
|
|
|
cbptr = uap->aiocbp;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < uap->nent; i++) {
|
|
|
|
cbp = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
|
1997-11-30 04:36:31 +00:00
|
|
|
if (cbp == 0)
|
|
|
|
continue;
|
1997-12-01 07:01:45 +00:00
|
|
|
ujoblist[njoblist] = cbp;
|
|
|
|
ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
njoblist++;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
if (njoblist == 0) {
|
|
|
|
zfree(aiol_zone, ijoblist);
|
|
|
|
zfree(aiol_zone, ujoblist);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
return 0;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
error = 0;
|
2000-01-14 02:53:29 +00:00
|
|
|
for (;;) {
|
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb =
|
|
|
|
TAILQ_NEXT(cb, plist)) {
|
|
|
|
for (i = 0; i < njoblist; i++) {
|
|
|
|
if (((intptr_t)
|
|
|
|
cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
ijoblist[i]) {
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
if (ujoblist[i] != cb->uuaiocb)
|
|
|
|
error = EINVAL;
|
|
|
|
zfree(aiol_zone, ijoblist);
|
|
|
|
zfree(aiol_zone, ujoblist);
|
|
|
|
return error;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
|
|
|
|
TAILQ_NEXT(cb, plist)) {
|
|
|
|
for (i = 0; i < njoblist; i++) {
|
|
|
|
if (((intptr_t)
|
|
|
|
cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
ijoblist[i]) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
if (ujoblist[i] != cb->uuaiocb)
|
|
|
|
error = EINVAL;
|
|
|
|
zfree(aiol_zone, ijoblist);
|
|
|
|
zfree(aiol_zone, ujoblist);
|
|
|
|
return error;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2000-01-14 02:53:29 +00:00
|
|
|
error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo);
|
2000-01-20 08:15:13 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-20 01:59:58 +00:00
|
|
|
if (error == ERESTART || error == EINTR) {
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
zfree(aiol_zone, ijoblist);
|
|
|
|
zfree(aiol_zone, ujoblist);
|
1997-07-06 02:40:43 +00:00
|
|
|
return EINTR;
|
|
|
|
} else if (error == EWOULDBLOCK) {
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
zfree(aiol_zone, ijoblist);
|
|
|
|
zfree(aiol_zone, ujoblist);
|
1997-07-06 02:40:43 +00:00
|
|
|
return EAGAIN;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* NOTREACHED */
|
|
|
|
return EINVAL;
|
2000-02-23 07:44:25 +00:00
|
|
|
#endif /* VFS_AIO */
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-06-16 00:27:26 +00:00
|
|
|
|
|
|
|
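/*
 * Illustrative userland sketch only (not part of this file): how the
 * aio_suspend() syscall above is normally driven through the POSIX <aio.h>
 * wrappers.  The descriptor "fd" and buffer "buf" are hypothetical names.
 *
 *	#include <aio.h>
 *
 *	struct aiocb cb = { 0 };
 *	const struct aiocb *list[1] = { &cb };
 *
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	if (aio_read(&cb) == 0)
 *		aio_suspend(list, 1, NULL);	(block until cb completes)
 */
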
/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
aio_cancel(struct proc *p, struct aio_cancel_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct kaioinfo *ki;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct filedesc *fdp;
	struct socket *so;
	struct proc *po;
	int s, error;
	int cancelled = 0;
	int notcancelled = 0;
	struct vnode *vp;

	fdp = p->p_fd;

	fp = fdp->fd_ofiles[uap->fd];

	if (fp == NULL) {
		return EBADF;
	}

	if (fp->f_type == DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if (vn_isdisk(vp, &error)) {
			p->p_retval[0] = AIO_NOTCANCELED;
			return 0;
		}
	} else if (fp->f_type == DTYPE_SOCKET) {
		so = (struct socket *)fp->f_data;

		s = splnet();

		for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
			cbn = TAILQ_NEXT(cbe, list);
			if ((uap->aiocbp == NULL) ||
			    (uap->aiocbp == cbe->uuaiocb)) {
				po = cbe->userproc;
				ki = po->p_aioinfo;
				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
				TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
				if (ki->kaio_flags & KAIO_WAKEUP) {
					wakeup(po);
				}
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				cancelled++;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
				if (uap->aiocbp)
					break;
			}
		}

		splx(s);

		if ((cancelled) && (uap->aiocbp)) {
			p->p_retval[0] = AIO_CANCELED;
			return 0;
		}

	}

	ki = p->p_aioinfo;

	s = splnet();

	for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
		cbn = TAILQ_NEXT(cbe, plist);

		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
		    ((uap->aiocbp == NULL) ||
		    (uap->aiocbp == cbe->uuaiocb))) {

			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
				TAILQ_REMOVE(&aio_jobs, cbe, list);
				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
				TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
				    plist);
				cancelled++;
				ki->kaio_queue_finished_count++;
				cbe->jobstate = JOBST_JOBFINISHED;
				cbe->uaiocb._aiocb_private.status = -1;
				cbe->uaiocb._aiocb_private.error = ECANCELED;
				/* XXX cancelled, knote? */
				if (cbe->uaiocb.aio_sigevent.sigev_notify ==
				    SIGEV_SIGNAL)
					psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
			} else {
				notcancelled++;
			}
		}
	}

	splx(s);

	if (notcancelled) {
		p->p_retval[0] = AIO_NOTCANCELED;
		return 0;
	}

	if (cancelled) {
		p->p_retval[0] = AIO_CANCELED;
		return 0;
	}

	p->p_retval[0] = AIO_ALLDONE;

	return 0;
#endif /* VFS_AIO */
}

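/*
 * Illustrative userland sketch only (not part of this file, names
 * hypothetical): the status computed above is what aio_cancel(2) hands back,
 * and a job cancelled here later reports ECANCELED through aio_error(2).
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *
 *	int rv = aio_cancel(fd, &cb);	(pass NULL to target every job on fd)
 *	if (rv == AIO_CANCELED)
 *		assert(aio_error(&cb) == ECANCELED);
 *	else if (rv == AIO_NOTCANCELED)
 *		; 			(request already in progress, e.g. raw disk I/O)
 */
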
/*
 * aio_error is implemented in the kernel level for compatibility purposes only.
 * For a user mode async implementation, it would be best to do it in a userland
 * subroutine.
 */
int
aio_error(struct proc *p, struct aio_error_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	int s;
	struct aiocblist *cb;
	struct kaioinfo *ki;
	int jobref;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return EINVAL;

	jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
	if ((jobref == -1) || (jobref == 0))
		return EINVAL;

	for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = cb->uaiocb._aiocb_private.error;
			return 0;
		}
	}

	s = splnet();

	for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = EINPROGRESS;
			splx(s);
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = EINPROGRESS;
			splx(s);
			return 0;
		}
	}
	splx(s);

	s = splbio();
	for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = cb->uaiocb._aiocb_private.error;
			splx(s);
			return 0;
		}
	}

	for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
	    plist)) {
		if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
		    jobref) {
			p->p_retval[0] = EINPROGRESS;
			splx(s);
			return 0;
		}
	}
	splx(s);

#if (0)
	/*
	 * Hack for lio.
	 */
	status = fuword(&uap->aiocbp->_aiocb_private.status);
	if (status == -1)
		return fuword(&uap->aiocbp->_aiocb_private.error);
#endif
	return EINVAL;
#endif /* VFS_AIO */
}

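/*
 * Illustrative userland sketch only (not part of this file, names
 * hypothetical): the EINPROGRESS / completion states returned above map onto
 * the usual polling idiom built on aio_error(2) and aio_return(2).
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	int err;
 *	while ((err = aio_error(&cb)) == EINPROGRESS)
 *		usleep(1000);
 *	if (err == 0)
 *		nread = aio_return(&cb);	(final byte count, collect once)
 *	else
 *		errno = err;			(request failed or was cancelled)
 */
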
int
aio_read(struct proc *p, struct aio_read_args *uap)
{
#ifndef VFS_AIO
	return ENOSYS;
#else
	struct filedesc *fdp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	unsigned int fd;
	int cnt;
	struct aiocb iocb;
	int error, pmodes;

	pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
	if ((pmodes & AIO_PMODE_SYNC) == 0)
		return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_READ);

	/* Get control block. */
	if ((error = copyin((caddr_t)uap->aiocbp, (caddr_t)&iocb, sizeof iocb))
	    != 0)
		return error;

	/* Get the fd info for process. */
	fdp = p->p_fd;

	/*
	 * Range check file descriptor.
	 */
	fd = iocb.aio_fildes;
	if (fd >= fdp->fd_nfiles)
		return EBADF;
	fp = fdp->fd_ofiles[fd];
	if ((fp == NULL) || ((fp->f_flag & FREAD) == 0))
		return EBADF;
	if (iocb.aio_offset == -1LL)
		return EINVAL;

	auio.uio_resid = iocb.aio_nbytes;
	if (auio.uio_resid < 0)
		return (EINVAL);

	/*
	 * Process sync simply -- queue async request.
	 */
	if ((iocb._aiocb_private.privatemodes & AIO_PMODE_SYNC) == 0)
		return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_READ);

	aiov.iov_base = (void *)iocb.aio_buf;
	aiov.iov_len = iocb.aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = iocb.aio_offset;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_procp = p;

	cnt = iocb.aio_nbytes;
	error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, p);
	if (error && (auio.uio_resid != cnt) && (error == ERESTART || error ==
	    EINTR || error == EWOULDBLOCK))
		error = 0;
	cnt -= auio.uio_resid;
	p->p_retval[0] = cnt;
	return error;
#endif /* VFS_AIO */
}

int
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_write(struct proc *p, struct aio_write_args *uap)
|
|
|
|
{
|
2000-02-23 07:44:25 +00:00
|
|
|
#ifndef VFS_AIO
|
|
|
|
return ENOSYS;
|
|
|
|
#else
|
1997-06-16 00:27:26 +00:00
|
|
|
struct filedesc *fdp;
|
|
|
|
struct file *fp;
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
|
|
|
unsigned int fd;
|
|
|
|
int cnt;
|
|
|
|
struct aiocb iocb;
|
|
|
|
int error;
|
1997-07-06 02:40:43 +00:00
|
|
|
int pmodes;
|
1997-06-16 00:27:26 +00:00
|
|
|
|
|
|
|
/*
|
1997-07-06 02:40:43 +00:00
|
|
|
* Process sync simply -- queue async request.
|
1997-06-16 00:27:26 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
pmodes = fuword(&uap->aiocbp->_aiocb_private.privatemodes);
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((pmodes & AIO_PMODE_SYNC) == 0)
|
|
|
|
return aio_aqueue(p, (struct aiocb *)uap->aiocbp, LIO_WRITE);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((error = copyin((caddr_t)uap->aiocbp, (caddr_t)&iocb, sizeof iocb))
|
|
|
|
!= 0)
|
1997-07-06 02:40:43 +00:00
|
|
|
return error;
|
1997-06-16 00:27:26 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get the fd info for process. */
|
1997-06-16 00:27:26 +00:00
|
|
|
fdp = p->p_fd;
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Range check file descriptor.
|
1997-06-16 00:27:26 +00:00
|
|
|
*/
|
|
|
|
fd = iocb.aio_fildes;
|
|
|
|
if (fd >= fdp->fd_nfiles)
|
|
|
|
return EBADF;
|
|
|
|
fp = fdp->fd_ofiles[fd];
|
|
|
|
if ((fp == NULL) || ((fp->f_flag & FWRITE) == 0))
|
|
|
|
return EBADF;
|
1997-07-06 02:40:43 +00:00
|
|
|
if (iocb.aio_offset == -1LL)
|
1997-06-16 00:27:26 +00:00
|
|
|
return EINVAL;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
aiov.iov_base = (void *)iocb.aio_buf;
|
1997-06-16 00:27:26 +00:00
|
|
|
aiov.iov_len = iocb.aio_nbytes;
|
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_offset = iocb.aio_offset;
|
|
|
|
|
|
|
|
auio.uio_resid = iocb.aio_nbytes;
|
|
|
|
if (auio.uio_resid < 0)
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
auio.uio_rw = UIO_WRITE;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
|
|
|
|
|
|
|
cnt = iocb.aio_nbytes;
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, p);
|
1997-06-16 00:27:26 +00:00
|
|
|
if (error) {
|
|
|
|
if (auio.uio_resid != cnt) {
|
2000-01-14 02:53:29 +00:00
|
|
|
if (error == ERESTART || error == EINTR || error ==
|
|
|
|
EWOULDBLOCK)
|
1997-06-16 00:27:26 +00:00
|
|
|
error = 0;
|
|
|
|
if (error == EPIPE)
|
|
|
|
psignal(p, SIGPIPE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
cnt -= auio.uio_resid;
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = cnt;
|
1997-06-16 00:27:26 +00:00
|
|
|
return error;
|
2000-02-23 07:44:25 +00:00
|
|
|
#endif /* VFS_AIO */
|
1997-06-16 00:27:26 +00:00
|
|
|
}
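/*
 * Illustrative sketch (not part of the original file): how the aio_write()
 * syscall above is typically driven from user space via the POSIX AIO API.
 * The descriptor "fd", buffer "buf" and length "len" are assumed to exist;
 * polling with aio_error()/aio_return() is only one completion strategy.
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <errno.h>
 *	#include <string.h>
 *
 *	struct aiocb cb;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = len;
 *	cb.aio_offset = 0;
 *	if (aio_write(&cb) == -1)
 *		err(1, "aio_write");
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;	// or block in aio_suspend(), or wait for the sigevent
 *	ssize_t done = aio_return(&cb);
 */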
|
|
|
|
|
|
|
|
int
|
1997-11-29 01:33:10 +00:00
|
|
|
lio_listio(struct proc *p, struct lio_listio_args *uap)
|
|
|
|
{
|
2000-02-23 07:44:25 +00:00
|
|
|
#ifndef VFS_AIO
|
|
|
|
return ENOSYS;
|
|
|
|
#else
|
1997-11-07 08:53:44 +00:00
|
|
|
int nent, nentqueued;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocb *iocb, * const *cbptr;
|
|
|
|
struct aiocblist *cb;
|
|
|
|
struct kaioinfo *ki;
|
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj;
|
1997-07-06 02:40:43 +00:00
|
|
|
int error, runningcode;
|
1997-11-29 01:33:10 +00:00
|
|
|
int nerror;
|
1997-06-16 00:27:26 +00:00
|
|
|
int i;
|
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
1997-06-16 00:27:26 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
|
1997-06-16 00:27:26 +00:00
|
|
|
return EINVAL;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
nent = uap->nent;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (nent > AIO_LISTIO_MAX)
|
1997-07-06 02:40:43 +00:00
|
|
|
return EINVAL;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (p->p_aioinfo == NULL)
|
1997-07-06 02:40:43 +00:00
|
|
|
aio_init_aioinfo(p);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((nent + num_queue_count) > max_queue_count)
|
1997-07-06 02:40:43 +00:00
|
|
|
return EAGAIN;
|
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
|
1997-07-06 02:40:43 +00:00
|
|
|
return EAGAIN;
|
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
lj = zalloc(aiolio_zone);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (!lj)
|
1997-11-30 04:36:31 +00:00
|
|
|
return EAGAIN;
|
|
|
|
|
|
|
|
lj->lioj_flags = 0;
|
|
|
|
lj->lioj_buffer_count = 0;
|
|
|
|
lj->lioj_buffer_finished_count = 0;
|
|
|
|
lj->lioj_queue_count = 0;
|
|
|
|
lj->lioj_queue_finished_count = 0;
|
|
|
|
lj->lioj_ki = ki;
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Setup signal.
|
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
|
|
|
if (uap->sig && (uap->mode == LIO_NOWAIT)) {
|
2000-01-14 02:53:29 +00:00
|
|
|
error = copyin(uap->sig, &lj->lioj_signal,
|
|
|
|
sizeof(lj->lioj_signal));
|
1997-11-30 04:36:31 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL;
|
|
|
|
lj->lioj_flags &= ~LIOJ_SIGNAL_POSTED;
|
2000-01-14 02:53:29 +00:00
|
|
|
} else
|
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_flags &= ~LIOJ_SIGNAL;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/*
|
|
|
|
* Get pointers to the list of I/O requests.
|
|
|
|
*/
|
1997-11-29 01:33:10 +00:00
|
|
|
nerror = 0;
|
|
|
|
nentqueued = 0;
|
1997-07-06 02:40:43 +00:00
|
|
|
cbptr = uap->acb_list;
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < uap->nent; i++) {
|
|
|
|
iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
|
|
|
|
if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
|
1997-11-30 04:36:31 +00:00
|
|
|
error = _aio_aqueue(p, iocb, lj, 0);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (error == 0)
|
1997-11-29 01:33:10 +00:00
|
|
|
nentqueued++;
|
2000-01-14 02:53:29 +00:00
|
|
|
else
|
1997-11-29 01:33:10 +00:00
|
|
|
nerror++;
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-10-09 04:14:41 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If we haven't queued any, then just return the error.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if (nentqueued == 0)
|
1997-11-29 01:33:10 +00:00
|
|
|
return 0;
|
1997-10-09 04:14:41 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Calculate the appropriate error return.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
runningcode = 0;
|
1997-11-29 01:33:10 +00:00
|
|
|
if (nerror)
|
1997-07-06 02:40:43 +00:00
|
|
|
runningcode = EIO;
|
|
|
|
|
|
|
|
if (uap->mode == LIO_WAIT) {
|
2000-01-14 02:53:29 +00:00
|
|
|
int command, found, jobref;
|
|
|
|
|
|
|
|
for (;;) {
|
1997-11-29 01:33:10 +00:00
|
|
|
found = 0;
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < uap->nent; i++) {
|
1997-10-09 04:14:41 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Fetch address of the control buf pointer in
|
|
|
|
* user space.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
iocb = (struct aiocb *)(intptr_t)fuword((caddr_t)&cbptr[i]);
|
|
|
|
if (((intptr_t)iocb == -1) || ((intptr_t)iocb
|
|
|
|
== 0))
|
1997-11-29 01:33:10 +00:00
|
|
|
continue;
|
1997-10-09 04:14:41 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Fetch the associated command from user space.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
command = fuword(&iocb->aio_lio_opcode);
|
1997-11-29 01:33:10 +00:00
|
|
|
if (command == LIO_NOP) {
|
|
|
|
found++;
|
1997-07-06 02:40:43 +00:00
|
|
|
continue;
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
1997-10-09 04:14:41 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
jobref = fuword(&iocb->_aiocb_private.kernelinfo);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_jobdone); cb;
|
|
|
|
cb = TAILQ_NEXT(cb, plist)) {
|
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
|
|
|
|
== jobref) {
|
|
|
|
if (cb->uaiocb.aio_lio_opcode
|
|
|
|
== LIO_WRITE) {
|
|
|
|
curproc->p_stats->p_ru.ru_oublock
|
|
|
|
+=
|
|
|
|
cb->outputcharge;
|
1997-11-30 04:36:31 +00:00
|
|
|
cb->outputcharge = 0;
|
2000-01-14 02:53:29 +00:00
|
|
|
} else if (cb->uaiocb.aio_lio_opcode
|
|
|
|
== LIO_READ) {
|
|
|
|
curproc->p_stats->p_ru.ru_inblock
|
|
|
|
+= cb->inputcharge;
|
1997-11-30 04:36:31 +00:00
|
|
|
cb->inputcharge = 0;
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
found++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb;
|
|
|
|
cb = TAILQ_NEXT(cb, plist)) {
|
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
|
|
|
|
== jobref) {
|
1997-11-30 04:36:31 +00:00
|
|
|
found++;
|
|
|
|
break;
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-10-09 04:14:41 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If all I/Os have been disposed of, then we can
|
|
|
|
* return.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if (found == nentqueued)
|
1997-07-06 02:40:43 +00:00
|
|
|
return runningcode;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2000-01-14 02:53:29 +00:00
|
|
|
error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (error == EINTR)
|
1997-07-06 02:40:43 +00:00
|
|
|
return EINTR;
|
2000-01-14 02:53:29 +00:00
|
|
|
else if (error == EWOULDBLOCK)
|
1997-07-06 02:40:43 +00:00
|
|
|
return EAGAIN;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return runningcode;
|
2000-02-23 07:44:25 +00:00
|
|
|
#endif /* VFS_AIO */
|
1997-06-16 00:27:26 +00:00
|
|
|
}
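/*
 * Illustrative sketch (not part of the original file): a minimal user-space
 * use of lio_listio() matching the LIO_NOWAIT + sigevent path handled above.
 * "fd", "buf1" and "buf2" are assumed to exist; SIGUSR1 is an arbitrary
 * choice of completion signal.
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	struct aiocb cb[2];
 *	struct aiocb *list[2] = { &cb[0], &cb[1] };
 *	struct sigevent sev;
 *
 *	memset(cb, 0, sizeof(cb));
 *	cb[0].aio_fildes = fd;
 *	cb[0].aio_buf = buf1;
 *	cb[0].aio_nbytes = sizeof(buf1);
 *	cb[0].aio_lio_opcode = LIO_READ;
 *	cb[1].aio_fildes = fd;
 *	cb[1].aio_buf = buf2;
 *	cb[1].aio_nbytes = sizeof(buf2);
 *	cb[1].aio_offset = 4096;
 *	cb[1].aio_lio_opcode = LIO_WRITE;
 *
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify = SIGEV_SIGNAL;
 *	sev.sigev_signo = SIGUSR1;	// posted when all entries finish
 *	if (lio_listio(LIO_NOWAIT, list, 2, &sev) == -1)
 *		err(1, "lio_listio");
 */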
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-07-28 23:10:10 +00:00
|
|
|
#ifdef VFS_AIO
|
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* This is a weird hack so that we can post a signal. It is safe to do so from
|
|
|
|
* a timeout routine, but *not* from an interrupt routine.
|
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
|
|
|
static void
|
2000-01-14 02:53:29 +00:00
|
|
|
process_signal(void *aioj)
|
1997-11-30 04:36:31 +00:00
|
|
|
{
|
2000-01-14 02:53:29 +00:00
|
|
|
struct aiocblist *aiocbe = aioj;
|
|
|
|
struct aio_liojob *lj = aiocbe->lio;
|
|
|
|
struct aiocb *cb = &aiocbe->uaiocb;
|
|
|
|
|
|
|
|
if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
|
|
|
|
(lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
|
|
|
|
psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
|
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
|
1997-11-30 04:36:31 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL)
|
|
|
|
psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
|
1997-11-30 04:36:31 +00:00
|
|
|
}
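/*
 * Usage note (illustrative): because of the restriction described above,
 * interrupt-time completion code never calls psignal() directly; it defers
 * the work to this routine through the callout system, as aio_physwakeup()
 * does below:
 *
 *	timeout(process_signal, aiocbe, 0);
 */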
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Interrupt handler for physio; performs the necessary process wakeups and
|
|
|
|
* signals.
|
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
1997-11-29 01:33:10 +00:00
|
|
|
static void
|
2000-01-14 02:53:29 +00:00
|
|
|
aio_physwakeup(struct buf *bp)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
1997-11-30 04:36:31 +00:00
|
|
|
struct aiocblist *aiocbe;
|
1997-11-29 01:33:10 +00:00
|
|
|
struct proc *p;
|
|
|
|
struct kaioinfo *ki;
|
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj;
|
1997-12-01 07:01:45 +00:00
|
|
|
int s;
|
|
|
|
s = splbio();
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
wakeup((caddr_t)bp);
|
1997-11-30 04:36:31 +00:00
|
|
|
bp->b_flags |= B_DONE;
|
|
|
|
|
|
|
|
aiocbe = (struct aiocblist *)bp->b_spc;
|
|
|
|
if (aiocbe) {
|
1999-05-06 20:00:34 +00:00
|
|
|
p = bp->b_caller1;
|
1997-11-30 04:36:31 +00:00
|
|
|
|
|
|
|
aiocbe->jobstate = JOBST_JOBBFINISHED;
|
|
|
|
aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = 0;
|
|
|
|
aiocbe->jobflags |= AIOCBLIST_DONE;
|
|
|
|
|
2000-04-02 15:24:56 +00:00
|
|
|
if (bp->b_ioflags & BIO_ERROR)
|
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->uaiocb._aiocb_private.error = bp->b_error;
|
|
|
|
|
|
|
|
lj = aiocbe->lio;
|
|
|
|
if (lj) {
|
|
|
|
lj->lioj_buffer_finished_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Wakeup/signal if all of the interrupt jobs are done.
|
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj->lioj_buffer_finished_count ==
|
|
|
|
lj->lioj_buffer_count) {
|
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Post a signal if it is called for.
|
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((lj->lioj_flags &
|
|
|
|
(LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
|
|
|
|
LIOJ_SIGNAL) {
|
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
|
2000-01-14 02:53:29 +00:00
|
|
|
timeout(process_signal, aiocbe, 0);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (ki) {
|
|
|
|
ki->kaio_buffer_finished_count++;
|
|
|
|
TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
|
|
|
|
TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
|
2000-04-16 18:53:38 +00:00
|
|
|
|
|
|
|
KNOTE(&aiocbe->klist, 0);
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Do the wakeup. */
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
|
|
|
|
ki->kaio_flags &= ~KAIO_WAKEUP;
|
|
|
|
wakeup(p);
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL)
|
|
|
|
timeout(process_signal, aiocbe, 0);
|
1997-11-29 01:33:10 +00:00
|
|
|
}
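The SIGEV_SIGNAL branch above is the kernel half of per-job signal delivery. A hedged userland sketch of the receiving half (the signal number, file path, and handler name are arbitrary choices, not taken from this file):

#include <aio.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t done;

static void
aio_done(int sig, siginfo_t *si, void *ctx)
{
    /* Per POSIX, si->si_value carries the sigev_value set on the request. */
    (void)sig; (void)si; (void)ctx;
    done = 1;
}

int
main(void)
{
    static char buf[4096];
    struct sigaction sa;
    sigset_t mask, omask;
    struct aiocb cb;
    int fd;

    /* Install a SIGUSR1 handler and block the signal until we wait. */
    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = aio_done;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGUSR1, &sa, NULL);
    sigemptyset(&mask);
    sigaddset(&mask, SIGUSR1);
    sigprocmask(SIG_BLOCK, &mask, &omask);

    fd = open("/etc/motd", O_RDONLY);   /* arbitrary readable file */
    if (fd == -1)
        return (1);

    memset(&cb, 0, sizeof(cb));
    cb.aio_fildes = fd;
    cb.aio_buf = buf;
    cb.aio_nbytes = sizeof(buf);
    cb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
    cb.aio_sigevent.sigev_signo = SIGUSR1;
    cb.aio_sigevent.sigev_value.sival_ptr = &cb;

    if (aio_read(&cb) == -1)
        return (1);

    /* Wait for the completion signal, then collect the result. */
    while (!done)
        sigsuspend(&omask);
    printf("read %zd bytes\n", aio_return(&cb));

    close(fd);
    return (0);
}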
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove a lot of overly verbose debugging statements.
struct aioproclist {
    int aioprocflags;                   /* AIO proc flags */
    TAILQ_ENTRY(aioproclist) list;      /* List of processes */
    struct proc *aioproc;               /* The AIO thread */
    TAILQ_HEAD(,aiocblist) jobtorun;    /* suggested job to run */
};

/*
 * data-structure for lio signal management
 */
struct aio_liojob {
    int lioj_flags;
    int lioj_buffer_count;
    int lioj_buffer_finished_count;
    int lioj_queue_count;
    int lioj_queue_finished_count;
    struct sigevent lioj_signal;        /* signal on all I/O done */
    TAILQ_ENTRY(aio_liojob) lioj_list;
    struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL         0x1     /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED  0x2     /* signal has been posted */

/*
 * per process aio data structure
 */
struct kaioinfo {
    int kaio_flags;                     /* per process kaio flags */
    int kaio_maxactive_count;           /* maximum number of AIOs */
    int kaio_active_count;              /* number of currently used AIOs */
    int kaio_qallowed_count;            /* maximum size of AIO queue */
    int kaio_queue_count;               /* size of AIO queue */
    int kaio_ballowed_count;            /* maximum number of buffers */
    int kaio_queue_finished_count;      /* number of daemon jobs finished */
    int kaio_buffer_count;              /* number of physio buffers */
    int kaio_buffer_finished_count;     /* count of I/O done */
    struct proc *kaio_p;                /* process that uses this kaio block */
    TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
    TAILQ_HEAD(,aiocblist) kaio_jobqueue;    /* job queue for process */
    TAILQ_HEAD(,aiocblist) kaio_jobdone;     /* done queue for process */
    TAILQ_HEAD(,aiocblist) kaio_bufqueue;    /* buffer job queue for process */
    TAILQ_HEAD(,aiocblist) kaio_bufdone;     /* buffer done queue for process */
};
#define KAIO_RUNDOWN    0x1     /* process is being run down */
#define KAIO_WAKEUP     0x2     /* wakeup process when there is a significant
                                   event */

TAILQ_HEAD(,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs;        /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs;     /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs;    /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p);
static void aio_onceonly(void *);
static int  aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int  aio_newproc(void);
static int  aio_aqueue(struct proc *p, struct aiocb *job, int type);
static void aio_physwakeup(struct buf *bp);
static int  aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int  aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);

SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);

static vm_zone_t kaio_zone = 0, aiop_zone = 0, aiocb_zone = 0, aiol_zone = 0,
    aiolio_zone = 0;

/*
 * Single AIOD vmspace shared amongst all of them
 */
static struct vmspace *aiovmspace = NULL;

/*
 * Startup initialization
 */
void
aio_onceonly(void *na)
{
    TAILQ_INIT(&aio_freeproc);
    TAILQ_INIT(&aio_activeproc);
    TAILQ_INIT(&aio_jobs);
    TAILQ_INIT(&aio_bufjobs);
    TAILQ_INIT(&aio_freejobs);
    kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
    aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
    aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
    aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
    aiolio_zone = zinit("AIOLIO",
        AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
    aiod_timeout = AIOD_TIMEOUT_DEFAULT;
    aiod_lifetime = AIOD_LIFETIME_DEFAULT;
    jobrefid = 1;
}
/*
 * Init the per-process aioinfo structure.
 * The aioinfo limits are set per-process for user limit (resource) management.
 */
void
aio_init_aioinfo(struct proc *p)
{
    struct kaioinfo *ki;

    if (p->p_aioinfo == NULL) {
        ki = zalloc(kaio_zone);
        p->p_aioinfo = ki;
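The rest of this initializer is not included in this hunk. Purely as a hedged sketch of how the body plausibly continues, based only on the kaioinfo fields declared above; the max_aio_per_proc, max_aio_queue_per_proc, and max_buf_aio tunable names are assumptions, not taken from these lines:

        /*
         * Sketch only (not from this hunk): zero the counters, copy the
         * per-process limits from global tunables, and set up the queues.
         */
        ki->kaio_flags = 0;
        ki->kaio_active_count = 0;
        ki->kaio_maxactive_count = max_aio_per_proc;        /* assumed tunable */
        ki->kaio_queue_count = 0;
        ki->kaio_qallowed_count = max_aio_queue_per_proc;   /* assumed tunable */
        ki->kaio_buffer_count = 0;
        ki->kaio_buffer_finished_count = 0;
        ki->kaio_ballowed_count = max_buf_aio;              /* assumed tunable */
        ki->kaio_p = p;
        TAILQ_INIT(&ki->kaio_jobqueue);
        TAILQ_INIT(&ki->kaio_jobdone);
        TAILQ_INIT(&ki->kaio_bufqueue);
        TAILQ_INIT(&ki->kaio_bufdone);
        TAILQ_INIT(&ki->kaio_liojoblist);
    }
}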
    splx(s);
}

#endif /* VFS_AIO */
int
aio_waitcomplete(struct proc *p, struct aio_waitcomplete_args *uap)
{
#ifndef VFS_AIO
    return ENOSYS;
#else
    struct timeval atv;
    struct timespec ts;
    struct aiocb **cbptr;
    struct kaioinfo *ki;
    struct aiocblist *cb = NULL;
    int error, s, timo;

    suword(uap->aiocbp, (int)NULL);

    timo = 0;
    if (uap->timeout) {
        /* Get timespec struct. */
        error = copyin((caddr_t)uap->timeout, (caddr_t)&ts, sizeof(ts));
        if (error)
            return error;

        if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
            return (EINVAL);

        TIMESPEC_TO_TIMEVAL(&atv, &ts);
        if (itimerfix(&atv))
            return (EINVAL);
        timo = tvtohz(&atv);
    }

    ki = p->p_aioinfo;
    if (ki == NULL)
        return EAGAIN;

    cbptr = uap->aiocbp;

    for (;;) {
        if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
            suword(uap->aiocbp, (int)cb->uuaiocb);
            p->p_retval[0] = cb->uaiocb._aiocb_private.status;
            if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
                curproc->p_stats->p_ru.ru_oublock += cb->outputcharge;
                cb->outputcharge = 0;
            } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
                curproc->p_stats->p_ru.ru_inblock += cb->inputcharge;
                cb->inputcharge = 0;
            }
            aio_free_entry(cb);
            return cb->uaiocb._aiocb_private.error;
        }

        s = splbio();
        if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0) {
            splx(s);
            suword(uap->aiocbp, (int)cb->uuaiocb);
            p->p_retval[0] = cb->uaiocb._aiocb_private.status;
            aio_free_entry(cb);
            return cb->uaiocb._aiocb_private.error;
        }

        ki->kaio_flags |= KAIO_WAKEUP;
        error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo);
        splx(s);

        if (error == ERESTART)
            return EINTR;
        else if (error < 0)
            return error;
        else if (error == EINTR)
            return EINTR;
        else if (error == EWOULDBLOCK)
            return EAGAIN;
    }
#endif /* VFS_AIO */
}
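aio_waitcomplete() above is FreeBSD-specific: it hands back the next completed job's user aiocb pointer and that job's return value in one call, so no separate aio_return() is needed. A hedged userland sketch (the path is made up and error handling is trimmed):

#include <aio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    static char buf[8192];
    struct aiocb cb, *donecb;
    ssize_t nbytes;
    int fd;

    fd = open("/tmp/scratch", O_RDWR | O_CREAT, 0600);  /* made-up path */
    if (fd == -1)
        return (1);

    memset(&cb, 0, sizeof(cb));
    cb.aio_fildes = fd;
    cb.aio_buf = buf;
    cb.aio_nbytes = sizeof(buf);

    if (aio_write(&cb) == -1)
        return (1);

    /*
     * Block until any outstanding job for this process completes; the
     * kernel stores the user aiocb pointer and returns the job's status.
     */
    nbytes = aio_waitcomplete(&donecb, NULL);
    printf("job %p finished, returned %zd\n", (void *)donecb, nbytes);

    close(fd);
    return (0);
}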
#ifndef VFS_AIO
static int
filt_aioattach(struct knote *kn)
{

    return (ENXIO);
}

struct filterops aio_filtops =
    { 0, filt_aioattach, NULL, NULL };

#else
static int
filt_aioattach(struct knote *kn)
{
    struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;

    /*
     * The aiocbe pointer must be validated before using it, so
     * registration is restricted to the kernel; the user cannot
     * set EV_FLAG1.
     */
    if ((kn->kn_flags & EV_FLAG1) == 0)
        return (EPERM);
    kn->kn_flags &= ~EV_FLAG1;

    SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);

    return (0);
}

static void
filt_aiodetach(struct knote *kn)
{
    struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;
    int s = splhigh();  /* XXX no clue, so overkill */

    SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
    splx(s);
}

/*ARGSUSED*/
static int
filt_aio(struct knote *kn, long hint)
{
    struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_id;

    kn->kn_data = 0;    /* XXX data returned? */
    if (aiocbe->jobstate != JOBST_JOBFINISHED &&
        aiocbe->jobstate != JOBST_JOBBFINISHED)
        return (0);
    kn->kn_flags |= EV_EOF;
    return (1);
}

struct filterops aio_filtops =
    { 0, filt_aioattach, filt_aiodetach, filt_aio };
#endif /* VFS_AIO */
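These filter routines back the EVFILT_AIO kqueue filter that the KNOTE() call in the completion path fires. As a hedged sketch of the userland side, following the SIGEV_KEVENT hookup as documented in later FreeBSD releases (the exact plumbing in this revision may differ; the input path is arbitrary):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <aio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    static char buf[4096];
    struct aiocb cb;
    struct kevent ev;
    int fd, kq;

    kq = kqueue();
    fd = open("/etc/motd", O_RDONLY);   /* arbitrary readable file */
    if (kq == -1 || fd == -1)
        return (1);

    memset(&cb, 0, sizeof(cb));
    cb.aio_fildes = fd;
    cb.aio_buf = buf;
    cb.aio_nbytes = sizeof(buf);
    /* Ask the kernel to post an EVFILT_AIO event on our kqueue. */
    cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
    cb.aio_sigevent.sigev_notify_kqueue = kq;
    cb.aio_sigevent.sigev_value.sival_ptr = &cb;

    if (aio_read(&cb) == -1)
        return (1);

    /*
     * Wait for the completion event. In later releases the returned
     * ident is the user-space aiocb pointer.
     */
    if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1) {
        struct aiocb *done = (struct aiocb *)ev.ident;
        printf("aio job %p done, read %zd bytes\n",
            (void *)done, aio_return(done));
    }

    close(fd);
    close(kq);
    return (0);
}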