/*
 * Copyright (c) 1997 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. John S. Dyson's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * DISCLAIMER: This code isn't warranted to do anything useful.  Anything
 * bad that happens because of using this software isn't the responsibility
 * of the author.  This software is distributed AS-IS.
 */
|
|
|
|
|
|
|
|
/*
 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
 */
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2002-02-23 11:12:57 +00:00
|
|
|
#include <sys/malloc.h>
|
2000-05-05 09:59:14 +00:00
|
|
|
#include <sys/bio.h>
|
1999-02-25 15:54:06 +00:00
|
|
|
#include <sys/buf.h>
|
2003-03-24 21:15:35 +00:00
|
|
|
#include <sys/eventhandler.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/sysproto.h>
|
|
|
|
#include <sys/filedesc.h>
|
|
|
|
#include <sys/kernel.h>
|
2004-05-30 20:34:58 +00:00
|
|
|
#include <sys/module.h>
|
2001-03-09 06:27:01 +00:00
|
|
|
#include <sys/kthread.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/fcntl.h>
|
|
|
|
#include <sys/file.h>
|
2003-04-29 13:36:06 +00:00
|
|
|
#include <sys/limits.h>
|
1997-11-18 10:02:40 +00:00
|
|
|
#include <sys/lock.h>
|
2000-10-20 07:58:15 +00:00
|
|
|
#include <sys/mutex.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/unistd.h>
|
|
|
|
#include <sys/proc.h>
|
1998-08-17 17:28:10 +00:00
|
|
|
#include <sys/resourcevar.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/signalvar.h>
|
2000-01-14 02:53:29 +00:00
|
|
|
#include <sys/protosw.h>
|
|
|
|
#include <sys/socketvar.h>
|
2001-12-29 07:13:47 +00:00
|
|
|
#include <sys/syscall.h>
|
|
|
|
#include <sys/sysent.h>
|
1997-10-09 04:14:41 +00:00
|
|
|
#include <sys/sysctl.h>
|
2002-03-25 21:52:04 +00:00
|
|
|
#include <sys/sx.h>
|
1997-11-29 01:33:10 +00:00
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/conf.h>
|
2000-04-16 18:53:38 +00:00
|
|
|
#include <sys/event.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
|
2002-11-16 04:22:55 +00:00
|
|
|
#include <posix4/posix4.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/vm_extern.h>
|
1997-07-06 02:40:43 +00:00
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_map.h>
|
2002-03-20 04:09:59 +00:00
|
|
|
#include <vm/uma.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/aio.h>
|
1997-07-17 04:49:43 +00:00
|
|
|
|
2000-02-23 07:44:25 +00:00
|
|
|
#include "opt_vfs_aio.h"
|
1997-06-16 00:27:26 +00:00
|
|
|
|
2004-09-03 03:19:14 +00:00
|
|
|
NET_NEEDS_GIANT("aio");
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow.
 */
static long jobrefid;

/*
 * Job states: the lifecycle of a queued aio request.  (0x1 is
 * intentionally unused here.)
 */
#define JOBST_NULL		0x0	/* not in any queue */
#define JOBST_JOBQGLOBAL	0x2	/* on the global job queue */
#define JOBST_JOBRUNNING	0x3	/* being serviced by an aiod */
#define JOBST_JOBFINISHED	0x4	/* kernel-thread path complete */
#define JOBST_JOBQBUF		0x5	/* queued via the buf subsystem */
#define JOBST_JOBBFINISHED	0x6	/* buf path complete */
|
1997-07-06 02:40:43 +00:00
|
|
|
|
/*
 * Compile-time defaults for the AIO tunables.  Each may be overridden
 * in the kernel config (hence the #ifndef guards); the effective values
 * live in the sysctl-exported variables below.
 */
#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

/* Both timeouts are in ticks (hz = ticks per second). */
#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int max_aio_procs = MAX_AIO_PROCS;
|
1997-10-09 04:14:41 +00:00
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
|
2002-03-05 15:38:49 +00:00
|
|
|
CTLFLAG_RW, &max_aio_procs, 0,
|
|
|
|
"Maximum number of kernel threads to use for handling async IO ");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int num_aio_procs = 0;
|
1997-10-09 04:14:41 +00:00
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
|
2002-03-05 15:38:49 +00:00
|
|
|
CTLFLAG_RD, &num_aio_procs, 0,
|
|
|
|
"Number of presently active kernel threads for async IO");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* The code will adjust the actual number of AIO processes towards this
|
|
|
|
* number when it gets a chance.
|
|
|
|
*/
|
|
|
|
static int target_aio_procs = TARGET_AIO_PROCS;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
|
|
|
|
0, "Preferred number of ready kernel threads for async IO");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int max_queue_count = MAX_AIO_QUEUE;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
|
|
|
|
"Maximum number of aio requests to queue, globally");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int num_queue_count = 0;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
|
|
|
|
"Number of queued aio requests");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int num_buf_aio = 0;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
|
|
|
|
"Number of aio requests presently handled by the buf subsystem");
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* Number of async I/O thread in the process of being started */
|
|
|
|
/* XXX This should be local to _aio_aqueue() */
|
|
|
|
static int num_aio_resv_start = 0;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int aiod_timeout;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
|
|
|
|
"Timeout value for synchronous aio operations");
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int aiod_lifetime;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
|
|
|
|
"Maximum lifetime for idle aiod");
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int unloadable = 0;
|
2001-12-29 07:13:47 +00:00
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
|
|
|
|
"Allow unload of aio (not recommended)");
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
|
|
|
|
static int max_aio_per_proc = MAX_AIO_PER_PROC;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
|
|
|
|
0, "Maximum active aio requests per process (stored in the process)");
|
|
|
|
|
|
|
|
static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
|
|
|
|
&max_aio_queue_per_proc, 0,
|
|
|
|
"Maximum queued aio requests per process (stored in the process)");
|
|
|
|
|
|
|
|
static int max_buf_aio = MAX_BUF_AIO;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
|
|
|
|
"Maximum buf aio requests per process (stored in the process)");
|
|
|
|
|
2002-01-06 21:03:39 +00:00
|
|
|
struct aiocblist {
|
2004-08-13 17:43:53 +00:00
|
|
|
TAILQ_ENTRY(aiocblist) list; /* List of jobs */
|
|
|
|
TAILQ_ENTRY(aiocblist) plist; /* List of jobs for proc */
|
|
|
|
int jobflags;
|
|
|
|
int jobstate;
|
2002-01-06 21:03:39 +00:00
|
|
|
int inputcharge;
|
|
|
|
int outputcharge;
|
|
|
|
struct callout_handle timeouthandle;
|
2004-08-13 17:43:53 +00:00
|
|
|
struct buf *bp; /* Buffer pointer */
|
|
|
|
struct proc *userproc; /* User process */ /* Not td! */
|
2002-11-07 20:46:37 +00:00
|
|
|
struct ucred *cred; /* Active credential when created */
|
2004-08-13 17:43:53 +00:00
|
|
|
struct file *fd_file; /* Pointer to file structure */
|
|
|
|
struct aio_liojob *lio; /* Optional lio job */
|
|
|
|
struct aiocb *uuaiocb; /* Pointer in userspace of aiocb */
|
2004-08-15 06:24:42 +00:00
|
|
|
struct knlist klist; /* list of knotes */
|
2004-08-13 17:43:53 +00:00
|
|
|
struct aiocb uaiocb; /* Kernel I/O control block */
|
2002-01-06 21:03:39 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/* jobflags */
|
2004-08-13 17:43:53 +00:00
|
|
|
#define AIOCBLIST_RUNDOWN 0x4
|
|
|
|
#define AIOCBLIST_DONE 0x10
|
2002-01-06 21:03:39 +00:00
|
|
|
|
/*
 * AIO process info
 */
#define AIOP_FREE	0x1		/* proc on free queue */
#define AIOP_SCHED	0x2		/* proc explicitly scheduled */

/* One entry per aiod kernel worker thread. */
struct aiothreadlist {
	int aiothreadflags;			/* AIO proc flags (AIOP_*) */
	TAILQ_ENTRY(aiothreadlist) list;	/* List of processes */
	struct thread *aiothread;		/* The AIO thread */
};
|
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
 * data-structure for lio signal management
 *
 * One of these tracks a single lio_listio() submission so that the
 * completion signal can be delivered once every job in the list is done.
 */
struct aio_liojob {
	int	lioj_flags;			/* LIOJ_* state flags (below) */
	int	lioj_buffer_count;		/* physio (buffer) jobs submitted */
	int	lioj_buffer_finished_count;	/* physio jobs completed */
	int	lioj_queue_count;		/* daemon-queued jobs submitted */
	int	lioj_queue_finished_count;	/* daemon-queued jobs completed */
	struct sigevent lioj_signal;	/* signal on all I/O done */
	TAILQ_ENTRY(aio_liojob) lioj_list;	/* entry on kaio_liojoblist */
	struct kaioinfo *lioj_ki;	/* back pointer to owner's aio info */
};

#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
|
|
|
|
|
|
|
|
/*
 * per process aio data structure
 *
 * Hung off struct proc (p->p_aioinfo); allocated lazily by
 * aio_init_aioinfo() the first time a process uses AIO.
 */
struct kaioinfo {
	int	kaio_flags;		/* per process kaio flags (KAIO_*) */
	int	kaio_maxactive_count;	/* maximum number of AIOs */
	int	kaio_active_count;	/* number of currently used AIOs */
	int	kaio_qallowed_count;	/* maximum size of AIO queue */
	int	kaio_queue_count;	/* size of AIO queue */
	int	kaio_ballowed_count;	/* maximum number of buffers */
	int	kaio_queue_finished_count; /* number of daemon jobs finished */
	int	kaio_buffer_count;	/* number of physio buffers */
	int	kaio_buffer_finished_count; /* count of I/O done */
	struct proc *kaio_p;		/* process that uses this kaio block */
	TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_jobdone;	/* done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufdone;	/* buffer done queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* queue for aios waiting on sockets */
};

/* kaio_flags values */
#define	KAIO_RUNDOWN	0x1	/* process is being run down */
#define	KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* Global lists of aio daemon threads and of pending jobs. */
static TAILQ_HEAD(,aiothreadlist) aio_activeproc;	/* Active daemons */
static TAILQ_HEAD(,aiothreadlist) aio_freeproc;	/* Idle daemons */
static TAILQ_HEAD(,aiocblist) aio_jobs;			/* Async job list */
static TAILQ_HEAD(,aiocblist) aio_bufjobs;		/* Phys I/O job list */

/* Forward declarations for the file-private implementation below. */
static void aio_init_aioinfo(struct proc *p);
static void aio_onceonly(void);
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void);
static int aio_aqueue(struct thread *td, struct aiocb *job, int type);
static void aio_physwakeup(struct buf *bp);
static void aio_proc_rundown(void *arg, struct proc *p);
static int aio_fphysio(struct aiocblist *aiocbe);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
static void aio_swake_cb(struct socket *, struct sockbuf *);
static int aio_unload(void);
static void process_signal(void *aioj);

/* kqueue filter operations (EVFILT_AIO). */
static int filt_aioattach(struct knote *kn);
static void filt_aiodetach(struct knote *kn);
static int filt_aio(struct knote *kn, long hint);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
 * Zones for:
 * 	kaio	Per process async io info
 * 	aiop	async io thread data
 * 	aiocb	async io jobs
 * 	aiol	list io job pointer - internal to aio_suspend XXX
 * 	aiolio	list io jobs
 */
static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;

/* kqueue filters for aio */
static struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };

/* Eventhandler tags saved so aio_unload() can deregister the hooks. */
static eventhandler_tag exit_tag, exec_tag;
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* Main operations function for use as a kernel module.
|
|
|
|
*/
|
2001-12-29 07:13:47 +00:00
|
|
|
static int
|
|
|
|
aio_modload(struct module *module, int cmd, void *arg)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case MOD_LOAD:
|
|
|
|
aio_onceonly();
|
|
|
|
break;
|
|
|
|
case MOD_UNLOAD:
|
|
|
|
error = aio_unload();
|
|
|
|
break;
|
|
|
|
case MOD_SHUTDOWN:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Module descriptor: name, event handler, no extra data. */
static moduledata_t aio_mod = {
	"aio",
	&aio_modload,
	NULL
};

/* Register each AIO system call so it resolves when loaded as a module. */
SYSCALL_MODULE_HELPER(aio_return);
SYSCALL_MODULE_HELPER(aio_suspend);
SYSCALL_MODULE_HELPER(aio_cancel);
SYSCALL_MODULE_HELPER(aio_error);
SYSCALL_MODULE_HELPER(aio_read);
SYSCALL_MODULE_HELPER(aio_write);
SYSCALL_MODULE_HELPER(aio_waitcomplete);
SYSCALL_MODULE_HELPER(lio_listio);

DECLARE_MODULE(aio, aio_mod,
	SI_SUB_VFS, SI_ORDER_ANY);
MODULE_VERSION(aio, 1);
1997-07-06 02:40:43 +00:00
|
|
|
/*
 * Startup initialization
 *
 * Runs once at module load (via aio_modload).  Hooks the socket wakeup
 * callback, registers exit/exec eventhandlers and the EVFILT_AIO kqueue
 * filter, initializes the global queues and UMA zones, and publishes the
 * POSIX 1003.1B AIO configuration values.
 */
static void
aio_onceonly(void)
{

	/* XXX: should probably just use so->callback */
	aio_swake = &aio_swake_cb;

	/* Run AIO cleanup when a process exits or execs. */
	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
	    EVENTHANDLER_PRI_ANY);
	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown, NULL,
	    EVENTHANDLER_PRI_ANY);
	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);

	/* Global daemon-thread and job queues. */
	TAILQ_INIT(&aio_freeproc);
	TAILQ_INIT(&aio_activeproc);
	TAILQ_INIT(&aio_jobs);
	TAILQ_INIT(&aio_bufjobs);

	/* UMA zones for the AIO bookkeeping structures (see comment above). */
	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aio_liojob), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);

	/* Tunable defaults and the starting job reference id. */
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;

	/* Advertise POSIX async I/O support and its limits. */
	async_io_version = _POSIX_VERSION;
	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* Callback for unload of AIO when used as a module.
|
|
|
|
*/
|
2001-12-29 07:13:47 +00:00
|
|
|
static int
|
|
|
|
aio_unload(void)
|
|
|
|
{
|
2004-08-15 06:24:42 +00:00
|
|
|
int error;
|
2001-12-29 07:13:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX: no unloads by default, it's too dangerous.
|
|
|
|
* perhaps we could do it if locked out callers and then
|
|
|
|
* did an aio_proc_rundown() on each process.
|
|
|
|
*/
|
|
|
|
if (!unloadable)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
error = kqueue_del_filteropts(EVFILT_AIO);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2002-10-27 18:07:41 +00:00
|
|
|
async_io_version = 0;
|
2001-12-29 07:13:47 +00:00
|
|
|
aio_swake = NULL;
|
2003-03-24 21:15:35 +00:00
|
|
|
EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
|
|
|
|
EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
|
2002-11-17 04:15:34 +00:00
|
|
|
p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
|
|
|
|
p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
|
|
|
|
p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
|
2001-12-29 07:13:47 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
 * Init the per-process aioinfo structure.  The aioinfo limits are set
 * per-process for user limit (resource) management.
 *
 * Allocates and attaches p->p_aioinfo on first use, seeding the per-process
 * limits from the global tunables, then makes sure the target number of aio
 * daemon threads is running.
 *
 * NOTE(review): p->p_aioinfo is tested and set without any visible locking
 * here — presumably callers only operate on curproc; verify against callers.
 */
static void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	if (p->p_aioinfo == NULL) {
		/* M_WAITOK: allocation sleeps rather than fails. */
		ki = uma_zalloc(kaio_zone, M_WAITOK);
		p->p_aioinfo = ki;
		ki->kaio_flags = 0;
		/* Per-process limits start at the global tunables. */
		ki->kaio_maxactive_count = max_aio_per_proc;
		ki->kaio_active_count = 0;
		ki->kaio_qallowed_count = max_aio_queue_per_proc;
		ki->kaio_queue_count = 0;
		ki->kaio_ballowed_count = max_buf_aio;
		ki->kaio_buffer_count = 0;
		ki->kaio_buffer_finished_count = 0;
		ki->kaio_p = p;
		TAILQ_INIT(&ki->kaio_jobdone);
		TAILQ_INIT(&ki->kaio_jobqueue);
		TAILQ_INIT(&ki->kaio_bufdone);
		TAILQ_INIT(&ki->kaio_bufqueue);
		TAILQ_INIT(&ki->kaio_liojoblist);
		TAILQ_INIT(&ki->kaio_sockqueue);
	}

	/* Spin up aio daemon threads until the target count is reached. */
	while (num_aio_procs < target_aio_procs)
		aio_newproc();
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Free a job entry. Wait for completion if it is currently active, but don't
|
|
|
|
* delay forever. If we delay, we return a flag that says that we have to
|
|
|
|
* restart the queue scan.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
2001-03-05 01:30:23 +00:00
|
|
|
static int
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_free_entry(struct aiocblist *aiocbe)
|
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct proc *p;
|
1997-11-29 01:33:10 +00:00
|
|
|
int error;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
int s;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
if (aiocbe->jobstate == JOBST_NULL)
|
|
|
|
panic("aio_free_entry: freeing already free job");
|
|
|
|
|
|
|
|
p = aiocbe->userproc;
|
|
|
|
ki = p->p_aioinfo;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj = aiocbe->lio;
|
1997-07-06 02:40:43 +00:00
|
|
|
if (ki == NULL)
|
|
|
|
panic("aio_free_entry: missing p->p_aioinfo");
|
|
|
|
|
2001-03-10 22:47:57 +00:00
|
|
|
while (aiocbe->jobstate == JOBST_JOBRUNNING) {
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
|
2001-03-10 22:47:57 +00:00
|
|
|
tsleep(aiocbe, PRIBIO, "jobwai", 0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
if (aiocbe->bp == NULL) {
|
|
|
|
if (ki->kaio_queue_count <= 0)
|
|
|
|
panic("aio_free_entry: process queue size <= 0");
|
|
|
|
if (num_queue_count <= 0)
|
|
|
|
panic("aio_free_entry: system wide queue size <= 0");
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_queue_count--;
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_DONE)
|
|
|
|
lj->lioj_queue_finished_count--;
|
|
|
|
}
|
|
|
|
ki->kaio_queue_count--;
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_DONE)
|
|
|
|
ki->kaio_queue_finished_count--;
|
|
|
|
num_queue_count--;
|
1997-11-29 01:33:10 +00:00
|
|
|
} else {
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_buffer_count--;
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_DONE)
|
|
|
|
lj->lioj_buffer_finished_count--;
|
|
|
|
}
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_DONE)
|
|
|
|
ki->kaio_buffer_finished_count--;
|
|
|
|
ki->kaio_buffer_count--;
|
|
|
|
num_buf_aio--;
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
/* aiocbe is going away, we need to destroy any knotes */
|
2004-08-13 17:43:53 +00:00
|
|
|
/* XXXKSE Note the thread here is used to eventually find the
|
2001-09-12 08:38:13 +00:00
|
|
|
* owning process again, but it is also used to do a fo_close
|
|
|
|
* and that requires the thread. (but does it require the
|
2002-02-07 20:58:47 +00:00
|
|
|
* OWNING thread? (or maybe the running thread?)
|
2004-08-13 17:43:53 +00:00
|
|
|
* There is a semantic problem here...
|
2001-09-12 08:38:13 +00:00
|
|
|
*/
|
2004-08-15 06:24:42 +00:00
|
|
|
knlist_clear(&aiocbe->klist, 0); /* XXXKSE */
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
|
|
|
|
&& ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags &= ~KAIO_WAKEUP;
|
|
|
|
wakeup(p);
|
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (aiocbe->jobstate == JOBST_JOBQBUF) {
|
2002-01-17 17:19:40 +00:00
|
|
|
if ((error = aio_fphysio(aiocbe)) != 0)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-11-29 01:33:10 +00:00
|
|
|
if (aiocbe->jobstate != JOBST_JOBBFINISHED)
|
|
|
|
panic("aio_free_entry: invalid physio finish-up state");
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
s = splbio();
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
splx(s);
|
2001-03-10 22:47:57 +00:00
|
|
|
} else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
|
2002-01-06 21:03:39 +00:00
|
|
|
s = splnet();
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&aio_jobs, aiocbe, list);
|
2001-03-10 22:47:57 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
|
2002-01-06 21:03:39 +00:00
|
|
|
splx(s);
|
2001-03-10 22:47:57 +00:00
|
|
|
} else if (aiocbe->jobstate == JOBST_JOBFINISHED)
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
|
2000-01-14 02:53:29 +00:00
|
|
|
else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
s = splbio();
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
splx(s);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (aiocbe->bp) {
|
|
|
|
vunmapbuf(aiocbe->bp);
|
1999-01-21 08:29:12 +00:00
|
|
|
relpbuf(aiocbe->bp, NULL);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->bp = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
|
|
|
|
TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiolio_zone, lj);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
aiocbe->jobstate = JOBST_NULL;
|
2001-03-10 22:47:57 +00:00
|
|
|
untimeout(process_signal, aiocbe, aiocbe->timeouthandle);
|
2002-03-31 20:17:56 +00:00
|
|
|
fdrop(aiocbe->fd_file, curthread);
|
2002-11-07 20:46:37 +00:00
|
|
|
crfree(aiocbe->cred);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2004-08-13 17:43:53 +00:00
|
|
|
* Rundown the jobs for a given process.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
2001-12-29 07:13:47 +00:00
|
|
|
static void
|
2003-03-24 21:15:35 +00:00
|
|
|
aio_proc_rundown(void *arg, struct proc *p)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj, *ljn;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocblist *aiocbe, *aiocbn;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct socket *so;
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
|
|
|
return;
|
|
|
|
|
2004-03-14 02:06:28 +00:00
|
|
|
mtx_lock(&Giant);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
|
2000-01-14 02:53:29 +00:00
|
|
|
while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
|
|
|
|
ki->kaio_buffer_finished_count)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags |= KAIO_RUNDOWN;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
|
1997-10-09 04:14:41 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/*
|
|
|
|
* Move any aio ops that are waiting on socket I/O to the normal job
|
|
|
|
* queues so they are cleaned up with any others.
|
|
|
|
*/
|
|
|
|
s = splnet();
|
|
|
|
for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
|
|
|
|
aiocbn) {
|
|
|
|
aiocbn = TAILQ_NEXT(aiocbe, plist);
|
2002-04-14 03:04:19 +00:00
|
|
|
fp = aiocbe->fd_file;
|
|
|
|
if (fp != NULL) {
|
2003-01-13 00:33:17 +00:00
|
|
|
so = fp->f_data;
|
2000-01-14 02:53:29 +00:00
|
|
|
TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
|
|
|
|
if (TAILQ_EMPTY(&so->so_aiojobq)) {
|
2004-06-17 22:48:11 +00:00
|
|
|
SOCKBUF_LOCK(&so->so_snd);
|
2000-01-14 02:53:29 +00:00
|
|
|
so->so_snd.sb_flags &= ~SB_AIO;
|
2004-06-17 22:48:11 +00:00
|
|
|
SOCKBUF_UNLOCK(&so->so_snd);
|
|
|
|
SOCKBUF_LOCK(&so->so_rcv);
|
2000-01-14 02:53:29 +00:00
|
|
|
so->so_rcv.sb_flags &= ~SB_AIO;
|
2004-06-17 22:48:11 +00:00
|
|
|
SOCKBUF_UNLOCK(&so->so_rcv);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
|
|
|
|
TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
restart1:
|
2000-01-14 02:53:29 +00:00
|
|
|
for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbn = TAILQ_NEXT(aiocbe, plist);
|
|
|
|
if (aio_free_entry(aiocbe))
|
|
|
|
goto restart1;
|
|
|
|
}
|
|
|
|
|
|
|
|
restart2:
|
2000-01-14 02:53:29 +00:00
|
|
|
for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
|
|
|
|
aiocbn) {
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbn = TAILQ_NEXT(aiocbe, plist);
|
|
|
|
if (aio_free_entry(aiocbe))
|
|
|
|
goto restart2;
|
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
1997-11-30 21:47:36 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Note the use of lots of splbio here, trying to avoid splbio for long chains
|
|
|
|
* of I/O. Probably unnecessary.
|
1997-11-30 21:47:36 +00:00
|
|
|
*/
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
restart3:
|
|
|
|
s = splbio();
|
|
|
|
while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
|
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2000-01-14 02:53:29 +00:00
|
|
|
tsleep(p, PRIBIO, "aioprn", 0);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
|
|
|
goto restart3;
|
|
|
|
}
|
1997-11-30 21:47:36 +00:00
|
|
|
splx(s);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
|
|
|
restart4:
|
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbn = TAILQ_NEXT(aiocbe, plist);
|
|
|
|
if (aio_free_entry(aiocbe)) {
|
|
|
|
splx(s);
|
|
|
|
goto restart4;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
2004-08-13 17:43:53 +00:00
|
|
|
/*
|
|
|
|
* If we've slept, jobs might have moved from one queue to another.
|
|
|
|
* Retry rundown if we didn't manage to empty the queues.
|
|
|
|
*/
|
|
|
|
if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
|
2001-03-10 22:47:57 +00:00
|
|
|
TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
|
|
|
|
TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
|
|
|
|
TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
|
|
|
|
goto restart1;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
|
|
|
|
ljn = TAILQ_NEXT(lj, lioj_list);
|
|
|
|
if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
|
|
|
|
0)) {
|
|
|
|
TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiolio_zone, lj);
|
2000-01-14 02:53:29 +00:00
|
|
|
} else {
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
|
|
|
|
"QF:%d\n", lj->lioj_buffer_count,
|
|
|
|
lj->lioj_buffer_finished_count,
|
|
|
|
lj->lioj_queue_count,
|
|
|
|
lj->lioj_queue_finished_count);
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
#endif
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(kaio_zone, ki);
|
1997-10-09 04:14:41 +00:00
|
|
|
p->p_aioinfo = NULL;
|
2004-03-14 02:06:28 +00:00
|
|
|
mtx_unlock(&Giant);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Select a job to run (called by an AIO daemon).
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static struct aiocblist *
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_selectjob(struct aiothreadlist *aiop)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2000-01-14 02:53:29 +00:00
|
|
|
int s;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocblist *aiocbe;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct proc *userp;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
|
|
|
for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
|
|
|
|
TAILQ_NEXT(aiocbe, list)) {
|
1997-07-06 02:40:43 +00:00
|
|
|
userp = aiocbe->userproc;
|
|
|
|
ki = userp->p_aioinfo;
|
|
|
|
|
|
|
|
if (ki->kaio_active_count < ki->kaio_maxactive_count) {
|
|
|
|
TAILQ_REMOVE(&aio_jobs, aiocbe, list);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (aiocbe);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2003-01-13 15:06:05 +00:00
|
|
|
return (NULL);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* The AIO processing activity. This is the code that does the I/O request for
|
|
|
|
* the non-physio version of the operations. The normal vn operations are used,
|
|
|
|
* and this code should work in all instances for every type of file, including
|
|
|
|
* pipes, sockets, fifos, and regular files.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
2001-03-05 01:30:23 +00:00
|
|
|
static void
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_process(struct aiocblist *aiocbe)
|
|
|
|
{
|
2002-11-07 20:46:37 +00:00
|
|
|
struct ucred *td_savedcred;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
|
|
|
struct proc *mycp;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocb *cb;
|
|
|
|
struct file *fp;
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
|
|
|
int cnt;
|
|
|
|
int error;
|
1997-11-29 01:33:10 +00:00
|
|
|
int oublock_st, oublock_end;
|
|
|
|
int inblock_st, inblock_end;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
td = curthread;
|
2002-11-07 20:46:37 +00:00
|
|
|
td_savedcred = td->td_ucred;
|
|
|
|
td->td_ucred = aiocbe->cred;
|
2001-09-12 08:38:13 +00:00
|
|
|
mycp = td->td_proc;
|
1997-07-06 02:40:43 +00:00
|
|
|
cb = &aiocbe->uaiocb;
|
2002-04-14 03:04:19 +00:00
|
|
|
fp = aiocbe->fd_file;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2001-12-09 08:16:36 +00:00
|
|
|
aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
|
1997-07-06 02:40:43 +00:00
|
|
|
aiov.iov_len = cb->aio_nbytes;
|
|
|
|
|
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
2002-04-04 02:13:20 +00:00
|
|
|
auio.uio_offset = cb->aio_offset;
|
1997-07-06 02:40:43 +00:00
|
|
|
auio.uio_resid = cb->aio_nbytes;
|
|
|
|
cnt = cb->aio_nbytes;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
inblock_st = mycp->p_stats->p_ru.ru_inblock;
|
|
|
|
oublock_st = mycp->p_stats->p_ru.ru_oublock;
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
2002-04-04 02:13:20 +00:00
|
|
|
* _aio_aqueue() acquires a reference to the file that is
|
|
|
|
* released in aio_free_entry().
|
2000-11-18 21:01:04 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
if (cb->aio_lio_opcode == LIO_READ) {
|
|
|
|
auio.uio_rw = UIO_READ;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
|
1997-07-06 02:40:43 +00:00
|
|
|
} else {
|
|
|
|
auio.uio_rw = UIO_WRITE;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
inblock_end = mycp->p_stats->p_ru.ru_inblock;
|
|
|
|
oublock_end = mycp->p_stats->p_ru.ru_oublock;
|
|
|
|
|
|
|
|
aiocbe->inputcharge = inblock_end - inblock_st;
|
|
|
|
aiocbe->outputcharge = oublock_end - oublock_st;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((error) && (auio.uio_resid != cnt)) {
|
|
|
|
if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
|
|
|
|
error = 0;
|
2001-03-07 03:37:06 +00:00
|
|
|
if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
|
2002-04-04 02:13:20 +00:00
|
|
|
PROC_LOCK(aiocbe->userproc);
|
|
|
|
psignal(aiocbe->userproc, SIGPIPE);
|
|
|
|
PROC_UNLOCK(aiocbe->userproc);
|
2001-03-07 03:37:06 +00:00
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
cnt -= auio.uio_resid;
|
|
|
|
cb->_aiocb_private.error = error;
|
|
|
|
cb->_aiocb_private.status = cnt;
|
2002-11-07 20:46:37 +00:00
|
|
|
td->td_ucred = td_savedcred;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
* The AIO daemon, most of the actual work is done in aio_process,
|
|
|
|
* but the setup (and address space mgmt) is done in this routine.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static void
|
1999-07-01 13:21:46 +00:00
|
|
|
aio_daemon(void *uproc)
|
1997-07-06 02:40:43 +00:00
|
|
|
{
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct aio_liojob *lj;
|
|
|
|
struct aiocb *cb;
|
|
|
|
struct aiocblist *aiocbe;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct aiothreadlist *aiop;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct proc *curcp, *mycp, *userp;
|
|
|
|
struct vmspace *myvm, *tmpvm;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td = curthread;
|
2002-02-23 11:12:57 +00:00
|
|
|
struct pgrp *newpgrp;
|
|
|
|
struct session *newsess;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&Giant);
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
1997-11-29 01:33:10 +00:00
|
|
|
* Local copies of curproc (cp) and vmspace (myvm)
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
mycp = td->td_proc;
|
1997-11-29 01:33:10 +00:00
|
|
|
myvm = mycp->p_vmspace;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2003-04-17 22:37:48 +00:00
|
|
|
KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Allocate and ready the aio control info. There is one aiop structure
|
|
|
|
* per daemon.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2003-02-19 05:47:46 +00:00
|
|
|
aiop = uma_zalloc(aiop_zone, M_WAITOK);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothread = td;
|
|
|
|
aiop->aiothreadflags |= AIOP_FREE;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Place thread (lightweight process) onto the AIO free thread list.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (TAILQ_EMPTY(&aio_freeproc))
|
|
|
|
wakeup(&aio_freeproc);
|
|
|
|
TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* Get rid of our current filedescriptors. AIOD's don't need any
|
|
|
|
* filedescriptors, except as temporarily inherited from the client.
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
fdfree(td);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2002-05-03 07:46:59 +00:00
|
|
|
mtx_unlock(&Giant);
|
2000-01-14 02:53:29 +00:00
|
|
|
/* The daemon resides in its own pgrp. */
|
2002-03-05 15:38:49 +00:00
|
|
|
MALLOC(newpgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
|
2003-02-19 05:47:46 +00:00
|
|
|
M_WAITOK | M_ZERO);
|
2002-03-05 15:38:49 +00:00
|
|
|
MALLOC(newsess, struct session *, sizeof(struct session), M_SESSION,
|
2003-02-19 05:47:46 +00:00
|
|
|
M_WAITOK | M_ZERO);
|
2002-02-23 11:12:57 +00:00
|
|
|
|
2002-04-16 17:11:34 +00:00
|
|
|
sx_xlock(&proctree_lock);
|
2002-02-23 11:12:57 +00:00
|
|
|
enterpgrp(mycp, mycp->p_pid, newpgrp, newsess);
|
2002-04-16 17:11:34 +00:00
|
|
|
sx_xunlock(&proctree_lock);
|
2002-04-20 12:02:52 +00:00
|
|
|
mtx_lock(&Giant);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wakeup parent process. (Parent sleeps to keep from blasting away
|
2001-09-12 08:38:13 +00:00
|
|
|
* and creating too many daemons.)
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
wakeup(mycp);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (;;) {
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* curcp is the current daemon process context.
|
|
|
|
* userp is the current user process context.
|
|
|
|
*/
|
|
|
|
curcp = mycp;
|
1997-10-11 01:07:03 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* Take daemon off of free queue
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
if (aiop->aiothreadflags & AIOP_FREE) {
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothreadflags &= ~AIOP_FREE;
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothreadflags &= ~AIOP_SCHED;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Check for jobs.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
1999-01-27 21:50:00 +00:00
|
|
|
while ((aiocbe = aio_selectjob(aiop)) != NULL) {
|
1997-07-06 02:40:43 +00:00
|
|
|
cb = &aiocbe->uaiocb;
|
|
|
|
userp = aiocbe->userproc;
|
|
|
|
|
|
|
|
aiocbe->jobstate = JOBST_JOBRUNNING;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Connect to process address space for user program.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (userp != curcp) {
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Save the current address space that we are
|
|
|
|
* connected to.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
tmpvm = mycp->p_vmspace;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Point to the new user address space, and
|
|
|
|
* refer to it.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
mycp->p_vmspace = userp->p_vmspace;
|
2004-07-27 03:53:41 +00:00
|
|
|
atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1);
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Activate the new mapping. */
|
2002-02-07 20:58:47 +00:00
|
|
|
pmap_activate(FIRST_THREAD_IN_PROC(mycp));
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If the old address space wasn't the daemons
|
|
|
|
* own address space, then we need to remove the
|
|
|
|
* daemon's reference from the other process
|
|
|
|
* that it was acting on behalf of.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
if (tmpvm != myvm) {
|
|
|
|
vmspace_free(tmpvm);
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
curcp = userp;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
ki = userp->p_aioinfo;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj = aiocbe->lio;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Account for currently active jobs. */
|
1997-07-06 02:40:43 +00:00
|
|
|
ki->kaio_active_count++;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Do the I/O function. */
|
1997-07-06 02:40:43 +00:00
|
|
|
aio_process(aiocbe);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Decrement the active job count. */
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
ki->kaio_active_count--;
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Increment the completion count for wakeup/signal
|
|
|
|
* comparisons.
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
|
|
|
aiocbe->jobflags |= AIOCBLIST_DONE;
|
|
|
|
ki->kaio_queue_finished_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_queue_finished_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
|
|
|
|
& KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags &= ~KAIO_WAKEUP;
|
|
|
|
wakeup(userp);
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj && (lj->lioj_flags &
|
|
|
|
(LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
|
|
|
|
if ((lj->lioj_queue_finished_count ==
|
|
|
|
lj->lioj_queue_count) &&
|
|
|
|
(lj->lioj_buffer_finished_count ==
|
|
|
|
lj->lioj_buffer_count)) {
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_LOCK(userp);
|
|
|
|
psignal(userp,
|
|
|
|
lj->lioj_signal.sigev_signo);
|
|
|
|
PROC_UNLOCK(userp);
|
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBFINISHED;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
2002-08-22 08:50:15 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
2004-08-15 06:24:42 +00:00
|
|
|
KNOTE_UNLOCKED(&aiocbe->klist, 0);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
|
|
|
|
wakeup(aiocbe);
|
|
|
|
aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_LOCK(userp);
|
1997-07-06 02:40:43 +00:00
|
|
|
psignal(userp, cb->aio_sigevent.sigev_signo);
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_UNLOCK(userp);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Disconnect from user address space.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (curcp != mycp) {
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get the user address space to disconnect from. */
|
1997-11-29 01:33:10 +00:00
|
|
|
tmpvm = mycp->p_vmspace;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get original address space for daemon. */
|
1997-11-29 01:33:10 +00:00
|
|
|
mycp->p_vmspace = myvm;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Activate the daemon's address space. */
|
2002-02-07 20:58:47 +00:00
|
|
|
pmap_activate(FIRST_THREAD_IN_PROC(mycp));
|
2000-01-14 02:53:29 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (tmpvm == myvm) {
|
|
|
|
printf("AIOD: vmspace problem -- %d\n",
|
|
|
|
mycp->p_pid);
|
|
|
|
}
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
#endif
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Remove our vmspace reference. */
|
1997-07-06 02:40:43 +00:00
|
|
|
vmspace_free(tmpvm);
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
curcp = mycp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are the first to be put onto the free queue, wakeup
|
|
|
|
* anyone waiting for a daemon.
|
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&aio_activeproc, aiop, list);
|
|
|
|
if (TAILQ_EMPTY(&aio_freeproc))
|
|
|
|
wakeup(&aio_freeproc);
|
|
|
|
TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothreadflags |= AIOP_FREE;
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If daemon is inactive for a long time, allow it to exit,
|
|
|
|
* thereby freeing resources.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2002-01-20 00:52:44 +00:00
|
|
|
if ((aiop->aiothreadflags & AIOP_SCHED) == 0 &&
|
|
|
|
tsleep(aiop->aiothread, PRIBIO, "aiordy", aiod_lifetime)) {
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
2002-01-20 18:59:58 +00:00
|
|
|
if (TAILQ_EMPTY(&aio_jobs)) {
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((aiop->aiothreadflags & AIOP_FREE) &&
|
2000-01-14 02:53:29 +00:00
|
|
|
(num_aio_procs > target_aio_procs)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiop_zone, aiop);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
num_aio_procs--;
|
2000-01-14 02:53:29 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (mycp->p_vmspace->vm_refcnt <= 1) {
|
|
|
|
printf("AIOD: bad vm refcnt for"
|
|
|
|
" exiting daemon: %d\n",
|
|
|
|
mycp->p_vmspace->vm_refcnt);
|
|
|
|
}
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
#endif
|
2001-03-09 06:27:01 +00:00
|
|
|
kthread_exit(0);
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
|
|
|
|
* AIO daemon modifies its environment itself.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static int
|
2003-01-13 15:06:05 +00:00
|
|
|
aio_newproc(void)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
int error;
|
2001-03-09 06:27:01 +00:00
|
|
|
struct proc *p;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2002-10-02 07:44:29 +00:00
|
|
|
error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, 0, "aiod%d",
|
2004-08-13 17:43:53 +00:00
|
|
|
num_aio_procs);
|
1999-05-06 18:44:42 +00:00
|
|
|
if (error)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Wait until daemon is started, but continue on just in case to
|
1997-11-29 01:33:10 +00:00
|
|
|
* handle error conditions.
|
|
|
|
*/
|
2001-03-09 06:27:01 +00:00
|
|
|
error = tsleep(p, PZERO, "aiosta", aiod_timeout);
|
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
num_aio_procs++;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2001-03-05 01:30:23 +00:00
|
|
|
* Try the high-performance, low-overhead physio method for eligible
|
|
|
|
* VCHR devices. This method doesn't use an aio helper thread, and
|
2004-08-13 17:43:53 +00:00
|
|
|
* thus has very low overhead.
|
2001-03-05 01:30:23 +00:00
|
|
|
*
|
|
|
|
* Assumes that the caller, _aio_aqueue(), has incremented the file
|
|
|
|
* structure's reference count, preventing its deallocation for the
|
2004-08-13 17:43:53 +00:00
|
|
|
* duration of this call.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2001-03-05 01:30:23 +00:00
|
|
|
static int
|
2000-01-14 02:53:29 +00:00
|
|
|
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct aiocb *cb;
|
|
|
|
struct file *fp;
|
|
|
|
struct buf *bp;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct kaioinfo *ki;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj;
|
1997-11-29 01:33:10 +00:00
|
|
|
int s;
|
2000-09-26 06:35:22 +00:00
|
|
|
int notify;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
cb = &aiocbe->uaiocb;
|
2002-02-12 17:40:41 +00:00
|
|
|
fp = aiocbe->fd_file;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2004-08-13 17:43:53 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE)
|
1999-11-07 13:09:09 +00:00
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2003-06-22 08:41:43 +00:00
|
|
|
vp = fp->f_vnode;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
|
2000-01-17 21:18:39 +00:00
|
|
|
/*
|
|
|
|
* If its not a disk, we don't want to return a positive error.
|
|
|
|
* It causes the aio code to not fall through to try the thread
|
|
|
|
* way when you're talking to a regular file.
|
|
|
|
*/
|
|
|
|
if (!vn_isdisk(vp, &error)) {
|
|
|
|
if (error == ENOTBLK)
|
|
|
|
return (-1);
|
|
|
|
else
|
|
|
|
return (error);
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2004-10-26 07:39:12 +00:00
|
|
|
if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
|
1999-11-07 13:09:09 +00:00
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2001-03-10 22:47:57 +00:00
|
|
|
if (cb->aio_nbytes >
|
|
|
|
MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
|
1999-11-07 13:09:09 +00:00
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
2004-08-13 17:43:53 +00:00
|
|
|
if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
|
1999-11-07 13:09:09 +00:00
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
ki->kaio_buffer_count++;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
|
|
|
|
lj = aiocbe->lio;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_buffer_count++;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Create and build a buffer header for a transfer. */
|
1999-01-21 08:29:12 +00:00
|
|
|
bp = (struct buf *)getpbuf(NULL);
|
2001-03-10 22:47:57 +00:00
|
|
|
BUF_KERNPROC(bp);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Get a copy of the kva from the physical buffer.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2003-04-05 23:02:58 +00:00
|
|
|
error = 0;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
bp->b_bcount = cb->aio_nbytes;
|
|
|
|
bp->b_bufsize = cb->aio_nbytes;
|
|
|
|
bp->b_iodone = aio_physwakeup;
|
|
|
|
bp->b_saveaddr = bp->b_data;
|
2001-12-09 08:16:36 +00:00
|
|
|
bp->b_data = (void *)(uintptr_t)cb->aio_buf;
|
2003-10-21 13:18:19 +00:00
|
|
|
bp->b_offset = cb->aio_offset;
|
|
|
|
bp->b_iooffset = cb->aio_offset;
|
1997-11-29 01:33:10 +00:00
|
|
|
bp->b_blkno = btodb(cb->aio_offset);
|
2003-04-04 06:26:28 +00:00
|
|
|
bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2003-01-20 17:46:48 +00:00
|
|
|
/*
|
|
|
|
* Bring buffer into kernel space.
|
|
|
|
*/
|
|
|
|
if (vmapbuf(bp) < 0) {
|
|
|
|
error = EFAULT;
|
|
|
|
goto doerror;
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
s = splbio();
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->bp = bp;
|
2003-09-10 15:48:51 +00:00
|
|
|
bp->b_caller1 = (void *)aiocbe;
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBQBUF;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
cb->_aiocb_private.status = cb->aio_nbytes;
|
|
|
|
num_buf_aio++;
|
1997-11-29 01:33:10 +00:00
|
|
|
bp->b_error = 0;
|
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Perform transfer. */
|
2004-10-29 07:16:37 +00:00
|
|
|
dev_strategy(vp->v_rdev, bp);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
notify = 0;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
s = splbio();
|
2004-08-13 17:43:53 +00:00
|
|
|
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
/*
|
|
|
|
* If we had an error invoking the request, or an error in processing
|
2000-01-14 02:53:29 +00:00
|
|
|
* the request before we have returned, we process it as an error in
|
|
|
|
* transfer. Note that such an I/O error is not indicated immediately,
|
|
|
|
* but is returned using the aio_error mechanism. In this case,
|
|
|
|
* aio_suspend will return immediately.
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
*/
|
2000-04-02 15:24:56 +00:00
|
|
|
if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) {
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
struct aiocb *job = aiocbe->uuaiocb;
|
|
|
|
|
|
|
|
aiocbe->uaiocb._aiocb_private.status = 0;
|
|
|
|
suword(&job->_aiocb_private.status, 0);
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = bp->b_error;
|
|
|
|
suword(&job->_aiocb_private.error, bp->b_error);
|
|
|
|
|
|
|
|
ki->kaio_buffer_finished_count++;
|
|
|
|
|
|
|
|
if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
|
|
|
|
aiocbe->jobstate = JOBST_JOBBFINISHED;
|
|
|
|
aiocbe->jobflags |= AIOCBLIST_DONE;
|
|
|
|
TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
|
|
|
|
TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
|
2000-04-16 18:53:38 +00:00
|
|
|
notify = 1;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
splx(s);
|
2000-04-16 18:53:38 +00:00
|
|
|
if (notify)
|
2004-08-15 06:24:42 +00:00
|
|
|
KNOTE_UNLOCKED(&aiocbe->klist, 0);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
doerror:
|
|
|
|
ki->kaio_buffer_count--;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_buffer_count--;
|
|
|
|
aiocbe->bp = NULL;
|
1999-01-21 08:29:12 +00:00
|
|
|
relpbuf(bp, NULL);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
|
|
|
* This waits/tests physio completion.
|
|
|
|
*/
|
2001-03-05 01:30:23 +00:00
|
|
|
static int
|
2002-01-17 17:19:40 +00:00
|
|
|
aio_fphysio(struct aiocblist *iocb)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
|
|
|
int s;
|
|
|
|
struct buf *bp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
bp = iocb->bp;
|
|
|
|
|
|
|
|
s = splbio();
|
|
|
|
while ((bp->b_flags & B_DONE) == 0) {
|
2001-03-06 15:54:38 +00:00
|
|
|
if (tsleep(bp, PRIBIO, "physstr", aiod_timeout)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
if ((bp->b_flags & B_DONE) == 0) {
|
|
|
|
splx(s);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINPROGRESS);
|
2000-01-14 02:53:29 +00:00
|
|
|
} else
|
1997-11-29 01:33:10 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2001-03-06 15:54:38 +00:00
|
|
|
splx(s);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Release mapping into kernel space. */
|
1997-11-29 01:33:10 +00:00
|
|
|
vunmapbuf(bp);
|
|
|
|
iocb->bp = 0;
|
|
|
|
|
|
|
|
error = 0;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Check for an error. */
|
2000-04-02 15:24:56 +00:00
|
|
|
if (bp->b_ioflags & BIO_ERROR)
|
1997-11-29 01:33:10 +00:00
|
|
|
error = bp->b_error;
|
|
|
|
|
1999-01-21 08:29:12 +00:00
|
|
|
relpbuf(bp, NULL);
|
1997-11-29 01:33:10 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Wake up aio requests that may be serviceable now.
|
|
|
|
*/
|
2002-01-06 21:03:39 +00:00
|
|
|
static void
|
2001-12-29 07:13:47 +00:00
|
|
|
aio_swake_cb(struct socket *so, struct sockbuf *sb)
|
2000-01-14 02:53:29 +00:00
|
|
|
{
|
|
|
|
struct aiocblist *cb,*cbn;
|
|
|
|
struct proc *p;
|
|
|
|
struct kaioinfo *ki = NULL;
|
|
|
|
int opcode, wakecount = 0;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct aiothreadlist *aiop;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
if (sb == &so->so_snd) {
|
|
|
|
opcode = LIO_WRITE;
|
2004-06-17 22:48:11 +00:00
|
|
|
SOCKBUF_LOCK(&so->so_snd);
|
2000-01-14 02:53:29 +00:00
|
|
|
so->so_snd.sb_flags &= ~SB_AIO;
|
2004-06-17 22:48:11 +00:00
|
|
|
SOCKBUF_UNLOCK(&so->so_snd);
|
2000-01-14 02:53:29 +00:00
|
|
|
} else {
|
|
|
|
opcode = LIO_READ;
|
2004-06-17 22:48:11 +00:00
|
|
|
SOCKBUF_LOCK(&so->so_rcv);
|
2000-01-14 02:53:29 +00:00
|
|
|
so->so_rcv.sb_flags &= ~SB_AIO;
|
2004-06-17 22:48:11 +00:00
|
|
|
SOCKBUF_UNLOCK(&so->so_rcv);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
|
|
|
|
cbn = TAILQ_NEXT(cb, list);
|
|
|
|
if (opcode == cb->uaiocb.aio_lio_opcode) {
|
|
|
|
p = cb->userproc;
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
TAILQ_REMOVE(&so->so_aiojobq, cb, list);
|
|
|
|
TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
|
|
|
|
wakecount++;
|
|
|
|
if (cb->jobstate != JOBST_JOBQGLOBAL)
|
|
|
|
panic("invalid queue value");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
while (wakecount--) {
|
|
|
|
if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
|
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothreadflags &= ~AIOP_FREE;
|
|
|
|
wakeup(aiop->aiothread);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Queue a new AIO request. Choosing either the threaded or direct physio VCHR
|
|
|
|
* technique is done in this code.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
_aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int type)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct filedesc *fdp;
|
|
|
|
struct file *fp;
|
|
|
|
unsigned int fd;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct socket *so;
|
|
|
|
int s;
|
2000-09-26 06:35:22 +00:00
|
|
|
int error;
|
2002-08-05 19:14:27 +00:00
|
|
|
int opcode, user_opcode;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocblist *aiocbe;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct aiothreadlist *aiop;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
2000-11-21 19:36:36 +00:00
|
|
|
struct kevent kev;
|
|
|
|
struct kqueue *kq;
|
|
|
|
struct file *kq_fp;
|
2004-10-01 05:54:06 +00:00
|
|
|
struct sockbuf *sb;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2003-02-19 05:47:46 +00:00
|
|
|
aiocbe = uma_zalloc(aiocb_zone, M_WAITOK);
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->inputcharge = 0;
|
|
|
|
aiocbe->outputcharge = 0;
|
2001-03-10 22:47:57 +00:00
|
|
|
callout_handle_init(&aiocbe->timeouthandle);
|
2004-08-15 06:24:42 +00:00
|
|
|
/* XXX - need a lock */
|
|
|
|
knlist_init(&aiocbe->klist, NULL);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
suword(&job->_aiocb_private.status, -1);
|
|
|
|
suword(&job->_aiocb_private.error, 0);
|
|
|
|
suword(&job->_aiocb_private.kernelinfo, -1);
|
|
|
|
|
2001-03-05 01:30:23 +00:00
|
|
|
error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
|
1997-07-06 02:40:43 +00:00
|
|
|
if (error) {
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.error, error);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2001-04-18 22:18:39 +00:00
|
|
|
if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
|
|
|
|
!_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
2001-04-18 22:18:39 +00:00
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Save userspace address of the job info. */
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
aiocbe->uuaiocb = job;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get the opcode. */
|
2002-08-05 19:14:27 +00:00
|
|
|
user_opcode = aiocbe->uaiocb.aio_lio_opcode;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (type != LIO_NOP)
|
1997-10-09 04:14:41 +00:00
|
|
|
aiocbe->uaiocb.aio_lio_opcode = type;
|
|
|
|
opcode = aiocbe->uaiocb.aio_lio_opcode;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get the fd info for process. */
|
1997-07-06 02:40:43 +00:00
|
|
|
fdp = p->p_fd;
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Range check file descriptor.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
2002-12-27 08:39:42 +00:00
|
|
|
FILEDESC_LOCK(fdp);
|
1997-07-06 02:40:43 +00:00
|
|
|
fd = aiocbe->uaiocb.aio_fildes;
|
|
|
|
if (fd >= fdp->fd_nfiles) {
|
2002-12-27 08:39:42 +00:00
|
|
|
FILEDESC_UNLOCK(fdp);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (type == 0)
|
1997-07-06 02:40:43 +00:00
|
|
|
suword(&job->_aiocb_private.error, EBADF);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EBADF);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
|
2003-10-24 21:07:53 +00:00
|
|
|
if ((fp == NULL) ||
|
|
|
|
((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 0)) ||
|
|
|
|
((opcode == LIO_READ) && ((fp->f_flag & FREAD) == 0))) {
|
2002-12-27 08:39:42 +00:00
|
|
|
FILEDESC_UNLOCK(fdp);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (type == 0)
|
1997-07-06 02:40:43 +00:00
|
|
|
suword(&job->_aiocb_private.error, EBADF);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EBADF);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2002-03-31 20:17:56 +00:00
|
|
|
fhold(fp);
|
2002-12-27 08:39:42 +00:00
|
|
|
FILEDESC_UNLOCK(fdp);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
if (aiocbe->uaiocb.aio_offset == -1LL) {
|
2002-04-07 07:17:59 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto aqueue_fail;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
|
|
|
|
if (error) {
|
2002-04-07 07:17:59 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto aqueue_fail;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1998-07-15 06:51:14 +00:00
|
|
|
aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
|
1998-08-17 17:28:10 +00:00
|
|
|
if (jobrefid == LONG_MAX)
|
1997-11-29 01:33:10 +00:00
|
|
|
jobrefid = 1;
|
1998-08-17 17:28:10 +00:00
|
|
|
else
|
|
|
|
jobrefid++;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
if (opcode == LIO_NOP) {
|
2002-03-31 20:17:56 +00:00
|
|
|
fdrop(fp, td);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
1997-07-06 02:40:43 +00:00
|
|
|
if (type == 0) {
|
|
|
|
suword(&job->_aiocb_private.error, 0);
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.status, 0);
|
|
|
|
suword(&job->_aiocb_private.kernelinfo, 0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
|
2002-04-07 07:17:59 +00:00
|
|
|
if (type == 0)
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.status, 0);
|
2002-04-07 07:17:59 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto aqueue_fail;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
2000-11-21 19:36:36 +00:00
|
|
|
if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
|
|
|
|
kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
|
|
|
|
kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* This method for requesting kevent-based notification won't
|
|
|
|
* work on the alpha, since we're passing in a pointer
|
|
|
|
* via aio_lio_opcode, which is an int. Use the SIGEV_KEVENT-
|
|
|
|
* based method instead.
|
|
|
|
*/
|
2002-08-05 19:14:27 +00:00
|
|
|
if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
|
|
|
|
user_opcode == LIO_WRITE)
|
2000-04-16 18:53:38 +00:00
|
|
|
goto no_kqueue;
|
|
|
|
|
2002-08-05 19:14:27 +00:00
|
|
|
error = copyin((struct kevent *)(uintptr_t)user_opcode,
|
|
|
|
&kev, sizeof(kev));
|
2000-04-16 18:53:38 +00:00
|
|
|
if (error)
|
|
|
|
goto aqueue_fail;
|
2000-11-21 19:36:36 +00:00
|
|
|
}
|
|
|
|
if ((u_int)kev.ident >= fdp->fd_nfiles ||
|
|
|
|
(kq_fp = fdp->fd_ofiles[kev.ident]) == NULL ||
|
|
|
|
(kq_fp->f_type != DTYPE_KQUEUE)) {
|
|
|
|
error = EBADF;
|
|
|
|
goto aqueue_fail;
|
|
|
|
}
|
2003-01-13 00:33:17 +00:00
|
|
|
kq = kq_fp->f_data;
|
2002-08-06 19:01:08 +00:00
|
|
|
kev.ident = (uintptr_t)aiocbe->uuaiocb;
|
2000-11-21 19:36:36 +00:00
|
|
|
kev.filter = EVFILT_AIO;
|
|
|
|
kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
|
2002-08-06 19:01:08 +00:00
|
|
|
kev.data = (intptr_t)aiocbe;
|
2004-08-15 06:24:42 +00:00
|
|
|
error = kqueue_register(kq, &kev, td, 1);
|
2000-04-16 18:53:38 +00:00
|
|
|
aqueue_fail:
|
2000-11-21 19:36:36 +00:00
|
|
|
if (error) {
|
2002-03-31 20:17:56 +00:00
|
|
|
fdrop(fp, td);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2000-11-21 19:36:36 +00:00
|
|
|
if (type == 0)
|
|
|
|
suword(&job->_aiocb_private.error, error);
|
|
|
|
goto done;
|
2000-04-16 18:53:38 +00:00
|
|
|
}
|
2000-11-21 19:36:36 +00:00
|
|
|
no_kqueue:
|
2000-04-16 18:53:38 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.error, EINPROGRESS);
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->userproc = p;
|
2002-11-07 20:46:37 +00:00
|
|
|
aiocbe->cred = crhold(td->td_ucred);
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->jobflags = 0;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->lio = lj;
|
|
|
|
ki = p->p_aioinfo;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (fp->f_type == DTYPE_SOCKET) {
|
|
|
|
/*
|
|
|
|
* Alternate queueing for socket ops: Reach down into the
|
|
|
|
* descriptor to get the socket data. Then check to see if the
|
|
|
|
* socket is ready to be read or written (based on the requested
|
|
|
|
* operation).
|
|
|
|
*
|
|
|
|
* If it is not ready for io, then queue the aiocbe on the
|
|
|
|
* socket, and set the flags so we get a call when sbnotify()
|
|
|
|
* happens.
|
2004-10-01 05:54:06 +00:00
|
|
|
*
|
|
|
|
* Note if opcode is neither LIO_WRITE nor LIO_READ we lock
|
|
|
|
* and unlock the snd sockbuf for no reason.
|
2000-01-14 02:53:29 +00:00
|
|
|
*/
|
2003-01-13 00:33:17 +00:00
|
|
|
so = fp->f_data;
|
2004-10-01 05:54:06 +00:00
|
|
|
sb = (opcode == LIO_READ) ? &so->so_rcv : &so->so_snd;
|
|
|
|
SOCKBUF_LOCK(sb);
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
|
|
|
if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
|
|
|
|
LIO_WRITE) && (!sowriteable(so)))) {
|
|
|
|
TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
|
2004-10-01 05:54:06 +00:00
|
|
|
sb->sb_flags |= SB_AIO;
|
2000-01-14 02:53:29 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
|
|
|
|
ki->kaio_queue_count++;
|
|
|
|
num_queue_count++;
|
2004-10-01 05:54:06 +00:00
|
|
|
SOCKBUF_UNLOCK(sb);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
2000-11-18 21:01:04 +00:00
|
|
|
error = 0;
|
|
|
|
goto done;
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
2004-10-01 05:54:06 +00:00
|
|
|
SOCKBUF_UNLOCK(sb);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((error = aio_qphysio(p, aiocbe)) == 0)
|
2000-11-18 21:01:04 +00:00
|
|
|
goto done;
|
|
|
|
if (error > 0) {
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.status, 0);
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = error;
|
|
|
|
suword(&job->_aiocb_private.error, error);
|
2000-11-18 21:01:04 +00:00
|
|
|
goto done;
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* No buffer for daemon I/O. */
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->bp = NULL;
|
|
|
|
|
|
|
|
ki->kaio_queue_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_queue_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBQGLOBAL;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
num_queue_count++;
|
1997-11-29 01:33:10 +00:00
|
|
|
error = 0;
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If we don't have a free AIO process, and we are below our quota, then
|
|
|
|
* start one. Otherwise, depend on the subsequent I/O completions to
|
|
|
|
* pick-up this job. If we don't sucessfully create the new process
|
|
|
|
* (thread) due to resource issues, we return an error for now (EAGAIN),
|
|
|
|
* which is likely not the correct thing to do.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
2002-01-14 07:26:33 +00:00
|
|
|
retryproc:
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothreadflags &= ~AIOP_FREE;
|
|
|
|
wakeup(aiop->aiothread);
|
1997-11-29 01:33:10 +00:00
|
|
|
} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
|
2000-01-14 02:53:29 +00:00
|
|
|
((ki->kaio_active_count + num_aio_resv_start) <
|
|
|
|
ki->kaio_maxactive_count)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
num_aio_resv_start++;
|
|
|
|
if ((error = aio_newproc()) == 0) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
num_aio_resv_start--;
|
1997-11-29 01:33:10 +00:00
|
|
|
goto retryproc;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
num_aio_resv_start--;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
2000-11-18 21:01:04 +00:00
|
|
|
done:
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* This routine queues an AIO request, checking for quotas.
|
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_aqueue(struct thread *td, struct aiocb *job, int type)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (p->p_aioinfo == NULL)
|
1997-07-06 02:40:43 +00:00
|
|
|
aio_init_aioinfo(p);
|
|
|
|
|
|
|
|
if (num_queue_count >= max_queue_count)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
return _aio_aqueue(td, job, NULL, type);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Support the aio_return system call, as a side-effect, kernel resources are
|
|
|
|
* released.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_return(struct thread *td, struct aio_return_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
2002-04-07 01:28:34 +00:00
|
|
|
long jobref;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aiocblist *cb, *ncb;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
struct aiocb *ujob;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
ujob = uap->aiocbp;
|
|
|
|
jobref = fuword(&ujob->_aiocb_private.kernelinfo);
|
1997-11-29 01:33:10 +00:00
|
|
|
if (jobref == -1 || jobref == 0)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2002-04-08 04:57:56 +00:00
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
2002-01-14 07:26:33 +00:00
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
jobref) {
|
1997-11-29 01:33:10 +00:00
|
|
|
if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
|
2001-12-31 02:03:39 +00:00
|
|
|
p->p_stats->p_ru.ru_oublock +=
|
2000-01-14 02:53:29 +00:00
|
|
|
cb->outputcharge;
|
1997-11-29 01:33:10 +00:00
|
|
|
cb->outputcharge = 0;
|
|
|
|
} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
|
2001-12-31 02:03:39 +00:00
|
|
|
p->p_stats->p_ru.ru_inblock += cb->inputcharge;
|
1997-11-29 01:33:10 +00:00
|
|
|
cb->inputcharge = 0;
|
|
|
|
}
|
2002-04-08 04:57:56 +00:00
|
|
|
goto done;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
ncb = TAILQ_NEXT(cb, plist);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
|
|
|
|
== jobref) {
|
2002-04-08 04:57:56 +00:00
|
|
|
break;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
2002-04-08 04:57:56 +00:00
|
|
|
done:
|
|
|
|
if (cb != NULL) {
|
|
|
|
if (ujob == cb->uuaiocb) {
|
|
|
|
td->td_retval[0] =
|
|
|
|
cb->uaiocb._aiocb_private.status;
|
|
|
|
} else
|
|
|
|
td->td_retval[0] = EFAULT;
|
|
|
|
aio_free_entry(cb);
|
|
|
|
return (0);
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
return (EINVAL);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Allow a process to wakeup when any of the I/O requests are completed.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_suspend(struct thread *td, struct aio_suspend_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1997-11-07 08:53:44 +00:00
|
|
|
struct timeval atv;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct timespec ts;
|
|
|
|
struct aiocb *const *cbptr, *cbp;
|
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct aiocblist *cb;
|
|
|
|
int i;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
int njoblist;
|
1997-07-06 02:40:43 +00:00
|
|
|
int error, s, timo;
|
2002-04-07 01:28:34 +00:00
|
|
|
long *ijoblist;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
struct aiocb **ujoblist;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2003-01-12 09:40:23 +00:00
|
|
|
if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
timo = 0;
|
|
|
|
if (uap->timeout) {
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get timespec struct. */
|
|
|
|
if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
|
|
|
|
return (EINVAL);
|
|
|
|
|
1998-12-15 17:38:33 +00:00
|
|
|
TIMESPEC_TO_TIMEVAL(&atv, &ts);
|
1997-07-06 02:40:43 +00:00
|
|
|
if (itimerfix(&atv))
|
|
|
|
return (EINVAL);
|
1998-03-30 09:56:58 +00:00
|
|
|
timo = tvtohz(&atv);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
njoblist = 0;
|
2003-02-19 05:47:46 +00:00
|
|
|
ijoblist = uma_zalloc(aiol_zone, M_WAITOK);
|
|
|
|
ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
|
1997-07-06 02:40:43 +00:00
|
|
|
cbptr = uap->aiocbp;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < uap->nent; i++) {
|
2002-05-25 18:39:42 +00:00
|
|
|
cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (cbp == 0)
|
|
|
|
continue;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
ujoblist[njoblist] = cbp;
|
|
|
|
ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
njoblist++;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
if (njoblist == 0) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiol_zone, ijoblist);
|
|
|
|
uma_zfree(aiol_zone, ujoblist);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
error = 0;
|
2000-01-14 02:53:29 +00:00
|
|
|
for (;;) {
|
2002-01-14 07:26:33 +00:00
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < njoblist; i++) {
|
|
|
|
if (((intptr_t)
|
|
|
|
cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
ijoblist[i]) {
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
if (ujoblist[i] != cb->uuaiocb)
|
|
|
|
error = EINVAL;
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiol_zone, ijoblist);
|
|
|
|
uma_zfree(aiol_zone, ujoblist);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
|
|
|
|
TAILQ_NEXT(cb, plist)) {
|
|
|
|
for (i = 0; i < njoblist; i++) {
|
|
|
|
if (((intptr_t)
|
|
|
|
cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
ijoblist[i]) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
if (ujoblist[i] != cb->uuaiocb)
|
|
|
|
error = EINVAL;
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiol_zone, ijoblist);
|
|
|
|
uma_zfree(aiol_zone, ujoblist);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2000-01-14 02:53:29 +00:00
|
|
|
error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo);
|
2000-01-20 08:15:13 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-20 01:59:58 +00:00
|
|
|
if (error == ERESTART || error == EINTR) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiol_zone, ijoblist);
|
|
|
|
uma_zfree(aiol_zone, ujoblist);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINTR);
|
1997-07-06 02:40:43 +00:00
|
|
|
} else if (error == EWOULDBLOCK) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiol_zone, ijoblist);
|
|
|
|
uma_zfree(aiol_zone, ujoblist);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* NOTREACHED */
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-06-16 00:27:26 +00:00
|
|
|
|
|
|
|
/*
|
2000-02-23 07:44:25 +00:00
|
|
|
* aio_cancel cancels any non-physio aio operations not currently in
|
|
|
|
* progress.
|
1997-06-16 00:27:26 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_cancel(struct thread *td, struct aio_cancel_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2000-02-23 07:44:25 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct aiocblist *cbe, *cbn;
|
|
|
|
struct file *fp;
|
|
|
|
struct filedesc *fdp;
|
|
|
|
struct socket *so;
|
|
|
|
struct proc *po;
|
|
|
|
int s,error;
|
|
|
|
int cancelled=0;
|
|
|
|
int notcancelled=0;
|
|
|
|
struct vnode *vp;
|
|
|
|
|
|
|
|
fdp = p->p_fd;
|
2002-01-02 07:04:38 +00:00
|
|
|
if ((u_int)uap->fd >= fdp->fd_nfiles ||
|
|
|
|
(fp = fdp->fd_ofiles[uap->fd]) == NULL)
|
|
|
|
return (EBADF);
|
2000-02-23 07:44:25 +00:00
|
|
|
|
2004-08-13 17:43:53 +00:00
|
|
|
if (fp->f_type == DTYPE_VNODE) {
|
2003-06-22 08:41:43 +00:00
|
|
|
vp = fp->f_vnode;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-02-23 07:44:25 +00:00
|
|
|
if (vn_isdisk(vp,&error)) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_NOTCANCELED;
|
2004-08-13 17:43:53 +00:00
|
|
|
return (0);
|
2000-02-23 07:44:25 +00:00
|
|
|
}
|
|
|
|
} else if (fp->f_type == DTYPE_SOCKET) {
|
2003-01-13 00:33:17 +00:00
|
|
|
so = fp->f_data;
|
2000-02-23 07:44:25 +00:00
|
|
|
|
|
|
|
s = splnet();
|
|
|
|
|
|
|
|
for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
|
|
|
|
cbn = TAILQ_NEXT(cbe, list);
|
|
|
|
if ((uap->aiocbp == NULL) ||
|
|
|
|
(uap->aiocbp == cbe->uuaiocb) ) {
|
|
|
|
po = cbe->userproc;
|
|
|
|
ki = po->p_aioinfo;
|
|
|
|
TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
|
|
|
|
TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
|
|
|
|
if (ki->kaio_flags & KAIO_WAKEUP) {
|
|
|
|
wakeup(po);
|
|
|
|
}
|
|
|
|
cbe->jobstate = JOBST_JOBFINISHED;
|
|
|
|
cbe->uaiocb._aiocb_private.status=-1;
|
|
|
|
cbe->uaiocb._aiocb_private.error=ECANCELED;
|
|
|
|
cancelled++;
|
2000-04-16 18:53:38 +00:00
|
|
|
/* XXX cancelled, knote? */
|
2004-08-13 17:43:53 +00:00
|
|
|
if (cbe->uaiocb.aio_sigevent.sigev_notify ==
|
2001-03-07 03:37:06 +00:00
|
|
|
SIGEV_SIGNAL) {
|
|
|
|
PROC_LOCK(cbe->userproc);
|
2000-02-23 07:44:25 +00:00
|
|
|
psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_UNLOCK(cbe->userproc);
|
|
|
|
}
|
2004-08-13 17:43:53 +00:00
|
|
|
if (uap->aiocbp)
|
2000-02-23 07:44:25 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
|
|
|
if ((cancelled) && (uap->aiocbp)) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_CANCELED;
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
2000-02-23 07:44:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
ki=p->p_aioinfo;
|
2002-08-11 04:09:14 +00:00
|
|
|
if (ki == NULL)
|
2002-08-11 19:04:17 +00:00
|
|
|
goto done;
|
2000-02-23 07:44:25 +00:00
|
|
|
s = splnet();
|
|
|
|
|
|
|
|
for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
|
|
|
|
cbn = TAILQ_NEXT(cbe, plist);
|
|
|
|
|
|
|
|
if ((uap->fd == cbe->uaiocb.aio_fildes) &&
|
2004-08-13 17:43:53 +00:00
|
|
|
((uap->aiocbp == NULL ) ||
|
2000-02-23 07:44:25 +00:00
|
|
|
(uap->aiocbp == cbe->uuaiocb))) {
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-02-23 07:44:25 +00:00
|
|
|
if (cbe->jobstate == JOBST_JOBQGLOBAL) {
|
|
|
|
TAILQ_REMOVE(&aio_jobs, cbe, list);
|
2004-08-13 17:43:53 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
|
|
|
|
plist);
|
2000-02-23 07:44:25 +00:00
|
|
|
cancelled++;
|
|
|
|
ki->kaio_queue_finished_count++;
|
|
|
|
cbe->jobstate = JOBST_JOBFINISHED;
|
|
|
|
cbe->uaiocb._aiocb_private.status = -1;
|
|
|
|
cbe->uaiocb._aiocb_private.error = ECANCELED;
|
2000-04-16 18:53:38 +00:00
|
|
|
/* XXX cancelled, knote? */
|
2004-08-13 17:43:53 +00:00
|
|
|
if (cbe->uaiocb.aio_sigevent.sigev_notify ==
|
2001-03-07 03:37:06 +00:00
|
|
|
SIGEV_SIGNAL) {
|
|
|
|
PROC_LOCK(cbe->userproc);
|
2000-02-23 07:44:25 +00:00
|
|
|
psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_UNLOCK(cbe->userproc);
|
|
|
|
}
|
2000-02-23 07:44:25 +00:00
|
|
|
} else {
|
|
|
|
notcancelled++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
2002-08-11 19:04:17 +00:00
|
|
|
done:
|
2000-02-23 07:44:25 +00:00
|
|
|
if (notcancelled) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_NOTCANCELED;
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
2000-02-23 07:44:25 +00:00
|
|
|
}
|
|
|
|
if (cancelled) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_CANCELED;
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
2000-02-23 07:44:25 +00:00
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_ALLDONE;
|
2000-02-23 07:44:25 +00:00
|
|
|
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* aio_error is implemented in the kernel level for compatibility purposes only.
|
|
|
|
* For a user mode async implementation, it would be best to do it in a userland
|
|
|
|
* subroutine.
|
1997-06-16 00:27:26 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_error(struct thread *td, struct aio_error_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocblist *cb;
|
|
|
|
struct kaioinfo *ki;
|
2002-04-07 01:28:34 +00:00
|
|
|
long jobref;
|
1997-06-16 00:27:26 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
|
1997-11-29 01:33:10 +00:00
|
|
|
if ((jobref == -1) || (jobref == 0))
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-06-16 00:27:26 +00:00
|
|
|
|
2002-01-14 07:26:33 +00:00
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
jobref) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = cb->uaiocb._aiocb_private.error;
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
s = splnet();
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
|
2000-01-20 01:59:58 +00:00
|
|
|
plist)) {
|
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
jobref) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = EINPROGRESS;
|
2000-01-20 01:59:58 +00:00
|
|
|
splx(s);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
2000-01-20 01:59:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
|
2000-01-14 02:53:29 +00:00
|
|
|
plist)) {
|
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
jobref) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = EINPROGRESS;
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
splx(s);
|
1997-06-16 00:27:26 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
s = splbio();
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
|
|
|
|
plist)) {
|
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
jobref) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = cb->uaiocb._aiocb_private.error;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
|
|
|
|
plist)) {
|
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
|
|
|
|
jobref) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = EINPROGRESS;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
#if (0)
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Hack for lio.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
status = fuword(&uap->aiocbp->_aiocb_private.status);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (status == -1)
|
1997-07-06 02:40:43 +00:00
|
|
|
return fuword(&uap->aiocbp->_aiocb_private.error);
|
2000-01-14 02:53:29 +00:00
|
|
|
#endif
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* syscall - asynchronous read from a file (REALTIME) */
|
1997-06-16 00:27:26 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_read(struct thread *td, struct aio_read_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-12-29 07:13:47 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
return aio_aqueue(td, uap->aiocbp, LIO_READ);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* syscall - asynchronous write to a file (REALTIME) */
|
1997-06-16 00:27:26 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_write(struct thread *td, struct aio_write_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-12-29 07:13:47 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
return aio_aqueue(td, uap->aiocbp, LIO_WRITE);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
|
|
|
|
2003-01-12 09:33:16 +00:00
|
|
|
/* syscall - list directed I/O (REALTIME) */
|
1997-06-16 00:27:26 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
lio_listio(struct thread *td, struct lio_listio_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1997-11-07 08:53:44 +00:00
|
|
|
int nent, nentqueued;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocb *iocb, * const *cbptr;
|
|
|
|
struct aiocblist *cb;
|
|
|
|
struct kaioinfo *ki;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj;
|
1997-07-06 02:40:43 +00:00
|
|
|
int error, runningcode;
|
1997-11-29 01:33:10 +00:00
|
|
|
int nerror;
|
1997-06-16 00:27:26 +00:00
|
|
|
int i;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
int s;
|
1997-06-16 00:27:26 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
nent = uap->nent;
|
2003-01-12 09:40:23 +00:00
|
|
|
if (nent < 0 || nent > AIO_LISTIO_MAX)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (p->p_aioinfo == NULL)
|
1997-07-06 02:40:43 +00:00
|
|
|
aio_init_aioinfo(p);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((nent + num_queue_count) > max_queue_count)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2003-02-19 05:47:46 +00:00
|
|
|
lj = uma_zalloc(aiolio_zone, M_WAITOK);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (!lj)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
|
|
|
lj->lioj_flags = 0;
|
|
|
|
lj->lioj_buffer_count = 0;
|
|
|
|
lj->lioj_buffer_finished_count = 0;
|
|
|
|
lj->lioj_queue_count = 0;
|
|
|
|
lj->lioj_queue_finished_count = 0;
|
|
|
|
lj->lioj_ki = ki;
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Setup signal.
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
|
|
|
if (uap->sig && (uap->mode == LIO_NOWAIT)) {
|
2000-01-14 02:53:29 +00:00
|
|
|
error = copyin(uap->sig, &lj->lioj_signal,
|
2004-08-13 17:43:53 +00:00
|
|
|
sizeof(lj->lioj_signal));
|
2001-04-18 22:18:39 +00:00
|
|
|
if (error) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiolio_zone, lj);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
2001-04-18 22:18:39 +00:00
|
|
|
}
|
|
|
|
if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiolio_zone, lj);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
2001-04-18 22:18:39 +00:00
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL;
|
2002-10-14 01:21:37 +00:00
|
|
|
}
|
2001-04-18 22:18:39 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
|
2000-01-14 02:53:29 +00:00
|
|
|
/*
|
|
|
|
* Get pointers to the list of I/O requests.
|
|
|
|
*/
|
1997-11-29 01:33:10 +00:00
|
|
|
nerror = 0;
|
|
|
|
nentqueued = 0;
|
1997-07-06 02:40:43 +00:00
|
|
|
cbptr = uap->acb_list;
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < uap->nent; i++) {
|
2002-05-25 18:39:42 +00:00
|
|
|
iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
|
2002-08-22 21:24:01 +00:00
|
|
|
if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
|
2001-09-12 08:38:13 +00:00
|
|
|
error = _aio_aqueue(td, iocb, lj, 0);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (error == 0)
|
1997-11-29 01:33:10 +00:00
|
|
|
nentqueued++;
|
2000-01-14 02:53:29 +00:00
|
|
|
else
|
1997-11-29 01:33:10 +00:00
|
|
|
nerror++;
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-10-09 04:14:41 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If we haven't queued any, then just return error.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if (nentqueued == 0)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-10-09 04:14:41 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Calculate the appropriate error return.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
runningcode = 0;
|
1997-11-29 01:33:10 +00:00
|
|
|
if (nerror)
|
1997-07-06 02:40:43 +00:00
|
|
|
runningcode = EIO;
|
|
|
|
|
|
|
|
if (uap->mode == LIO_WAIT) {
|
2000-01-14 02:53:29 +00:00
|
|
|
int command, found, jobref;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (;;) {
|
1997-11-29 01:33:10 +00:00
|
|
|
found = 0;
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < uap->nent; i++) {
|
1997-10-09 04:14:41 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Fetch address of the control buf pointer in
|
|
|
|
* user space.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
2002-05-24 06:06:18 +00:00
|
|
|
iocb = (struct aiocb *)
|
2002-05-25 18:39:42 +00:00
|
|
|
(intptr_t)fuword(&cbptr[i]);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((intptr_t)iocb == -1) || ((intptr_t)iocb
|
|
|
|
== 0))
|
1997-11-29 01:33:10 +00:00
|
|
|
continue;
|
1997-10-09 04:14:41 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Fetch the associated command from user space.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
command = fuword(&iocb->aio_lio_opcode);
|
1997-11-29 01:33:10 +00:00
|
|
|
if (command == LIO_NOP) {
|
|
|
|
found++;
|
1997-07-06 02:40:43 +00:00
|
|
|
continue;
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2003-01-13 15:06:05 +00:00
|
|
|
jobref =
|
|
|
|
fuword(&iocb->_aiocb_private.kernelinfo);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2001-02-04 16:08:18 +00:00
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
|
|
|
|
== jobref) {
|
|
|
|
if (cb->uaiocb.aio_lio_opcode
|
|
|
|
== LIO_WRITE) {
|
2001-12-31 02:03:39 +00:00
|
|
|
p->p_stats->p_ru.ru_oublock
|
2000-01-14 02:53:29 +00:00
|
|
|
+=
|
|
|
|
cb->outputcharge;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
cb->outputcharge = 0;
|
2000-01-14 02:53:29 +00:00
|
|
|
} else if (cb->uaiocb.aio_lio_opcode
|
|
|
|
== LIO_READ) {
|
2001-12-31 02:03:39 +00:00
|
|
|
p->p_stats->p_ru.ru_inblock
|
2000-01-14 02:53:29 +00:00
|
|
|
+= cb->inputcharge;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
cb->inputcharge = 0;
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
found++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
s = splbio();
|
2001-02-04 16:08:18 +00:00
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
|
|
|
|
== jobref) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
found++;
|
|
|
|
break;
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
splx(s);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-10-09 04:14:41 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If all I/Os have been disposed of, then we can
|
|
|
|
* return.
|
1997-10-09 04:14:41 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if (found == nentqueued)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (runningcode);
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2000-01-14 02:53:29 +00:00
|
|
|
error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (error == EINTR)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINTR);
|
2000-01-14 02:53:29 +00:00
|
|
|
else if (error == EWOULDBLOCK)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-01-13 15:06:05 +00:00
|
|
|
return (runningcode);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
2001-02-06 09:25:10 +00:00
|
|
|
* This is a weird hack so that we can post a signal. It is safe to do so from
|
2000-01-14 02:53:29 +00:00
|
|
|
* a timeout routine, but *not* from an interrupt routine.
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
|
|
|
static void
|
2000-01-14 02:53:29 +00:00
|
|
|
process_signal(void *aioj)
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
{
|
2000-01-14 02:53:29 +00:00
|
|
|
struct aiocblist *aiocbe = aioj;
|
|
|
|
struct aio_liojob *lj = aiocbe->lio;
|
|
|
|
struct aiocb *cb = &aiocbe->uaiocb;
|
|
|
|
|
|
|
|
if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
|
2001-03-07 03:37:06 +00:00
|
|
|
(lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
|
|
|
|
PROC_LOCK(lj->lioj_ki->kaio_p);
|
2000-01-14 02:53:29 +00:00
|
|
|
psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_UNLOCK(lj->lioj_ki->kaio_p);
|
2000-01-14 02:53:29 +00:00
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2001-03-07 03:37:06 +00:00
|
|
|
if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
|
|
|
|
PROC_LOCK(aiocbe->userproc);
|
2000-01-14 02:53:29 +00:00
|
|
|
psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_UNLOCK(aiocbe->userproc);
|
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Interrupt handler for physio, performs the necessary process wakeups, and
|
|
|
|
* signals.
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
1997-11-29 01:33:10 +00:00
|
|
|
static void
|
2000-01-14 02:53:29 +00:00
|
|
|
aio_physwakeup(struct buf *bp)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aiocblist *aiocbe;
|
1997-11-29 01:33:10 +00:00
|
|
|
struct proc *p;
|
|
|
|
struct kaioinfo *ki;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aio_liojob *lj;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2001-03-06 15:54:38 +00:00
|
|
|
wakeup(bp);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2003-09-10 15:48:51 +00:00
|
|
|
aiocbe = (struct aiocblist *)bp->b_caller1;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (aiocbe) {
|
2003-09-10 15:48:51 +00:00
|
|
|
p = aiocbe->userproc;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
|
|
|
aiocbe->jobstate = JOBST_JOBBFINISHED;
|
|
|
|
aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = 0;
|
|
|
|
aiocbe->jobflags |= AIOCBLIST_DONE;
|
|
|
|
|
2000-04-02 15:24:56 +00:00
|
|
|
if (bp->b_ioflags & BIO_ERROR)
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->uaiocb._aiocb_private.error = bp->b_error;
|
|
|
|
|
|
|
|
lj = aiocbe->lio;
|
|
|
|
if (lj) {
|
|
|
|
lj->lioj_buffer_finished_count++;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* wakeup/signal if all of the interrupt jobs are done.
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj->lioj_buffer_finished_count ==
|
|
|
|
lj->lioj_buffer_count) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Post a signal if it is called for.
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((lj->lioj_flags &
|
|
|
|
(LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
|
|
|
|
LIOJ_SIGNAL) {
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
|
2001-03-10 22:47:57 +00:00
|
|
|
aiocbe->timeouthandle =
|
|
|
|
timeout(process_signal,
|
|
|
|
aiocbe, 0);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (ki) {
|
|
|
|
ki->kaio_buffer_finished_count++;
|
|
|
|
TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
|
|
|
|
TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
KNOTE_UNLOCKED(&aiocbe->klist, 0);
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Do the wakeup. */
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
|
|
|
|
ki->kaio_flags &= ~KAIO_WAKEUP;
|
|
|
|
wakeup(p);
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL)
|
2001-03-10 22:47:57 +00:00
|
|
|
aiocbe->timeouthandle =
|
|
|
|
timeout(process_signal, aiocbe, 0);
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* syscall - wait for the next completion of an aio request */
|
2000-01-14 02:53:29 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
|
2000-01-14 02:53:29 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct timeval atv;
|
|
|
|
struct timespec ts;
|
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct aiocblist *cb = NULL;
|
|
|
|
int error, s, timo;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-02-23 07:44:25 +00:00
|
|
|
suword(uap->aiocbp, (int)NULL);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
timo = 0;
|
|
|
|
if (uap->timeout) {
|
|
|
|
/* Get timespec struct. */
|
2001-03-05 01:30:23 +00:00
|
|
|
error = copyin(uap->timeout, &ts, sizeof(ts));
|
2000-01-14 02:53:29 +00:00
|
|
|
if (error)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
TIMESPEC_TO_TIMEVAL(&atv, &ts);
|
|
|
|
if (itimerfix(&atv))
|
|
|
|
return (EINVAL);
|
|
|
|
timo = tvtohz(&atv);
|
|
|
|
}
|
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
|
2001-12-10 03:34:06 +00:00
|
|
|
suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = cb->uaiocb._aiocb_private.status;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
|
2001-12-31 02:03:39 +00:00
|
|
|
p->p_stats->p_ru.ru_oublock +=
|
2000-01-14 02:53:29 +00:00
|
|
|
cb->outputcharge;
|
|
|
|
cb->outputcharge = 0;
|
|
|
|
} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
|
2001-12-31 02:03:39 +00:00
|
|
|
p->p_stats->p_ru.ru_inblock += cb->inputcharge;
|
2000-01-14 02:53:29 +00:00
|
|
|
cb->inputcharge = 0;
|
|
|
|
}
|
|
|
|
aio_free_entry(cb);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (cb->uaiocb._aiocb_private.error);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
s = splbio();
|
|
|
|
if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) {
|
|
|
|
splx(s);
|
2001-12-10 03:34:06 +00:00
|
|
|
suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = cb->uaiocb._aiocb_private.status;
|
2000-01-14 02:53:29 +00:00
|
|
|
aio_free_entry(cb);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (cb->uaiocb._aiocb_private.error);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
|
|
|
error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo);
|
2000-02-23 07:44:25 +00:00
|
|
|
splx(s);
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2000-02-23 07:44:25 +00:00
|
|
|
if (error == ERESTART)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINTR);
|
2000-02-23 07:44:25 +00:00
|
|
|
else if (error < 0)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
2000-01-14 02:53:29 +00:00
|
|
|
else if (error == EINTR)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINTR);
|
2000-01-14 02:53:29 +00:00
|
|
|
else if (error == EWOULDBLOCK)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
}
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* kqueue attach function */
|
2000-04-16 18:53:38 +00:00
|
|
|
static int
|
|
|
|
filt_aioattach(struct knote *kn)
|
|
|
|
{
|
2002-08-06 19:01:08 +00:00
|
|
|
struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The aiocbe pointer must be validated before using it, so
|
|
|
|
* registration is restricted to the kernel; the user cannot
|
|
|
|
* set EV_FLAG1.
|
|
|
|
*/
|
|
|
|
if ((kn->kn_flags & EV_FLAG1) == 0)
|
|
|
|
return (EPERM);
|
|
|
|
kn->kn_flags &= ~EV_FLAG1;
|
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
knlist_add(&aiocbe->klist, kn, 0);
|
2000-04-16 18:53:38 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* kqueue detach function */
|
2000-04-16 18:53:38 +00:00
|
|
|
static void
|
|
|
|
filt_aiodetach(struct knote *kn)
|
|
|
|
{
|
2002-08-06 19:01:08 +00:00
|
|
|
struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
knlist_remove(&aiocbe->klist, kn, 0);
|
2000-04-16 18:53:38 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* kqueue filter function */
|
2000-04-16 18:53:38 +00:00
|
|
|
/*ARGSUSED*/
|
|
|
|
static int
|
|
|
|
filt_aio(struct knote *kn, long hint)
|
|
|
|
{
|
2002-08-06 19:01:08 +00:00
|
|
|
struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2001-12-09 08:16:36 +00:00
|
|
|
kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
|
2000-09-04 07:56:32 +00:00
|
|
|
if (aiocbe->jobstate != JOBST_JOBFINISHED &&
|
|
|
|
aiocbe->jobstate != JOBST_JOBBFINISHED)
|
2000-04-16 18:53:38 +00:00
|
|
|
return (0);
|
2004-08-13 17:43:53 +00:00
|
|
|
kn->kn_flags |= EV_EOF;
|
2000-04-16 18:53:38 +00:00
|
|
|
return (1);
|
|
|
|
}
|