2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
1997-06-16 00:27:26 +00:00
|
|
|
* Copyright (c) 1997 John S. Dyson. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. John S. Dyson's name may not be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* DISCLAIMER: This code isn't warranted to do anything useful. Anything
|
|
|
|
* bad that happens because of using this software isn't the responsibility
|
|
|
|
* of the author. This software is distributed AS-IS.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
1998-03-28 11:51:01 +00:00
|
|
|
* This file contains support for the POSIX 1003.1B AIO/LIO facility.
|
1997-06-16 00:27:26 +00:00
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2002-02-23 11:12:57 +00:00
|
|
|
#include <sys/malloc.h>
|
2000-05-05 09:59:14 +00:00
|
|
|
#include <sys/bio.h>
|
1999-02-25 15:54:06 +00:00
|
|
|
#include <sys/buf.h>
|
2003-03-24 21:15:35 +00:00
|
|
|
#include <sys/eventhandler.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/sysproto.h>
|
|
|
|
#include <sys/filedesc.h>
|
|
|
|
#include <sys/kernel.h>
|
2004-05-30 20:34:58 +00:00
|
|
|
#include <sys/module.h>
|
2001-03-09 06:27:01 +00:00
|
|
|
#include <sys/kthread.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/fcntl.h>
|
|
|
|
#include <sys/file.h>
|
2003-04-29 13:36:06 +00:00
|
|
|
#include <sys/limits.h>
|
1997-11-18 10:02:40 +00:00
|
|
|
#include <sys/lock.h>
|
2000-10-20 07:58:15 +00:00
|
|
|
#include <sys/mutex.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/unistd.h>
|
2006-11-11 16:26:58 +00:00
|
|
|
#include <sys/posix4.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/proc.h>
|
1998-08-17 17:28:10 +00:00
|
|
|
#include <sys/resourcevar.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/signalvar.h>
|
2000-01-14 02:53:29 +00:00
|
|
|
#include <sys/protosw.h>
|
2006-01-22 05:59:27 +00:00
|
|
|
#include <sys/sema.h>
|
|
|
|
#include <sys/socket.h>
|
2000-01-14 02:53:29 +00:00
|
|
|
#include <sys/socketvar.h>
|
2001-12-29 07:13:47 +00:00
|
|
|
#include <sys/syscall.h>
|
|
|
|
#include <sys/sysent.h>
|
1997-10-09 04:14:41 +00:00
|
|
|
#include <sys/sysctl.h>
|
2002-03-25 21:52:04 +00:00
|
|
|
#include <sys/sx.h>
|
2006-01-22 05:59:27 +00:00
|
|
|
#include <sys/taskqueue.h>
|
1997-11-29 01:33:10 +00:00
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/conf.h>
|
2000-04-16 18:53:38 +00:00
|
|
|
#include <sys/event.h>
|
2006-03-23 08:46:42 +00:00
|
|
|
#include <sys/mount.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
#include <machine/atomic.h>
|
|
|
|
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/vm_extern.h>
|
1997-07-06 02:40:43 +00:00
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_map.h>
|
2006-03-23 08:46:42 +00:00
|
|
|
#include <vm/vm_object.h>
|
2002-03-20 04:09:59 +00:00
|
|
|
#include <vm/uma.h>
|
1997-06-16 00:27:26 +00:00
|
|
|
#include <sys/aio.h>
|
1997-07-17 04:49:43 +00:00
|
|
|
|
2000-02-23 07:44:25 +00:00
|
|
|
#include "opt_vfs_aio.h"
|
1997-06-16 00:27:26 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
 * Counter for allocating reference ids to new jobs.  Wrapped to 1 on
 * overflow.  (XXX will be removed soon.)
 */
static u_long jobrefid;

/*
 * Counter for aio_fsync.  Monotonically increasing; presumably stamped
 * into each job's seqno so aio_fsync can order itself against earlier
 * I/O -- NOTE(review): confirm against the queueing sites.
 */
static uint64_t jobseqno;
|
|
|
|
|
|
|
|
/*
 * Job states, stored in aiocblist.jobstate (locked by the backend
 * lock -- see the locking key below).  NOTE(review): individual state
 * meanings are inferred from the names (which queue the job sits on,
 * or running/finished); confirm against the queue/dequeue sites.
 */
#define JOBST_NULL		0
#define JOBST_JOBQSOCK		1
#define JOBST_JOBQGLOBAL	2
#define JOBST_JOBRUNNING	3
#define JOBST_JOBFINISHED	4
#define JOBST_JOBQBUF		5
#define JOBST_JOBQSYNC		6
|
1997-07-06 02:40:43 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
 * Compile-time defaults for the vfs.aio.* tunables declared further
 * below.  Each default is wrapped in #ifndef so it can be overridden
 * from the kernel configuration (via "opt_vfs_aio.h", included above).
 */
#ifndef MAX_AIO_PER_PROC
#define MAX_AIO_PER_PROC	32
#endif

#ifndef MAX_AIO_QUEUE_PER_PROC
#define MAX_AIO_QUEUE_PER_PROC	256 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef MAX_AIO_PROCS
#define MAX_AIO_PROCS		32
#endif

#ifndef MAX_AIO_QUEUE
#define MAX_AIO_QUEUE		1024 /* Bigger than AIO_LISTIO_MAX */
#endif

#ifndef TARGET_AIO_PROCS
#define TARGET_AIO_PROCS	4
#endif

#ifndef MAX_BUF_AIO
#define MAX_BUF_AIO		16
#endif

/* Both daemon timeouts below are expressed in clock ticks (hz = 1s). */
#ifndef AIOD_TIMEOUT_DEFAULT
#define AIOD_TIMEOUT_DEFAULT	(10 * hz)
#endif

#ifndef AIOD_LIFETIME_DEFAULT
#define AIOD_LIFETIME_DEFAULT	(30 * hz)
#endif
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2008-02-01 11:59:14 +00:00
|
|
|
/* Advertise AIO availability through the kern.features sysctl tree. */
FEATURE(aio, "Asynchronous I/O");

/* Root of the vfs.aio.* sysctl namespace used by the knobs below. */
static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int max_aio_procs = MAX_AIO_PROCS;
|
1997-10-09 04:14:41 +00:00
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
|
2002-03-05 15:38:49 +00:00
|
|
|
CTLFLAG_RW, &max_aio_procs, 0,
|
|
|
|
"Maximum number of kernel threads to use for handling async IO ");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int num_aio_procs = 0;
|
1997-10-09 04:14:41 +00:00
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
|
2002-03-05 15:38:49 +00:00
|
|
|
CTLFLAG_RD, &num_aio_procs, 0,
|
|
|
|
"Number of presently active kernel threads for async IO");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* The code will adjust the actual number of AIO processes towards this
|
|
|
|
* number when it gets a chance.
|
|
|
|
*/
|
|
|
|
static int target_aio_procs = TARGET_AIO_PROCS;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
|
|
|
|
0, "Preferred number of ready kernel threads for async IO");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int max_queue_count = MAX_AIO_QUEUE;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
|
|
|
|
"Maximum number of aio requests to queue, globally");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int num_queue_count = 0;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
|
|
|
|
"Number of queued aio requests");
|
1997-10-09 04:14:41 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
static int num_buf_aio = 0;
|
|
|
|
SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
|
|
|
|
"Number of aio requests presently handled by the buf subsystem");
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* Number of async I/O threads in the process of being started */
/* XXX This should be local to aio_aqueue() */
static int num_aio_resv_start = 0;

/* Daemon sleep timeout, in ticks (see AIOD_TIMEOUT_DEFAULT). */
static int aiod_timeout;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
	"Timeout value for synchronous aio operations");

/* How long an idle aiod lingers before exiting, in ticks. */
static int aiod_lifetime;
SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
	"Maximum lifetime for idle aiod");

/* When non-zero, permit unloading the aio module. */
static int unloadable = 0;
SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
	"Allow unload of aio (not recommended)");

/* Per-process cap on simultaneously active requests. */
static int max_aio_per_proc = MAX_AIO_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
	0, "Maximum active aio requests per process (stored in the process)");

/* Per-process cap on queued requests. */
static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
	&max_aio_queue_per_proc, 0,
	"Maximum queued aio requests per process (stored in the process)");

/* Per-process cap on requests routed through the buf (BIO) backend. */
static int max_buf_aio = MAX_BUF_AIO;
SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
	"Maximum buf aio requests per process (stored in the process)");
|
|
|
|
|
2005-10-30 02:12:49 +00:00
|
|
|
/*
 * Old-layout user aiocb using the old osigevent structure.
 * NOTE(review): purpose inferred from the osigevent member -- this
 * appears to exist for backward compatibility with the pre-sigevent
 * syscall ABI; confirm against the compat syscall entry points.
 */
typedef struct oaiocb {
	int	aio_fildes;		/* File descriptor */
	off_t	aio_offset;		/* File offset for I/O */
	volatile void *aio_buf;		/* I/O buffer in process space */
	size_t	aio_nbytes;		/* Number of bytes for I/O */
	struct	osigevent aio_sigevent;	/* Signal to deliver */
	int	aio_lio_opcode;		/* LIO opcode */
	int	aio_reqprio;		/* Request priority -- ignored */
	struct	__aiocb_private	_aiocb_private;
} oaiocb_t;
|
|
|
|
|
2006-01-24 07:24:24 +00:00
|
|
|
/*
|
|
|
|
* Below is a key of locks used to protect each member of struct aiocblist
|
|
|
|
* aioliojob and kaioinfo and any backends.
|
|
|
|
*
|
|
|
|
 * * - need not be protected
|
2006-05-09 00:10:11 +00:00
|
|
|
* a - locked by kaioinfo lock
|
2006-01-24 07:24:24 +00:00
|
|
|
* b - locked by backend lock, the backend lock can be null in some cases,
|
|
|
|
* for example, BIO belongs to this type, in this case, proc lock is
|
|
|
|
* reused.
|
|
|
|
* c - locked by aio_job_mtx, the lock for the generic file I/O backend.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * Currently, there are only two backends: BIO and generic file I/O.
|
|
|
|
* socket I/O is served by generic file I/O, this is not a good idea, since
|
|
|
|
* disk file I/O and any other types without O_NONBLOCK flag can block daemon
|
|
|
|
* threads, if there is no thread to serve socket I/O, the socket I/O will be
|
|
|
|
* delayed too long or starved, we should create some threads dedicated to
|
|
|
|
* sockets to do non-blocking I/O, same for pipe and fifo, for these I/O
|
|
|
|
* systems we really need non-blocking interface, fiddling O_NONBLOCK in file
|
|
|
|
* structure is not safe because there is race between userland and aio
|
|
|
|
* daemons.
|
|
|
|
*/
|
|
|
|
|
2002-01-06 21:03:39 +00:00
|
|
|
/*
 * An in-kernel asynchronous I/O request; one instance per queued
 * aiocb.  Letter codes in the field comments refer to the locking key
 * documented earlier in this file.
 */
struct aiocblist {
	TAILQ_ENTRY(aiocblist) list;	/* (b) internal list of for backend */
	TAILQ_ENTRY(aiocblist) plist;	/* (a) list of jobs for each backend */
	TAILQ_ENTRY(aiocblist) allist;	/* (a) list of all jobs in proc */
	int	jobflags;		/* (a) job flags (AIOCBLIST_*) */
	int	jobstate;		/* (b) job state (JOBST_*) */
	int	inputcharge;		/* (*) input blocks */
	int	outputcharge;		/* (*) output blocks */
	struct	buf *bp;		/* (*) private to BIO backend,
					 * buffer pointer
					 */
	struct	proc *userproc;		/* (*) user process */
	struct	ucred *cred;		/* (*) active credential when created */
	struct	file *fd_file;		/* (*) pointer to file structure */
	struct	aioliojob *lio;		/* (*) optional lio job */
	struct	aiocb *uuaiocb;		/* (*) pointer in userspace of aiocb */
	struct	knlist klist;		/* (a) list of knotes */
	struct	aiocb uaiocb;		/* (*) kernel I/O control block */
	ksiginfo_t ksi;			/* (a) realtime signal info */
	struct	task biotask;		/* (*) private to BIO backend */
	uint64_t seqno;			/* (*) job number */
	int	pending;		/* (a) number of pending I/O, aio_fsync only */
};
|
|
|
|
|
|
|
|
/*
 * jobflags -- bit flags for aiocblist.jobflags (locked (a), per the
 * locking key above).  NOTE(review): individual flag semantics are
 * inferred from the names; confirm against the set/test sites.
 */
#define AIOCBLIST_DONE		0x01
#define AIOCBLIST_BUFDONE	0x02
#define AIOCBLIST_RUNDOWN	0x04
#define AIOCBLIST_CHECKSYNC	0x08
|
2002-01-06 21:03:39 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
 * AIO process info -- flag bits for aiothreadlist.aiothreadflags.
 */
#define AIOP_FREE	0x1			/* proc on free queue */
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
/*
 * Per-daemon bookkeeping for one AIO worker kernel thread.  Letter
 * codes refer to the locking key documented earlier in this file.
 */
struct aiothreadlist {
	int	aiothreadflags;			/* (c) AIO proc flags (AIOP_FREE) */
	TAILQ_ENTRY(aiothreadlist) list;	/* (c) list of processes */
	struct	thread *aiothread;		/* (*) the AIO thread */
};
|
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
 * data-structure for lio signal management
 */
struct aioliojob {
	int lioj_flags;				/* (a) listio flags (LIOJ_*) */
	int lioj_count;				/* (a) count of jobs tied to this lio */
	int lioj_finished_count;		/* (a) count of those jobs completed */
	struct sigevent lioj_signal;		/* (a) signal on all I/O done */
	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
	struct knlist klist;			/* (a) list of knotes */
	ksiginfo_t lioj_ksi;			/* (a) Realtime signal info */
};
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Values for aioliojob lioj_flags. */
#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
#define	LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
|
|
|
/*
 * per process aio data structure
 */
struct kaioinfo {
	struct mtx	kaio_mtx;	/* the lock to protect this struct */
	int	kaio_flags;		/* (a) per process kaio flags (KAIO_*) */
	int	kaio_maxactive_count;	/* (*) maximum number of AIOs */
	int	kaio_active_count;	/* (c) number of currently used AIOs */
	int	kaio_qallowed_count;	/* (*) maximum size of AIO queue */
	int	kaio_count;		/* (a) size of AIO queue */
	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
	int	kaio_buffer_count;	/* (a) number of physio buffers */
	TAILQ_HEAD(,aiocblist) kaio_all;	/* (a) all AIOs in the process */
	TAILQ_HEAD(,aiocblist) kaio_done;	/* (a) done queue for process */
	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* (a) job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* (a) buffer job queue for process */
	TAILQ_HEAD(,aiocblist) kaio_sockqueue;	/* (a) queue for aios waiting on sockets,
						 * NOT USED YET.
						 */
	TAILQ_HEAD(,aiocblist) kaio_syncqueue;	/* (a) queue for aio_fsync */
	struct	task kaio_task;		/* (*) task to kick aio threads */
};
|
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
/* Convenience wrappers around the per-process kaioinfo mutex. */
#define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
#define AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
#define AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
#define AIO_MTX(ki)		(&(ki)->kaio_mtx)

/* Values for kaioinfo kaio_flags. */
#define KAIO_RUNDOWN	0x1	/* process is being run down */
#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2006-01-24 07:24:24 +00:00
|
|
|
static TAILQ_HEAD(,aiothreadlist) aio_freeproc;	/* (c) Idle daemons */
/* NOTE(review): presumably throttles daemon creation in aio_newproc();
 * that function is outside this chunk -- confirm. */
static struct sema aio_newproc_sem;
static struct mtx aio_job_mtx;		/* protects the (c)-annotated state */
static struct mtx aio_sock_mtx;
static TAILQ_HEAD(,aiocblist) aio_jobs;	/* (c) Async job list */
static struct unrhdr *aiod_unr;		/* unit numbers for aio daemons */
|
1997-07-06 02:40:43 +00:00
|
|
|
|
MFP4 (with some minor changes):
Implement the linux_io_* syscalls (AIO). They are only enabled if the native
AIO code is available (either compiled in to the kernel or as a module) at
the time the functions are used. If the AIO stuff is not available there
will be a ENOSYS.
From the submitter:
---snip---
DESIGN NOTES:
1. Linux permits a process to own multiple AIO queues (distinguished by
"context"), but FreeBSD creates only one single AIO queue per process.
My code maintains a request queue (STAILQ of queue(3)) per "context",
and throws all AIO requests of all contexts owned by a process into
the single FreeBSD per-process AIO queue.
When the process calls io_destroy(2), io_getevents(2), io_submit(2) and
io_cancel(2), my code can pick out requests owned by the specified context
from the single FreeBSD per-process AIO queue according to the per-context
request queues maintained by my code.
2. The request queue maintained by my code stores contrast information between
Linux IO control blocks (struct linux_iocb) and FreeBSD IO control blocks
(struct aiocb). FreeBSD IO control block actually exists in userland memory
space, required by FreeBSD native aio_XXXXXX(2).
3. It is quite troubling that the function io_getevents() of libaio-0.3.105
needs to use Linux-specific "struct aio_ring", which is a partial mirror
of context in user space. I would rather take the address of context in
kernel as the context ID, but the io_getevents() of libaio forces me to
take the address of the "ring" in user space as the context ID.
To my surprise, one comment line in the file "io_getevents.c" of
libaio-0.3.105 reads:
Ben will hate me for this
REFERENCE:
1. Linux kernel source code: http://www.kernel.org/pub/linux/kernel/v2.6/
(include/linux/aio_abi.h, fs/aio.c)
2. Linux manual pages: http://www.kernel.org/pub/linux/docs/manpages/
(io_setup(2), io_destroy(2), io_getevents(2), io_submit(2), io_cancel(2))
3. Linux Scalability Effort: http://lse.sourceforge.net/io/aio.html
The design notes: http://lse.sourceforge.net/io/aionotes.txt
4. The package libaio, both source and binary:
http://rpmfind.net/linux/rpm2html/search.php?query=libaio
Simple transparent interface to Linux AIO system calls.
5. Libaio-oracle: http://oss.oracle.com/projects/libaio-oracle/
POSIX AIO implementation based on Linux AIO system calls (depending on
libaio).
---snip---
Submitted by: Li, Xiao <intron@intron.ac>
2006-10-15 14:22:14 +00:00
|
|
|
/* Forward declarations. */
void	aio_init_aioinfo(struct proc *p);
static void	aio_onceonly(void);
static int	aio_free_entry(struct aiocblist *aiocbe);
static void	aio_process(struct aiocblist *aiocbe);
static int	aio_newproc(int *);
int	aio_aqueue(struct thread *td, struct aiocb *job,
		struct aioliojob *lio, int type, int osigev);
static void	aio_physwakeup(struct buf *bp);
static void	aio_proc_rundown(void *arg, struct proc *p);
static void	aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp);
static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void	biohelper(void *, int);
static void	aio_daemon(void *param);
static void	aio_swake_cb(struct socket *, struct sockbuf *);
static int	aio_unload(void);
static void	aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type);
/* "type" argument values for aio_bio_done_notify(). */
#define DONE_BUF	1
#define DONE_QUEUE	2
static int	do_lio_listio(struct thread *td, struct lio_listio_args *uap, int oldsigev);
static int	aio_kick(struct proc *userp);
static void	aio_kick_nowait(struct proc *userp);
static void	aio_kick_helper(void *context, int pending);
static int	filt_aioattach(struct knote *kn);
static void	filt_aiodetach(struct knote *kn);
static int	filt_aio(struct knote *kn, long hint);
static int	filt_lioattach(struct knote *kn);
static void	filt_liodetach(struct knote *kn);
static int	filt_lio(struct knote *kn, long hint);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
 * Zones for:
 * 	kaio	Per process async io info
 *	aiop	async io thread data
 *	aiocb	async io jobs
 *	aiol	list io job pointer - internal to aio_suspend XXX
 *	aiolio	list io jobs
 */
static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* kqueue filters for aio */
static struct filterops aio_filtops =
	{ 0, filt_aioattach, filt_aiodetach, filt_aio };
static struct filterops lio_filtops =
	{ 0, filt_lioattach, filt_liodetach, filt_lio };

/* Eventhandler tags, kept so aio_unload() can deregister them. */
static eventhandler_tag exit_tag, exec_tag;

/* Taskqueue (and its thread) used for bio completion work. */
TASKQUEUE_DEFINE_THREAD(aiod_bio);
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* Main operations function for use as a kernel module.
|
|
|
|
*/
|
2001-12-29 07:13:47 +00:00
|
|
|
static int
|
|
|
|
aio_modload(struct module *module, int cmd, void *arg)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case MOD_LOAD:
|
|
|
|
aio_onceonly();
|
|
|
|
break;
|
|
|
|
case MOD_UNLOAD:
|
|
|
|
error = aio_unload();
|
|
|
|
break;
|
|
|
|
case MOD_SHUTDOWN:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Module glue: name, event handler, no private data. */
static moduledata_t aio_mod = {
	"aio",
	&aio_modload,
	NULL
};

/* Register the syscalls this module provides. */
SYSCALL_MODULE_HELPER(aio_cancel);
SYSCALL_MODULE_HELPER(aio_error);
SYSCALL_MODULE_HELPER(aio_fsync);
SYSCALL_MODULE_HELPER(aio_read);
SYSCALL_MODULE_HELPER(aio_return);
SYSCALL_MODULE_HELPER(aio_suspend);
SYSCALL_MODULE_HELPER(aio_waitcomplete);
SYSCALL_MODULE_HELPER(aio_write);
SYSCALL_MODULE_HELPER(lio_listio);
/* Old (compat) variants with the pre-sigevent-change ABI. */
SYSCALL_MODULE_HELPER(oaio_read);
SYSCALL_MODULE_HELPER(oaio_write);
SYSCALL_MODULE_HELPER(olio_listio);

DECLARE_MODULE(aio, aio_mod,
	SI_SUB_VFS, SI_ORDER_ANY);
MODULE_VERSION(aio, 1);
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
 * Startup initialization
 *
 * Runs once at MOD_LOAD: hooks the socket-wakeup callback and the
 * process exit/exec eventhandlers, registers the AIO/LIO kqueue
 * filters, initializes the global lists, locks and UMA zones, and
 * publishes the POSIX 1003.1B configuration values.
 */
static void
aio_onceonly(void)
{

	/* XXX: should probably just use so->callback */
	aio_swake = &aio_swake_cb;
	/* Tags are saved so aio_unload() can deregister these handlers. */
	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
	    EVENTHANDLER_PRI_ANY);
	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec, NULL,
	    EVENTHANDLER_PRI_ANY);
	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
	TAILQ_INIT(&aio_freeproc);
	sema_init(&aio_newproc_sem, 0, "aio_new_proc");
	mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF);
	mtx_init(&aio_sock_mtx, "aio_sock", NULL, MTX_DEF);
	TAILQ_INIT(&aio_jobs);
	/* Unit numbers for naming the aiod kernel threads. */
	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	aiod_timeout = AIOD_TIMEOUT_DEFAULT;
	aiod_lifetime = AIOD_LIFETIME_DEFAULT;
	jobrefid = 1;
	/* Advertise AIO availability to sysconf(_SC_ASYNCHRONOUS_IO). */
	async_io_version = _POSIX_VERSION;
	p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
	p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
	p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* Callback for unload of AIO when used as a module.
|
|
|
|
*/
|
2001-12-29 07:13:47 +00:00
|
|
|
static int
|
|
|
|
aio_unload(void)
|
|
|
|
{
|
2004-08-15 06:24:42 +00:00
|
|
|
int error;
|
2001-12-29 07:13:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX: no unloads by default, it's too dangerous.
|
|
|
|
* perhaps we could do it if locked out callers and then
|
|
|
|
* did an aio_proc_rundown() on each process.
|
2005-11-08 17:43:05 +00:00
|
|
|
*
|
|
|
|
* jhb: aio_proc_rundown() needs to run on curproc though,
|
|
|
|
* so I don't think that would fly.
|
2001-12-29 07:13:47 +00:00
|
|
|
*/
|
|
|
|
if (!unloadable)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
error = kqueue_del_filteropts(EVFILT_AIO);
|
2006-01-24 02:46:15 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
error = kqueue_del_filteropts(EVFILT_LIO);
|
2004-08-15 06:24:42 +00:00
|
|
|
if (error)
|
|
|
|
return error;
|
2002-10-27 18:07:41 +00:00
|
|
|
async_io_version = 0;
|
2001-12-29 07:13:47 +00:00
|
|
|
aio_swake = NULL;
|
2006-01-22 05:59:27 +00:00
|
|
|
taskqueue_free(taskqueue_aiod_bio);
|
|
|
|
delete_unrhdr(aiod_unr);
|
2006-01-24 02:46:15 +00:00
|
|
|
uma_zdestroy(kaio_zone);
|
|
|
|
uma_zdestroy(aiop_zone);
|
|
|
|
uma_zdestroy(aiocb_zone);
|
|
|
|
uma_zdestroy(aiol_zone);
|
|
|
|
uma_zdestroy(aiolio_zone);
|
2003-03-24 21:15:35 +00:00
|
|
|
EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
|
|
|
|
EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_destroy(&aio_job_mtx);
|
|
|
|
mtx_destroy(&aio_sock_mtx);
|
|
|
|
sema_destroy(&aio_newproc_sem);
|
2002-11-17 04:15:34 +00:00
|
|
|
p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
|
|
|
|
p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
|
|
|
|
p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
|
2001-12-29 07:13:47 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
 * Init the per-process aioinfo structure. The aioinfo limits are set
 * per-process for user limit (resource) management.
 *
 * Allocates and initializes a kaioinfo and installs it as p->p_aioinfo
 * unless another thread raced us there first, in which case ours is
 * discarded.  Also ensures the target number of aio daemons exists.
 */
void
aio_init_aioinfo(struct proc *p)
{
	struct kaioinfo *ki;

	/* Build the structure fully before publishing it under PROC_LOCK. */
	ki = uma_zalloc(kaio_zone, M_WAITOK);
	mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF);
	ki->kaio_flags = 0;
	ki->kaio_maxactive_count = max_aio_per_proc;
	ki->kaio_active_count = 0;
	ki->kaio_qallowed_count = max_aio_queue_per_proc;
	ki->kaio_count = 0;
	ki->kaio_ballowed_count = max_buf_aio;
	ki->kaio_buffer_count = 0;
	TAILQ_INIT(&ki->kaio_all);
	TAILQ_INIT(&ki->kaio_done);
	TAILQ_INIT(&ki->kaio_jobqueue);
	TAILQ_INIT(&ki->kaio_bufqueue);
	TAILQ_INIT(&ki->kaio_liojoblist);
	TAILQ_INIT(&ki->kaio_sockqueue);
	TAILQ_INIT(&ki->kaio_syncqueue);
	TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p);
	PROC_LOCK(p);
	if (p->p_aioinfo == NULL) {
		p->p_aioinfo = ki;
		PROC_UNLOCK(p);
	} else {
		/* Lost the race: another thread installed one; free ours. */
		PROC_UNLOCK(p);
		mtx_destroy(&ki->kaio_mtx);
		uma_zfree(kaio_zone, ki);
	}

	/* Make sure the configured minimum of aio daemons is running. */
	while (num_aio_procs < target_aio_procs)
		aio_newproc(NULL);
}
|
|
|
|
|
2005-11-03 05:25:26 +00:00
|
|
|
static int
|
|
|
|
aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
|
|
|
|
{
|
2006-05-09 00:10:11 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
PROC_LOCK(p);
|
2005-11-03 05:25:26 +00:00
|
|
|
if (!KSI_ONQ(ksi)) {
|
|
|
|
ksi->ksi_code = SI_ASYNCIO;
|
|
|
|
ksi->ksi_flags |= KSI_EXT | KSI_INS;
|
2006-05-09 00:10:11 +00:00
|
|
|
ret = psignal_event(p, sigev, ksi);
|
2005-11-03 05:25:26 +00:00
|
|
|
}
|
2006-05-09 00:10:11 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
return (ret);
|
2005-11-03 05:25:26 +00:00
|
|
|
}
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Free a job entry. Wait for completion if it is currently active, but don't
|
|
|
|
* delay forever. If we delay, we return a flag that says that we have to
|
|
|
|
* restart the queue scan.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
2001-03-05 01:30:23 +00:00
|
|
|
static int
|
1997-11-29 01:33:10 +00:00
|
|
|
aio_free_entry(struct aiocblist *aiocbe)
|
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
2006-01-22 05:59:27 +00:00
|
|
|
struct aioliojob *lj;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
p = aiocbe->userproc;
|
2006-01-22 05:59:27 +00:00
|
|
|
MPASS(curproc == p);
|
1997-07-06 02:40:43 +00:00
|
|
|
ki = p->p_aioinfo;
|
2006-01-22 05:59:27 +00:00
|
|
|
MPASS(ki != NULL);
|
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK_ASSERT(ki, MA_OWNED);
|
|
|
|
MPASS(aiocbe->jobstate == JOBST_JOBFINISHED);
|
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
atomic_subtract_int(&num_queue_count, 1);
|
|
|
|
|
|
|
|
ki->kaio_count--;
|
|
|
|
MPASS(ki->kaio_count >= 0);
|
|
|
|
|
2006-02-26 12:56:23 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_done, aiocbe, plist);
|
|
|
|
TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);
|
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj = aiocbe->lio;
|
2006-01-22 05:59:27 +00:00
|
|
|
if (lj) {
|
|
|
|
lj->lioj_count--;
|
|
|
|
lj->lioj_finished_count--;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-01-23 02:49:34 +00:00
|
|
|
if (lj->lioj_count == 0) {
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
|
|
|
|
/* lio is going away, we need to destroy any knotes */
|
|
|
|
knlist_delete(&lj->klist, curthread, 1);
|
2006-05-09 00:10:11 +00:00
|
|
|
PROC_LOCK(p);
|
2006-01-22 05:59:27 +00:00
|
|
|
sigqueue_take(&lj->lioj_ksi);
|
2006-05-09 00:10:11 +00:00
|
|
|
PROC_UNLOCK(p);
|
2006-01-22 05:59:27 +00:00
|
|
|
uma_zfree(aiolio_zone, lj);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
/* aiocbe is going away, we need to destroy any knotes */
|
2006-01-22 05:59:27 +00:00
|
|
|
knlist_delete(&aiocbe->klist, curthread, 1);
|
2006-05-09 00:10:11 +00:00
|
|
|
PROC_LOCK(p);
|
2006-01-22 05:59:27 +00:00
|
|
|
sigqueue_take(&aiocbe->ksi);
|
2006-05-09 00:10:11 +00:00
|
|
|
PROC_UNLOCK(p);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2006-03-23 08:46:42 +00:00
|
|
|
MPASS(aiocbe->bp == NULL);
|
2006-01-22 05:59:27 +00:00
|
|
|
aiocbe->jobstate = JOBST_NULL;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2005-11-08 17:43:05 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The thread argument here is used to find the owning process
|
|
|
|
* and is also passed to fo_close() which may pass it to various
|
|
|
|
* places such as devsw close() routines. Because of that, we
|
|
|
|
* need a thread pointer from the process owning the job that is
|
|
|
|
* persistent and won't disappear out from under us or move to
|
|
|
|
* another process.
|
|
|
|
*
|
|
|
|
* Currently, all the callers of this function call it to remove
|
|
|
|
* an aiocblist from the current process' job list either via a
|
|
|
|
* syscall or due to the current process calling exit() or
|
|
|
|
* execve(). Thus, we know that p == curproc. We also know that
|
|
|
|
* curthread can't exit since we are curthread.
|
|
|
|
*
|
|
|
|
* Therefore, we use curthread as the thread to pass to
|
|
|
|
* knlist_delete(). This does mean that it is possible for the
|
|
|
|
* thread pointer at close time to differ from the thread pointer
|
|
|
|
* at open time, but this is already true of file descriptors in
|
|
|
|
* a multithreaded process.
|
2001-09-12 08:38:13 +00:00
|
|
|
*/
|
2002-03-31 20:17:56 +00:00
|
|
|
fdrop(aiocbe->fd_file, curthread);
|
2002-11-07 20:46:37 +00:00
|
|
|
crfree(aiocbe->cred);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
2006-08-15 12:10:57 +00:00
|
|
|
/*
 * execve() event handler: tear down all outstanding AIO state for the
 * process that is replacing its image.  Simply forwards to
 * aio_proc_rundown(); the image_params argument is unused.
 */
static void
aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp __unused)
{
	aio_proc_rundown(arg, p);
}
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
 * Rundown the jobs for a given process.
 *
 * Called at process exit (and, via aio_proc_rundown_exec(), at execve).
 * Cancels every queued-but-not-running request, waits for in-flight I/O
 * to drain, frees all completed requests and lio control blocks, and
 * finally destroys the per-process kaioinfo.  Must run in the context
 * of the process being torn down (asserted below).
 */
static void
aio_proc_rundown(void *arg, struct proc *p)
{
	struct kaioinfo *ki;
	struct aioliojob *lj;
	struct aiocblist *cbe, *cbn;
	struct file *fp;
	struct socket *so;
	int remove;

	KASSERT(curthread->td_proc == p,
	    ("%s: called on non-curproc", __func__));
	ki = p->p_aioinfo;
	if (ki == NULL)
		return;

	AIO_LOCK(ki);
	/* Mark the kaio so notification paths know teardown is in progress. */
	ki->kaio_flags |= KAIO_RUNDOWN;

restart:

	/*
	 * Try to cancel all pending requests. This code simulates
	 * aio_cancel on all pending I/O requests.
	 */
	TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
		remove = 0;
		/* aio_job_mtx guards the global/sock/sync queue links. */
		mtx_lock(&aio_job_mtx);
		if (cbe->jobstate == JOBST_JOBQGLOBAL) {
			TAILQ_REMOVE(&aio_jobs, cbe, list);
			remove = 1;
		} else if (cbe->jobstate == JOBST_JOBQSOCK) {
			fp = cbe->fd_file;
			MPASS(fp->f_type == DTYPE_SOCKET);
			so = fp->f_data;
			TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
			remove = 1;
		} else if (cbe->jobstate == JOBST_JOBQSYNC) {
			TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
			remove = 1;
		}
		mtx_unlock(&aio_job_mtx);

		if (remove) {
			/* Dequeued before a daemon picked it up: cancel it. */
			cbe->jobstate = JOBST_JOBFINISHED;
			cbe->uaiocb._aiocb_private.status = -1;
			cbe->uaiocb._aiocb_private.error = ECANCELED;
			TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
			aio_bio_done_notify(p, cbe, DONE_QUEUE);
		}
	}

	/* Wait for all running I/O to be finished */
	if (TAILQ_FIRST(&ki->kaio_bufqueue) ||
	    TAILQ_FIRST(&ki->kaio_jobqueue)) {
		ki->kaio_flags |= KAIO_WAKEUP;
		/* Completion paths wakeup(&p->p_aioinfo); recheck after. */
		msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz);
		goto restart;
	}

	/* Free all completed I/O requests. */
	while ((cbe = TAILQ_FIRST(&ki->kaio_done)) != NULL)
		aio_free_entry(cbe);

	/* Every lio job must have drained by now; anything else is a bug. */
	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
		if (lj->lioj_count == 0) {
			TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
			knlist_delete(&lj->klist, curthread, 1);
			/* Withdraw any queued-but-undelivered signal. */
			PROC_LOCK(p);
			sigqueue_take(&lj->lioj_ksi);
			PROC_UNLOCK(p);
			uma_zfree(aiolio_zone, lj);
		} else {
			panic("LIO job not cleaned up: C:%d, FC:%d\n",
			    lj->lioj_count, lj->lioj_finished_count);
		}
	}
	AIO_UNLOCK(ki);
	taskqueue_drain(taskqueue_aiod_bio, &ki->kaio_task);
	mtx_destroy(&ki->kaio_mtx);
	uma_zfree(kaio_zone, ki);
	p->p_aioinfo = NULL;
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Select a job to run (called by an AIO daemon).
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static struct aiocblist *
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_selectjob(struct aiothreadlist *aiop)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocblist *aiocbe;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct proc *userp;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_assert(&aio_job_mtx, MA_OWNED);
|
2005-11-08 17:43:05 +00:00
|
|
|
TAILQ_FOREACH(aiocbe, &aio_jobs, list) {
|
1997-07-06 02:40:43 +00:00
|
|
|
userp = aiocbe->userproc;
|
|
|
|
ki = userp->p_aioinfo;
|
|
|
|
|
|
|
|
if (ki->kaio_active_count < ki->kaio_maxactive_count) {
|
|
|
|
TAILQ_REMOVE(&aio_jobs, aiocbe, list);
|
2006-01-22 05:59:27 +00:00
|
|
|
/* Account for currently active jobs. */
|
|
|
|
ki->kaio_active_count++;
|
|
|
|
aiocbe->jobstate = JOBST_JOBRUNNING;
|
|
|
|
break;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
return (aiocbe);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
2006-03-23 08:46:42 +00:00
|
|
|
/*
 * Move all data to a permanent storage device, this code
 * simulates fsync syscall.
 *
 * Returns 0 on success or an errno from vn_start_write()/VOP_FSYNC().
 * The lock sequence (Giant-if-needed, write suspension, exclusive
 * vnode lock) mirrors the regular fsync(2) path.
 */
static int
aio_fsync_vnode(struct thread *td, struct vnode *vp)
{
	struct mount *mp;
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	/* PCATCH: allow the wait for write suspension to be interrupted. */
	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto drop;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_object != NULL) {
		/* Push dirty VM pages to the vnode before VOP_FSYNC. */
		VM_OBJECT_LOCK(vp->v_object);
		vm_object_page_clean(vp->v_object, 0, 0, 0);
		VM_OBJECT_UNLOCK(vp->v_object);
	}
	error = VOP_FSYNC(vp, MNT_WAIT, td);

	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
drop:
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
 * The AIO processing activity. This is the code that does the I/O request for
 * the non-physio version of the operations. The normal vn operations are used,
 * and this code should work in all instances for every type of file, including
 * pipes, sockets, fifos, and regular files.
 *
 * XXX I don't think it works well for socket, pipe, and fifo.
 *
 * Runs in an AIO daemon thread on behalf of aiocbe->userproc; results are
 * stored back into the request's _aiocb_private error/status fields.
 */
static void
aio_process(struct aiocblist *aiocbe)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct aiocb *cb;
	struct file *fp;
	struct socket *so;
	struct uio auio;
	struct iovec aiov;
	int cnt;
	int error;
	int oublock_st, oublock_end;
	int inblock_st, inblock_end;

	td = curthread;
	/* Run the I/O with the credentials of the requesting process. */
	td_savedcred = td->td_ucred;
	td->td_ucred = aiocbe->cred;
	cb = &aiocbe->uaiocb;
	fp = aiocbe->fd_file;

	/* LIO_SYNC: no data transfer, just flush the vnode (if any). */
	if (cb->aio_lio_opcode == LIO_SYNC) {
		error = 0;
		cnt = 0;
		if (fp->f_vnode != NULL)
			error = aio_fsync_vnode(td, fp->f_vnode);
		cb->_aiocb_private.error = error;
		cb->_aiocb_private.status = 0;
		td->td_ucred = td_savedcred;
		return;
	}

	/* Build a single-segment uio describing the user's buffer. */
	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
	aiov.iov_len = cb->aio_nbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = cb->aio_offset;
	auio.uio_resid = cb->aio_nbytes;
	cnt = cb->aio_nbytes;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_td = td;

	/* Snapshot block-I/O counters to charge this request's usage. */
	inblock_st = td->td_ru.ru_inblock;
	oublock_st = td->td_ru.ru_oublock;
	/*
	 * aio_aqueue() acquires a reference to the file that is
	 * released in aio_free_entry().
	 */
	if (cb->aio_lio_opcode == LIO_READ) {
		auio.uio_rw = UIO_READ;
		if (auio.uio_resid == 0)
			error = 0;
		else
			error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
	} else {
		/* Writes to vnodes may dirty buffers; throttle first. */
		if (fp->f_type == DTYPE_VNODE)
			bwillwrite();
		auio.uio_rw = UIO_WRITE;
		error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
	}
	inblock_end = td->td_ru.ru_inblock;
	oublock_end = td->td_ru.ru_oublock;

	aiocbe->inputcharge = inblock_end - inblock_st;
	aiocbe->outputcharge = oublock_end - oublock_st;

	/* Only post-process errors if some data was actually transferred. */
	if ((error) && (auio.uio_resid != cnt)) {
		/* A partial transfer masks a retryable error. */
		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
			error = 0;
		if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
			int sigpipe = 1;
			/* Honor SO_NOSIGPIPE on sockets, like write(2). */
			if (fp->f_type == DTYPE_SOCKET) {
				so = fp->f_data;
				if (so->so_options & SO_NOSIGPIPE)
					sigpipe = 0;
			}
			if (sigpipe) {
				PROC_LOCK(aiocbe->userproc);
				psignal(aiocbe->userproc, SIGPIPE);
				PROC_UNLOCK(aiocbe->userproc);
			}
		}
	}

	/* status = number of bytes transferred. */
	cnt -= auio.uio_resid;
	cb->_aiocb_private.error = error;
	cb->_aiocb_private.status = cnt;
	td->td_ucred = td_savedcred;
}
|
|
|
|
|
2005-10-12 17:51:31 +00:00
|
|
|
/*
 * Post-completion bookkeeping for a finished request.
 *
 * Moves the job onto the per-process done queue, delivers the per-job
 * and (when the whole lio batch has finished) per-lio notifications
 * (signals and kevents), releases any aio_fsync() jobs that were
 * waiting on this request, and wakes sleepers on the kaio.
 * Caller must hold the process's AIO lock (asserted).  "type" is
 * DONE_QUEUE for daemon-processed jobs, otherwise the buffer path.
 */
static void
aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type)
{
	struct aioliojob *lj;
	struct kaioinfo *ki;
	struct aiocblist *scb, *scbn;
	int lj_done;

	ki = userp->p_aioinfo;
	AIO_LOCK_ASSERT(ki, MA_OWNED);
	lj = aiocbe->lio;
	lj_done = 0;
	if (lj) {
		lj->lioj_finished_count++;
		/* Batch complete when every queued member has finished. */
		if (lj->lioj_count == lj->lioj_finished_count)
			lj_done = 1;
	}
	if (type == DONE_QUEUE) {
		aiocbe->jobflags |= AIOCBLIST_DONE;
	} else {
		aiocbe->jobflags |= AIOCBLIST_BUFDONE;
	}
	TAILQ_INSERT_TAIL(&ki->kaio_done, aiocbe, plist);
	aiocbe->jobstate = JOBST_JOBFINISHED;

	/* Process is being torn down: skip user-visible notifications. */
	if (ki->kaio_flags & KAIO_RUNDOWN)
		goto notification_done;

	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
	    aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
		aio_sendsig(userp, &aiocbe->uaiocb.aio_sigevent, &aiocbe->ksi);

	/* Fire any kevent registered on this individual request. */
	KNOTE_LOCKED(&aiocbe->klist, 1);

	if (lj_done) {
		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
			lj->lioj_flags |= LIOJ_KEVENT_POSTED;
			KNOTE_LOCKED(&lj->klist, 1);
		}
		/* Post the lio completion signal exactly once. */
		if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
		    == LIOJ_SIGNAL
		    && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
		    lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
			aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi);
			lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
		}
	}

notification_done:
	if (aiocbe->jobflags & AIOCBLIST_CHECKSYNC) {
		/*
		 * Release aio_fsync() jobs on the same file that were
		 * issued after us and are only waiting for our completion.
		 */
		TAILQ_FOREACH_SAFE(scb, &ki->kaio_syncqueue, list, scbn) {
			if (aiocbe->fd_file == scb->fd_file &&
			    aiocbe->seqno < scb->seqno) {
				if (--scb->pending == 0) {
					mtx_lock(&aio_job_mtx);
					scb->jobstate = JOBST_JOBQGLOBAL;
					TAILQ_REMOVE(&ki->kaio_syncqueue, scb, list);
					TAILQ_INSERT_TAIL(&aio_jobs, scb, list);
					aio_kick_nowait(userp);
					mtx_unlock(&aio_job_mtx);
				}
			}
		}
	}
	if (ki->kaio_flags & KAIO_WAKEUP) {
		ki->kaio_flags &= ~KAIO_WAKEUP;
		wakeup(&userp->p_aioinfo);
	}
}
|
2006-01-22 05:59:27 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
* The AIO daemon, most of the actual work is done in aio_process,
|
|
|
|
* but the setup (and address space mgmt) is done in this routine.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static void
|
2006-01-22 05:59:27 +00:00
|
|
|
aio_daemon(void *_id)
|
1997-07-06 02:40:43 +00:00
|
|
|
{
|
2000-01-14 02:53:29 +00:00
|
|
|
struct aiocblist *aiocbe;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct aiothreadlist *aiop;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct proc *curcp, *mycp, *userp;
|
|
|
|
struct vmspace *myvm, *tmpvm;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td = curthread;
|
2006-01-22 05:59:27 +00:00
|
|
|
int id = (intptr_t)_id;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
/*
|
1997-11-29 01:33:10 +00:00
|
|
|
* Local copies of curproc (cp) and vmspace (myvm)
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
mycp = td->td_proc;
|
1997-11-29 01:33:10 +00:00
|
|
|
myvm = mycp->p_vmspace;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2003-04-17 22:37:48 +00:00
|
|
|
KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Allocate and ready the aio control info. There is one aiop structure
|
|
|
|
* per daemon.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2003-02-19 05:47:46 +00:00
|
|
|
aiop = uma_zalloc(aiop_zone, M_WAITOK);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothread = td;
|
2006-03-23 08:46:42 +00:00
|
|
|
aiop->aiothreadflags = 0;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2006-01-24 02:50:42 +00:00
|
|
|
/* The daemon resides in its own pgrp. */
|
2006-01-24 02:46:15 +00:00
|
|
|
setsid(td, NULL);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wakeup parent process. (Parent sleeps to keep from blasting away
|
2001-09-12 08:38:13 +00:00
|
|
|
* and creating too many daemons.)
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2006-01-22 05:59:27 +00:00
|
|
|
sema_post(&aio_newproc_sem);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_lock(&aio_job_mtx);
|
2000-01-14 02:53:29 +00:00
|
|
|
for (;;) {
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* curcp is the current daemon process context.
|
|
|
|
* userp is the current user process context.
|
|
|
|
*/
|
|
|
|
curcp = mycp;
|
1997-10-11 01:07:03 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
|
|
|
* Take daemon off of free queue
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
if (aiop->aiothreadflags & AIOP_FREE) {
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothreadflags &= ~AIOP_FREE;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Check for jobs.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
1999-01-27 21:50:00 +00:00
|
|
|
while ((aiocbe = aio_selectjob(aiop)) != NULL) {
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
1997-07-06 02:40:43 +00:00
|
|
|
userp = aiocbe->userproc;
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Connect to process address space for user program.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (userp != curcp) {
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Save the current address space that we are
|
|
|
|
* connected to.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
tmpvm = mycp->p_vmspace;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Point to the new user address space, and
|
|
|
|
* refer to it.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
mycp->p_vmspace = userp->p_vmspace;
|
2004-07-27 03:53:41 +00:00
|
|
|
atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1);
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Activate the new mapping. */
|
2002-02-07 20:58:47 +00:00
|
|
|
pmap_activate(FIRST_THREAD_IN_PROC(mycp));
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If the old address space wasn't the daemons
|
|
|
|
* own address space, then we need to remove the
|
|
|
|
* daemon's reference from the other process
|
|
|
|
* that it was acting on behalf of.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
1997-07-06 02:40:43 +00:00
|
|
|
if (tmpvm != myvm) {
|
|
|
|
vmspace_free(tmpvm);
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
curcp = userp;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
ki = userp->p_aioinfo;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Do the I/O function. */
|
1997-07-06 02:40:43 +00:00
|
|
|
aio_process(aiocbe);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2006-01-22 09:25:52 +00:00
|
|
|
mtx_lock(&aio_job_mtx);
|
|
|
|
/* Decrement the active job count. */
|
|
|
|
ki->kaio_active_count--;
|
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
|
2005-10-12 17:51:31 +00:00
|
|
|
aio_bio_done_notify(userp, aiocbe, DONE_QUEUE);
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
|
|
|
mtx_lock(&aio_job_mtx);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Disconnect from user address space.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
|
|
|
if (curcp != mycp) {
|
2006-01-22 05:59:27 +00:00
|
|
|
|
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get the user address space to disconnect from. */
|
1997-11-29 01:33:10 +00:00
|
|
|
tmpvm = mycp->p_vmspace;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get original address space for daemon. */
|
1997-11-29 01:33:10 +00:00
|
|
|
mycp->p_vmspace = myvm;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Activate the daemon's address space. */
|
2002-02-07 20:58:47 +00:00
|
|
|
pmap_activate(FIRST_THREAD_IN_PROC(mycp));
|
2000-01-14 02:53:29 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (tmpvm == myvm) {
|
|
|
|
printf("AIOD: vmspace problem -- %d\n",
|
|
|
|
mycp->p_pid);
|
|
|
|
}
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
#endif
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Remove our vmspace reference. */
|
1997-07-06 02:40:43 +00:00
|
|
|
vmspace_free(tmpvm);
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
curcp = mycp;
|
2006-01-22 05:59:27 +00:00
|
|
|
|
|
|
|
mtx_lock(&aio_job_mtx);
|
|
|
|
/*
|
|
|
|
* We have to restart to avoid race, we only sleep if
|
|
|
|
* no job can be selected, that should be
|
|
|
|
* curcp == mycp.
|
|
|
|
*/
|
|
|
|
continue;
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_assert(&aio_job_mtx, MA_OWNED);
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothreadflags |= AIOP_FREE;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* If daemon is inactive for a long time, allow it to exit,
|
|
|
|
* thereby freeing resources.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2006-01-22 05:59:27 +00:00
|
|
|
if (msleep(aiop->aiothread, &aio_job_mtx, PRIBIO, "aiordy",
|
|
|
|
aiod_lifetime)) {
|
2002-01-20 18:59:58 +00:00
|
|
|
if (TAILQ_EMPTY(&aio_jobs)) {
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((aiop->aiothreadflags & AIOP_FREE) &&
|
2000-01-14 02:53:29 +00:00
|
|
|
(num_aio_procs > target_aio_procs)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
num_aio_procs--;
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
uma_zfree(aiop_zone, aiop);
|
|
|
|
free_unr(aiod_unr, id);
|
2000-01-14 02:53:29 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (mycp->p_vmspace->vm_refcnt <= 1) {
|
|
|
|
printf("AIOD: bad vm refcnt for"
|
|
|
|
" exiting daemon: %d\n",
|
|
|
|
mycp->p_vmspace->vm_refcnt);
|
|
|
|
}
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
#endif
|
2007-10-20 23:23:23 +00:00
|
|
|
kproc_exit(0);
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
panic("shouldn't be here\n");
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-01-22 05:59:27 +00:00
|
|
|
* Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
|
2000-01-14 02:53:29 +00:00
|
|
|
* AIO daemon modifies its environment itself.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
static int
|
2006-01-22 05:59:27 +00:00
|
|
|
aio_newproc(int *start)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
1997-07-06 02:40:43 +00:00
|
|
|
int error;
|
2001-03-09 06:27:01 +00:00
|
|
|
struct proc *p;
|
2006-01-22 05:59:27 +00:00
|
|
|
int id;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
id = alloc_unr(aiod_unr);
|
2007-10-20 23:23:23 +00:00
|
|
|
error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p,
|
2006-01-22 05:59:27 +00:00
|
|
|
RFNOWAIT, 0, "aiod%d", id);
|
|
|
|
if (error == 0) {
|
|
|
|
/*
|
|
|
|
* Wait until daemon is started.
|
|
|
|
*/
|
|
|
|
sema_wait(&aio_newproc_sem);
|
|
|
|
mtx_lock(&aio_job_mtx);
|
|
|
|
num_aio_procs++;
|
|
|
|
if (start != NULL)
|
2006-01-23 23:46:30 +00:00
|
|
|
(*start)--;
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
} else {
|
|
|
|
free_unr(aiod_unr, id);
|
|
|
|
}
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2001-03-05 01:30:23 +00:00
|
|
|
* Try the high-performance, low-overhead physio method for eligible
|
|
|
|
* VCHR devices. This method doesn't use an aio helper thread, and
|
2004-08-13 17:43:53 +00:00
|
|
|
* thus has very low overhead.
|
2001-03-05 01:30:23 +00:00
|
|
|
*
|
2006-01-23 02:49:34 +00:00
|
|
|
* Assumes that the caller, aio_aqueue(), has incremented the file
|
2001-03-05 01:30:23 +00:00
|
|
|
* structure's reference count, preventing its deallocation for the
|
2004-08-13 17:43:53 +00:00
|
|
|
* duration of this call.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2001-03-05 01:30:23 +00:00
|
|
|
static int
|
2000-01-14 02:53:29 +00:00
|
|
|
aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
|
|
|
struct aiocb *cb;
|
|
|
|
struct file *fp;
|
|
|
|
struct buf *bp;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct kaioinfo *ki;
|
2006-01-22 05:59:27 +00:00
|
|
|
struct aioliojob *lj;
|
|
|
|
int error;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
cb = &aiocbe->uaiocb;
|
2002-02-12 17:40:41 +00:00
|
|
|
fp = aiocbe->fd_file;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2004-08-13 17:43:53 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE)
|
1999-11-07 13:09:09 +00:00
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2003-06-22 08:41:43 +00:00
|
|
|
vp = fp->f_vnode;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
|
2000-01-17 21:18:39 +00:00
|
|
|
/*
|
|
|
|
* If its not a disk, we don't want to return a positive error.
|
|
|
|
* It causes the aio code to not fall through to try the thread
|
|
|
|
* way when you're talking to a regular file.
|
|
|
|
*/
|
|
|
|
if (!vn_isdisk(vp, &error)) {
|
|
|
|
if (error == ENOTBLK)
|
|
|
|
return (-1);
|
|
|
|
else
|
|
|
|
return (error);
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2006-02-22 00:05:12 +00:00
|
|
|
if (vp->v_bufobj.bo_bsize == 0)
|
|
|
|
return (-1);
|
|
|
|
|
2004-10-26 07:39:12 +00:00
|
|
|
if (cb->aio_nbytes % vp->v_bufobj.bo_bsize)
|
1999-11-07 13:09:09 +00:00
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2005-10-12 17:51:31 +00:00
|
|
|
if (cb->aio_nbytes > vp->v_rdev->si_iosize_max)
|
|
|
|
return (-1);
|
|
|
|
|
2001-03-10 22:47:57 +00:00
|
|
|
if (cb->aio_nbytes >
|
|
|
|
MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
|
1999-11-07 13:09:09 +00:00
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
2004-08-13 17:43:53 +00:00
|
|
|
if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
|
1999-11-07 13:09:09 +00:00
|
|
|
return (-1);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Create and build a buffer header for a transfer. */
|
1999-01-21 08:29:12 +00:00
|
|
|
bp = (struct buf *)getpbuf(NULL);
|
2001-03-10 22:47:57 +00:00
|
|
|
BUF_KERNPROC(bp);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
ki->kaio_count++;
|
|
|
|
ki->kaio_buffer_count++;
|
|
|
|
lj = aiocbe->lio;
|
|
|
|
if (lj)
|
|
|
|
lj->lioj_count++;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Get a copy of the kva from the physical buffer.
|
1997-11-29 01:33:10 +00:00
|
|
|
*/
|
2003-04-05 23:02:58 +00:00
|
|
|
error = 0;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
bp->b_bcount = cb->aio_nbytes;
|
|
|
|
bp->b_bufsize = cb->aio_nbytes;
|
|
|
|
bp->b_iodone = aio_physwakeup;
|
|
|
|
bp->b_saveaddr = bp->b_data;
|
2001-12-09 08:16:36 +00:00
|
|
|
bp->b_data = (void *)(uintptr_t)cb->aio_buf;
|
2003-10-21 13:18:19 +00:00
|
|
|
bp->b_offset = cb->aio_offset;
|
|
|
|
bp->b_iooffset = cb->aio_offset;
|
1997-11-29 01:33:10 +00:00
|
|
|
bp->b_blkno = btodb(cb->aio_offset);
|
2003-04-04 06:26:28 +00:00
|
|
|
bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2003-01-20 17:46:48 +00:00
|
|
|
/*
|
|
|
|
* Bring buffer into kernel space.
|
|
|
|
*/
|
|
|
|
if (vmapbuf(bp) < 0) {
|
|
|
|
error = EFAULT;
|
|
|
|
goto doerror;
|
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->bp = bp;
|
2003-09-10 15:48:51 +00:00
|
|
|
bp->b_caller1 = (void *)aiocbe;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBQBUF;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
cb->_aiocb_private.status = cb->aio_nbytes;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
|
|
|
atomic_add_int(&num_queue_count, 1);
|
|
|
|
atomic_add_int(&num_buf_aio, 1);
|
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
bp->b_error = 0;
|
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
TASK_INIT(&aiocbe->biotask, 0, biohelper, aiocbe);
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Perform transfer. */
|
2004-10-29 07:16:37 +00:00
|
|
|
dev_strategy(vp->v_rdev, bp);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
|
|
|
doerror:
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
ki->kaio_count--;
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_buffer_count--;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
2006-01-22 05:59:27 +00:00
|
|
|
lj->lioj_count--;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->bp = NULL;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
1999-01-21 08:29:12 +00:00
|
|
|
relpbuf(bp, NULL);
|
1997-11-29 01:33:10 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Wake up aio requests that may be serviceable now.
|
|
|
|
*/
|
2002-01-06 21:03:39 +00:00
|
|
|
static void
|
2001-12-29 07:13:47 +00:00
|
|
|
aio_swake_cb(struct socket *so, struct sockbuf *sb)
|
2000-01-14 02:53:29 +00:00
|
|
|
{
|
2006-01-22 05:59:27 +00:00
|
|
|
struct aiocblist *cb, *cbn;
|
2006-03-23 08:46:42 +00:00
|
|
|
int opcode;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2006-01-24 07:24:24 +00:00
|
|
|
if (sb == &so->so_snd)
|
2000-01-14 02:53:29 +00:00
|
|
|
opcode = LIO_WRITE;
|
2006-01-24 07:24:24 +00:00
|
|
|
else
|
2000-01-14 02:53:29 +00:00
|
|
|
opcode = LIO_READ;
|
|
|
|
|
2006-01-24 07:24:24 +00:00
|
|
|
SOCKBUF_LOCK(sb);
|
|
|
|
sb->sb_flags &= ~SB_AIO;
|
|
|
|
mtx_lock(&aio_job_mtx);
|
2005-11-08 17:43:05 +00:00
|
|
|
TAILQ_FOREACH_SAFE(cb, &so->so_aiojobq, list, cbn) {
|
2000-01-14 02:53:29 +00:00
|
|
|
if (opcode == cb->uaiocb.aio_lio_opcode) {
|
2006-01-22 09:39:59 +00:00
|
|
|
if (cb->jobstate != JOBST_JOBQSOCK)
|
2006-01-22 05:59:27 +00:00
|
|
|
panic("invalid queue value");
|
2006-01-24 07:24:24 +00:00
|
|
|
/* XXX
|
|
|
|
* We don't have actual sockets backend yet,
|
|
|
|
* so we simply move the requests to the generic
|
|
|
|
* file I/O backend.
|
2006-01-22 05:59:27 +00:00
|
|
|
*/
|
2006-01-24 07:24:24 +00:00
|
|
|
TAILQ_REMOVE(&so->so_aiojobq, cb, list);
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
|
2006-03-23 08:46:42 +00:00
|
|
|
aio_kick_nowait(cb->userproc);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
}
|
2006-01-24 07:24:24 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
SOCKBUF_UNLOCK(sb);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Queue a new AIO request. Choosing either the threaded or direct physio VCHR
|
|
|
|
* technique is done in this code.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
MFP4 (with some minor changes):
Implement the linux_io_* syscalls (AIO). They are only enabled if the native
AIO code is available (either compiled in to the kernel or as a module) at
the time the functions are used. If the AIO stuff is not available there
will be a ENOSYS.
From the submitter:
---snip---
DESIGN NOTES:
1. Linux permits a process to own multiple AIO queues (distinguished by
"context"), but FreeBSD creates only one single AIO queue per process.
My code maintains a request queue (STAILQ of queue(3)) per "context",
and throws all AIO requests of all contexts owned by a process into
the single FreeBSD per-process AIO queue.
When the process calls io_destroy(2), io_getevents(2), io_submit(2) and
io_cancel(2), my code can pick out requests owned by the specified context
from the single FreeBSD per-process AIO queue according to the per-context
request queues maintained by my code.
2. The request queue maintained by my code stores contrast information between
Linux IO control blocks (struct linux_iocb) and FreeBSD IO control blocks
(struct aiocb). FreeBSD IO control block actually exists in userland memory
space, required by FreeBSD native aio_XXXXXX(2).
3. It is quite troubling that the function io_getevents() of libaio-0.3.105
needs to use Linux-specific "struct aio_ring", which is a partial mirror
of context in user space. I would rather take the address of context in
kernel as the context ID, but the io_getevents() of libaio forces me to
take the address of the "ring" in user space as the context ID.
To my surprise, one comment line in the file "io_getevents.c" of
libaio-0.3.105 reads:
Ben will hate me for this
REFERENCE:
1. Linux kernel source code: http://www.kernel.org/pub/linux/kernel/v2.6/
(include/linux/aio_abi.h, fs/aio.c)
2. Linux manual pages: http://www.kernel.org/pub/linux/docs/manpages/
(io_setup(2), io_destroy(2), io_getevents(2), io_submit(2), io_cancel(2))
3. Linux Scalability Effort: http://lse.sourceforge.net/io/aio.html
The design notes: http://lse.sourceforge.net/io/aionotes.txt
4. The package libaio, both source and binary:
http://rpmfind.net/linux/rpm2html/search.php?query=libaio
Simple transparent interface to Linux AIO system calls.
5. Libaio-oracle: http://oss.oracle.com/projects/libaio-oracle/
POSIX AIO implementation based on Linux AIO system calls (depending on
libaio).
---snip---
Submitted by: Li, Xiao <intron@intron.ac>
2006-10-15 14:22:14 +00:00
|
|
|
int
|
2006-01-23 02:49:34 +00:00
|
|
|
aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj,
|
2006-03-24 00:50:06 +00:00
|
|
|
int type, int oldsigev)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct file *fp;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct socket *so;
|
2006-03-24 00:50:06 +00:00
|
|
|
struct aiocblist *aiocbe, *cb;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
2000-11-21 19:36:36 +00:00
|
|
|
struct kevent kev;
|
2004-10-01 05:54:06 +00:00
|
|
|
struct sockbuf *sb;
|
2006-01-22 05:59:27 +00:00
|
|
|
int opcode;
|
|
|
|
int error;
|
2006-09-24 04:47:47 +00:00
|
|
|
int fd, kqfd;
|
2006-01-22 05:59:27 +00:00
|
|
|
int jid;
|
|
|
|
|
2006-01-23 02:49:34 +00:00
|
|
|
if (p->p_aioinfo == NULL)
|
|
|
|
aio_init_aioinfo(p);
|
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
ki = p->p_aioinfo;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-01-23 02:49:34 +00:00
|
|
|
suword(&job->_aiocb_private.status, -1);
|
|
|
|
suword(&job->_aiocb_private.error, 0);
|
|
|
|
suword(&job->_aiocb_private.kernelinfo, -1);
|
|
|
|
|
|
|
|
if (num_queue_count >= max_queue_count ||
|
|
|
|
ki->kaio_count >= ki->kaio_qallowed_count) {
|
|
|
|
suword(&job->_aiocb_private.error, EAGAIN);
|
|
|
|
return (EAGAIN);
|
|
|
|
}
|
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
aiocbe = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->inputcharge = 0;
|
|
|
|
aiocbe->outputcharge = 0;
|
2006-05-09 00:10:11 +00:00
|
|
|
knlist_init(&aiocbe->klist, AIO_MTX(ki), NULL, NULL, NULL);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2005-10-30 02:12:49 +00:00
|
|
|
if (oldsigev) {
|
|
|
|
bzero(&aiocbe->uaiocb, sizeof(struct aiocb));
|
|
|
|
error = copyin(job, &aiocbe->uaiocb, sizeof(struct oaiocb));
|
|
|
|
bcopy(&aiocbe->uaiocb.__spare__, &aiocbe->uaiocb.aio_sigevent,
|
|
|
|
sizeof(struct osigevent));
|
|
|
|
} else {
|
|
|
|
error = copyin(job, &aiocbe->uaiocb, sizeof(struct aiocb));
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
if (error) {
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.error, error);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2006-01-23 10:27:15 +00:00
|
|
|
|
|
|
|
if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
|
|
|
|
aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
|
|
|
|
aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
|
|
|
|
aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
|
|
|
|
suword(&job->_aiocb_private.error, EINVAL);
|
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
|
2005-11-03 05:25:26 +00:00
|
|
|
if ((aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
|
|
|
|
aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
|
2001-04-18 22:18:39 +00:00
|
|
|
!_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
2001-04-18 22:18:39 +00:00
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2005-11-03 05:25:26 +00:00
|
|
|
ksiginfo_init(&aiocbe->ksi);
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Save userspace address of the job info. */
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
aiocbe->uuaiocb = job;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get the opcode. */
|
|
|
|
if (type != LIO_NOP)
|
1997-10-09 04:14:41 +00:00
|
|
|
aiocbe->uaiocb.aio_lio_opcode = type;
|
|
|
|
opcode = aiocbe->uaiocb.aio_lio_opcode;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2005-11-08 17:43:05 +00:00
|
|
|
/* Fetch the file object for the specified file descriptor. */
|
1997-07-06 02:40:43 +00:00
|
|
|
fd = aiocbe->uaiocb.aio_fildes;
|
2005-11-08 17:43:05 +00:00
|
|
|
switch (opcode) {
|
|
|
|
case LIO_WRITE:
|
|
|
|
error = fget_write(td, fd, &fp);
|
|
|
|
break;
|
|
|
|
case LIO_READ:
|
|
|
|
error = fget_read(td, fd, &fp);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = fget(td, fd, &fp);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2005-11-08 17:43:05 +00:00
|
|
|
if (error) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2006-01-26 08:37:02 +00:00
|
|
|
suword(&job->_aiocb_private.error, error);
|
2006-01-06 16:34:22 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2006-03-23 08:46:42 +00:00
|
|
|
|
|
|
|
if (opcode == LIO_SYNC && fp->f_vnode == NULL) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto aqueue_fail;
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-03-24 00:50:06 +00:00
|
|
|
if (opcode != LIO_SYNC && aiocbe->uaiocb.aio_offset == -1LL) {
|
2002-04-07 07:17:59 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto aqueue_fail;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2006-03-23 08:46:42 +00:00
|
|
|
aiocbe->fd_file = fp;
|
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_lock(&aio_job_mtx);
|
2006-03-23 08:46:42 +00:00
|
|
|
jid = jobrefid++;
|
|
|
|
aiocbe->seqno = jobseqno++;
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
error = suword(&job->_aiocb_private.kernelinfo, jid);
|
|
|
|
if (error) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto aqueue_fail;
|
|
|
|
}
|
|
|
|
aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
if (opcode == LIO_NOP) {
|
2002-03-31 20:17:56 +00:00
|
|
|
fdrop(fp, td);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2006-03-23 08:46:42 +00:00
|
|
|
if ((opcode != LIO_READ) && (opcode != LIO_WRITE) &&
|
|
|
|
(opcode != LIO_SYNC)) {
|
2002-04-07 07:17:59 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto aqueue_fail;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
2006-09-24 04:47:47 +00:00
|
|
|
if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
|
2005-06-04 19:16:33 +00:00
|
|
|
goto no_kqueue;
|
2006-09-24 04:47:47 +00:00
|
|
|
kqfd = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
|
2002-08-06 19:01:08 +00:00
|
|
|
kev.ident = (uintptr_t)aiocbe->uuaiocb;
|
2000-11-21 19:36:36 +00:00
|
|
|
kev.filter = EVFILT_AIO;
|
|
|
|
kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
|
2002-08-06 19:01:08 +00:00
|
|
|
kev.data = (intptr_t)aiocbe;
|
2006-01-22 05:59:27 +00:00
|
|
|
kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sival_ptr;
|
2006-09-24 04:47:47 +00:00
|
|
|
error = kqfd_register(kqfd, &kev, td, 1);
|
2000-04-16 18:53:38 +00:00
|
|
|
aqueue_fail:
|
2000-11-21 19:36:36 +00:00
|
|
|
if (error) {
|
2002-03-31 20:17:56 +00:00
|
|
|
fdrop(fp, td);
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiocb_zone, aiocbe);
|
2006-01-22 05:59:27 +00:00
|
|
|
suword(&job->_aiocb_private.error, error);
|
2000-11-21 19:36:36 +00:00
|
|
|
goto done;
|
2000-04-16 18:53:38 +00:00
|
|
|
}
|
2000-11-21 19:36:36 +00:00
|
|
|
no_kqueue:
|
2000-04-16 18:53:38 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
suword(&job->_aiocb_private.error, EINPROGRESS);
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->userproc = p;
|
2002-11-07 20:46:37 +00:00
|
|
|
aiocbe->cred = crhold(td->td_ucred);
|
1997-07-06 02:40:43 +00:00
|
|
|
aiocbe->jobflags = 0;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->lio = lj;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2006-03-23 08:46:42 +00:00
|
|
|
if (opcode == LIO_SYNC)
|
|
|
|
goto queueit;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (fp->f_type == DTYPE_SOCKET) {
|
|
|
|
/*
|
|
|
|
* Alternate queueing for socket ops: Reach down into the
|
|
|
|
* descriptor to get the socket data. Then check to see if the
|
|
|
|
* socket is ready to be read or written (based on the requested
|
|
|
|
* operation).
|
|
|
|
*
|
|
|
|
* If it is not ready for io, then queue the aiocbe on the
|
|
|
|
* socket, and set the flags so we get a call when sbnotify()
|
|
|
|
* happens.
|
2004-10-01 05:54:06 +00:00
|
|
|
*
|
|
|
|
* Note if opcode is neither LIO_WRITE nor LIO_READ we lock
|
|
|
|
* and unlock the snd sockbuf for no reason.
|
2000-01-14 02:53:29 +00:00
|
|
|
*/
|
2003-01-13 00:33:17 +00:00
|
|
|
so = fp->f_data;
|
2004-10-01 05:54:06 +00:00
|
|
|
sb = (opcode == LIO_READ) ? &so->so_rcv : &so->so_snd;
|
|
|
|
SOCKBUF_LOCK(sb);
|
2000-01-14 02:53:29 +00:00
|
|
|
if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
|
|
|
|
LIO_WRITE) && (!sowriteable(so)))) {
|
2006-01-24 07:24:24 +00:00
|
|
|
sb->sb_flags |= SB_AIO;
|
|
|
|
|
|
|
|
mtx_lock(&aio_job_mtx);
|
2000-01-14 02:53:29 +00:00
|
|
|
TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
|
2006-01-24 07:24:24 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
|
2006-01-24 07:24:24 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
|
2006-01-22 05:59:27 +00:00
|
|
|
aiocbe->jobstate = JOBST_JOBQSOCK;
|
|
|
|
ki->kaio_count++;
|
|
|
|
if (lj)
|
|
|
|
lj->lioj_count++;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2004-10-01 05:54:06 +00:00
|
|
|
SOCKBUF_UNLOCK(sb);
|
2006-01-22 05:59:27 +00:00
|
|
|
atomic_add_int(&num_queue_count, 1);
|
2000-11-18 21:01:04 +00:00
|
|
|
error = 0;
|
|
|
|
goto done;
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
2004-10-01 05:54:06 +00:00
|
|
|
SOCKBUF_UNLOCK(sb);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if ((error = aio_qphysio(p, aiocbe)) == 0)
|
2000-11-18 21:01:04 +00:00
|
|
|
goto done;
|
2006-01-22 05:59:27 +00:00
|
|
|
#if 0
|
2000-11-18 21:01:04 +00:00
|
|
|
if (error > 0) {
|
1997-11-29 01:33:10 +00:00
|
|
|
aiocbe->uaiocb._aiocb_private.error = error;
|
|
|
|
suword(&job->_aiocb_private.error, error);
|
2000-11-18 21:01:04 +00:00
|
|
|
goto done;
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
#endif
|
2006-03-23 08:46:42 +00:00
|
|
|
queueit:
|
2000-01-14 02:53:29 +00:00
|
|
|
/* No buffer for daemon I/O. */
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
aiocbe->bp = NULL;
|
2006-03-23 08:46:42 +00:00
|
|
|
atomic_add_int(&num_queue_count, 1);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
ki->kaio_count++;
|
2000-01-14 02:53:29 +00:00
|
|
|
if (lj)
|
2006-01-22 05:59:27 +00:00
|
|
|
lj->lioj_count++;
|
1997-11-29 01:33:10 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
|
2006-03-23 08:46:42 +00:00
|
|
|
if (opcode == LIO_SYNC) {
|
2006-03-24 00:50:06 +00:00
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_jobqueue, plist) {
|
|
|
|
if (cb->fd_file == aiocbe->fd_file &&
|
|
|
|
cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
|
|
|
|
cb->seqno < aiocbe->seqno) {
|
|
|
|
cb->jobflags |= AIOCBLIST_CHECKSYNC;
|
|
|
|
aiocbe->pending++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_bufqueue, plist) {
|
|
|
|
if (cb->fd_file == aiocbe->fd_file &&
|
|
|
|
cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
|
|
|
|
cb->seqno < aiocbe->seqno) {
|
|
|
|
cb->jobflags |= AIOCBLIST_CHECKSYNC;
|
|
|
|
aiocbe->pending++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (aiocbe->pending != 0) {
|
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, aiocbe, list);
|
|
|
|
aiocbe->jobstate = JOBST_JOBQSYNC;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-03-24 00:50:06 +00:00
|
|
|
goto done;
|
|
|
|
}
|
2006-03-23 08:46:42 +00:00
|
|
|
}
|
2006-03-24 00:50:06 +00:00
|
|
|
mtx_lock(&aio_job_mtx);
|
|
|
|
TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
|
|
|
|
aiocbe->jobstate = JOBST_JOBQGLOBAL;
|
|
|
|
aio_kick_nowait(p);
|
|
|
|
mtx_unlock(&aio_job_mtx);
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-03-23 08:46:42 +00:00
|
|
|
error = 0;
|
|
|
|
done:
|
|
|
|
return (error);
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-03-23 08:46:42 +00:00
|
|
|
static void
|
|
|
|
aio_kick_nowait(struct proc *userp)
|
|
|
|
{
|
|
|
|
struct kaioinfo *ki = userp->p_aioinfo;
|
|
|
|
struct aiothreadlist *aiop;
|
1997-11-29 01:33:10 +00:00
|
|
|
|
2006-03-23 08:46:42 +00:00
|
|
|
mtx_assert(&aio_job_mtx, MA_OWNED);
|
|
|
|
if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
|
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
|
|
|
aiop->aiothreadflags &= ~AIOP_FREE;
|
|
|
|
wakeup(aiop->aiothread);
|
2006-03-24 00:50:06 +00:00
|
|
|
} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
|
|
|
|
((ki->kaio_active_count + num_aio_resv_start) <
|
|
|
|
ki->kaio_maxactive_count)) {
|
2006-03-23 08:46:42 +00:00
|
|
|
taskqueue_enqueue(taskqueue_aiod_bio, &ki->kaio_task);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-03-24 00:50:06 +00:00
|
|
|
static int
|
2006-03-23 08:46:42 +00:00
|
|
|
aio_kick(struct proc *userp)
|
|
|
|
{
|
|
|
|
struct kaioinfo *ki = userp->p_aioinfo;
|
|
|
|
struct aiothreadlist *aiop;
|
2006-03-24 00:50:06 +00:00
|
|
|
int error, ret = 0;
|
2006-03-23 08:46:42 +00:00
|
|
|
|
|
|
|
mtx_assert(&aio_job_mtx, MA_OWNED);
|
2002-01-14 07:26:33 +00:00
|
|
|
retryproc:
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
|
1997-07-06 02:40:43 +00:00
|
|
|
TAILQ_REMOVE(&aio_freeproc, aiop, list);
|
2001-09-12 08:38:13 +00:00
|
|
|
aiop->aiothreadflags &= ~AIOP_FREE;
|
|
|
|
wakeup(aiop->aiothread);
|
1997-11-29 01:33:10 +00:00
|
|
|
} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
|
2000-01-14 02:53:29 +00:00
|
|
|
((ki->kaio_active_count + num_aio_resv_start) <
|
|
|
|
ki->kaio_maxactive_count)) {
|
1997-11-29 01:33:10 +00:00
|
|
|
num_aio_resv_start++;
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
error = aio_newproc(&num_aio_resv_start);
|
|
|
|
mtx_lock(&aio_job_mtx);
|
|
|
|
if (error) {
|
|
|
|
num_aio_resv_start--;
|
2005-11-08 17:43:05 +00:00
|
|
|
goto retryproc;
|
2006-01-22 05:59:27 +00:00
|
|
|
}
|
2006-03-24 00:50:06 +00:00
|
|
|
} else {
|
|
|
|
ret = -1;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2006-03-24 00:50:06 +00:00
|
|
|
return (ret);
|
2006-03-23 08:46:42 +00:00
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2006-03-23 08:46:42 +00:00
|
|
|
static void
|
|
|
|
aio_kick_helper(void *context, int pending)
|
|
|
|
{
|
|
|
|
struct proc *userp = context;
|
|
|
|
|
|
|
|
mtx_lock(&aio_job_mtx);
|
2006-03-24 00:50:06 +00:00
|
|
|
while (--pending >= 0) {
|
|
|
|
if (aio_kick(userp))
|
|
|
|
break;
|
|
|
|
}
|
2006-03-23 08:46:42 +00:00
|
|
|
mtx_unlock(&aio_job_mtx);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Support the aio_return system call, as a side-effect, kernel resources are
|
|
|
|
* released.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_return(struct thread *td, struct aio_return_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2006-01-22 05:59:27 +00:00
|
|
|
struct aiocblist *cb;
|
|
|
|
struct aiocb *uaiocb;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct kaioinfo *ki;
|
2006-01-22 05:59:27 +00:00
|
|
|
int status, error;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2002-04-08 04:57:56 +00:00
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
2006-01-22 05:59:27 +00:00
|
|
|
uaiocb = uap->aiocbp;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_done, plist) {
|
|
|
|
if (cb->uuaiocb == uaiocb)
|
2002-04-08 04:57:56 +00:00
|
|
|
break;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
2002-04-08 04:57:56 +00:00
|
|
|
if (cb != NULL) {
|
2006-01-22 05:59:27 +00:00
|
|
|
MPASS(cb->jobstate == JOBST_JOBFINISHED);
|
|
|
|
status = cb->uaiocb._aiocb_private.status;
|
|
|
|
error = cb->uaiocb._aiocb_private.error;
|
|
|
|
td->td_retval[0] = status;
|
2005-10-12 17:51:31 +00:00
|
|
|
if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
|
2007-06-01 01:12:45 +00:00
|
|
|
td->td_ru.ru_oublock += cb->outputcharge;
|
2005-10-12 17:51:31 +00:00
|
|
|
cb->outputcharge = 0;
|
|
|
|
} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
|
2007-06-01 01:12:45 +00:00
|
|
|
td->td_ru.ru_inblock += cb->inputcharge;
|
2005-10-12 17:51:31 +00:00
|
|
|
cb->inputcharge = 0;
|
|
|
|
}
|
2002-04-08 04:57:56 +00:00
|
|
|
aio_free_entry(cb);
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
suword(&uaiocb->_aiocb_private.error, error);
|
|
|
|
suword(&uaiocb->_aiocb_private.status, status);
|
2006-01-26 08:37:02 +00:00
|
|
|
} else {
|
2006-01-22 05:59:27 +00:00
|
|
|
error = EINVAL;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-26 08:37:02 +00:00
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Allow a process to wakeup when any of the I/O requests are completed.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_suspend(struct thread *td, struct aio_suspend_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1997-11-07 08:53:44 +00:00
|
|
|
struct timeval atv;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct timespec ts;
|
|
|
|
struct aiocb *const *cbptr, *cbp;
|
|
|
|
struct kaioinfo *ki;
|
2006-01-22 05:59:27 +00:00
|
|
|
struct aiocblist *cb, *cbfirst;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
struct aiocb **ujoblist;
|
2006-01-22 05:59:27 +00:00
|
|
|
int njoblist;
|
|
|
|
int error;
|
|
|
|
int timo;
|
|
|
|
int i;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2003-01-12 09:40:23 +00:00
|
|
|
if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-11-29 01:33:10 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
timo = 0;
|
|
|
|
if (uap->timeout) {
|
2000-01-14 02:53:29 +00:00
|
|
|
/* Get timespec struct. */
|
|
|
|
if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
|
|
|
|
return (EINVAL);
|
|
|
|
|
1998-12-15 17:38:33 +00:00
|
|
|
TIMESPEC_TO_TIMEVAL(&atv, &ts);
|
1997-07-06 02:40:43 +00:00
|
|
|
if (itimerfix(&atv))
|
|
|
|
return (EINVAL);
|
1998-03-30 09:56:58 +00:00
|
|
|
timo = tvtohz(&atv);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EAGAIN);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
njoblist = 0;
|
2003-02-19 05:47:46 +00:00
|
|
|
ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
|
1997-07-06 02:40:43 +00:00
|
|
|
cbptr = uap->aiocbp;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < uap->nent; i++) {
|
2002-05-25 18:39:42 +00:00
|
|
|
cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
if (cbp == 0)
|
|
|
|
continue;
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
ujoblist[njoblist] = cbp;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
njoblist++;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
if (njoblist == 0) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiol_zone, ujoblist);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
Fix error handling for VCHR type I/O. Also, fix another spl problem, and
remove alot of overly verbose debugging statements.
ioproclist {
int aioprocflags; /* AIO proc flags */
TAILQ_ENTRY(aioproclist) list; /* List of processes */
struct proc *aioproc; /* The AIO thread */
TAILQ_HEAD (,aiocblist) jobtorun; /* suggested job to run */
};
/*
* data-structure for lio signal management
*/
struct aio_liojob {
int lioj_flags;
int lioj_buffer_count;
int lioj_buffer_finished_count;
int lioj_queue_count;
int lioj_queue_finished_count;
struct sigevent lioj_signal; /* signal on all I/O done */
TAILQ_ENTRY (aio_liojob) lioj_list;
struct kaioinfo *lioj_ki;
};
#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
/*
* per process aio data structure
*/
struct kaioinfo {
int kaio_flags; /* per process kaio flags */
int kaio_maxactive_count; /* maximum number of AIOs */
int kaio_active_count; /* number of currently used AIOs */
int kaio_qallowed_count; /* maxiumu size of AIO queue */
int kaio_queue_count; /* size of AIO queue */
int kaio_ballowed_count; /* maximum number of buffers */
int kaio_queue_finished_count; /* number of daemon jobs finished */
int kaio_buffer_count; /* number of physio buffers */
int kaio_buffer_finished_count; /* count of I/O done */
struct proc *kaio_p; /* process that uses this kaio block */
TAILQ_HEAD (,aio_liojob) kaio_liojoblist; /* list of lio jobs */
TAILQ_HEAD (,aiocblist) kaio_jobqueue; /* job queue for process */
TAILQ_HEAD (,aiocblist) kaio_jobdone; /* done queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufqueue; /* buffer job queue for process */
TAILQ_HEAD (,aiocblist) kaio_bufdone; /* buffer done queue for process */
};
#define KAIO_RUNDOWN 0x1 /* process is being run down */
#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant
event */
TAILQ_HEAD (,aioproclist) aio_freeproc, aio_activeproc;
TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
TAILQ_HEAD(,aiocblist) aio_freejobs; /* Pool of free jobs */
static void aio_init_aioinfo(struct proc *p) ;
static void aio_onceonly(void *) ;
static int aio_free_entry(struct aiocblist *aiocbe);
static void aio_process(struct aiocblist *aiocbe);
static int aio_newproc(void) ;
static int aio_aqueue(struct proc *p, struct aiocb *job, int type) ;
static void aio_physwakeup(struct buf *bp);
static int aio_fphysio(struct proc *p, struct aiocblist *aiocbe, int type);
static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
static void aio_daemon(void *uproc);
SYSINIT(aio, SI_SUB_VFS, SI_ORDER_ANY, aio_onceonly, NULL);
static vm_zone_t kaio_zone=0, aiop_zone=0,
aiocb_zone=0, aiol_zone=0, aiolio_zone=0;
/*
* Single AIOD vmspace shared amongst all of them
*/
static struct vmspace *aiovmspace = NULL;
/*
* Startup initialization
*/
void
aio_onceonly(void *na)
{
TAILQ_INIT(&aio_freeproc);
TAILQ_INIT(&aio_activeproc);
TAILQ_INIT(&aio_jobs);
TAILQ_INIT(&aio_bufjobs);
TAILQ_INIT(&aio_freejobs);
kaio_zone = zinit("AIO", sizeof (struct kaioinfo), 0, 0, 1);
aiop_zone = zinit("AIOP", sizeof (struct aioproclist), 0, 0, 1);
aiocb_zone = zinit("AIOCB", sizeof (struct aiocblist), 0, 0, 1);
aiol_zone = zinit("AIOL", AIO_LISTIO_MAX * sizeof (int), 0, 0, 1);
aiolio_zone = zinit("AIOLIO",
AIO_LISTIO_MAX * sizeof (struct aio_liojob), 0, 0, 1);
aiod_timeout = AIOD_TIMEOUT_DEFAULT;
aiod_lifetime = AIOD_LIFETIME_DEFAULT;
jobrefid = 1;
}
/*
* Init the per-process aioinfo structure.
* The aioinfo limits are set per-process for user limit (resource) management.
*/
void
aio_init_aioinfo(struct proc *p)
{
struct kaioinfo *ki;
if (p->p_aioinfo == NULL) {
ki = zalloc(kaio_zone);
p->p_aioinfo = ki
1997-12-01 07:01:45 +00:00
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2000-01-14 02:53:29 +00:00
|
|
|
for (;;) {
|
2006-01-22 05:59:27 +00:00
|
|
|
cbfirst = NULL;
|
|
|
|
error = 0;
|
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < njoblist; i++) {
|
2006-01-22 05:59:27 +00:00
|
|
|
if (cb->uuaiocb == ujoblist[i]) {
|
|
|
|
if (cbfirst == NULL)
|
|
|
|
cbfirst = cb;
|
|
|
|
if (cb->jobstate == JOBST_JOBFINISHED)
|
|
|
|
goto RETURN;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
/* All tasks were finished. */
|
|
|
|
if (cbfirst == NULL)
|
|
|
|
break;
|
1997-07-06 02:40:43 +00:00
|
|
|
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2006-05-09 00:10:11 +00:00
|
|
|
error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
|
2006-01-22 05:59:27 +00:00
|
|
|
"aiospn", timo);
|
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
|
|
|
if (error)
|
|
|
|
break;
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
RETURN:
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
uma_zfree(aiol_zone, ujoblist);
|
|
|
|
return (error);
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
1997-06-16 00:27:26 +00:00
|
|
|
|
|
|
|
/*
|
2000-02-23 07:44:25 +00:00
|
|
|
* aio_cancel cancels any non-physio aio operations not currently in
|
|
|
|
* progress.
|
1997-06-16 00:27:26 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_cancel(struct thread *td, struct aio_cancel_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2000-02-23 07:44:25 +00:00
|
|
|
struct kaioinfo *ki;
|
|
|
|
struct aiocblist *cbe, *cbn;
|
|
|
|
struct file *fp;
|
|
|
|
struct socket *so;
|
2006-01-22 05:59:27 +00:00
|
|
|
int error;
|
2006-01-24 07:24:24 +00:00
|
|
|
int remove;
|
2006-01-22 05:59:27 +00:00
|
|
|
int cancelled = 0;
|
|
|
|
int notcancelled = 0;
|
2000-02-23 07:44:25 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
|
2005-11-08 17:43:05 +00:00
|
|
|
/* Lookup file object. */
|
2006-01-22 05:59:27 +00:00
|
|
|
error = fget(td, uap->fd, &fp);
|
2005-11-08 17:43:05 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2000-02-23 07:44:25 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
|
|
|
goto done;
|
|
|
|
|
2004-08-13 17:43:53 +00:00
|
|
|
if (fp->f_type == DTYPE_VNODE) {
|
2003-06-22 08:41:43 +00:00
|
|
|
vp = fp->f_vnode;
|
2006-01-22 05:59:27 +00:00
|
|
|
if (vn_isdisk(vp, &error)) {
|
2005-11-08 17:43:05 +00:00
|
|
|
fdrop(fp, td);
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_NOTCANCELED;
|
2004-08-13 17:43:53 +00:00
|
|
|
return (0);
|
2000-02-23 07:44:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2005-11-08 17:43:05 +00:00
|
|
|
TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
|
2000-02-23 07:44:25 +00:00
|
|
|
if ((uap->fd == cbe->uaiocb.aio_fildes) &&
|
2006-01-22 05:59:27 +00:00
|
|
|
((uap->aiocbp == NULL) ||
|
2000-02-23 07:44:25 +00:00
|
|
|
(uap->aiocbp == cbe->uuaiocb))) {
|
2006-01-24 07:24:24 +00:00
|
|
|
remove = 0;
|
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
mtx_lock(&aio_job_mtx);
|
2000-02-23 07:44:25 +00:00
|
|
|
if (cbe->jobstate == JOBST_JOBQGLOBAL) {
|
|
|
|
TAILQ_REMOVE(&aio_jobs, cbe, list);
|
2006-01-24 07:24:24 +00:00
|
|
|
remove = 1;
|
|
|
|
} else if (cbe->jobstate == JOBST_JOBQSOCK) {
|
|
|
|
MPASS(fp->f_type == DTYPE_SOCKET);
|
|
|
|
so = fp->f_data;
|
|
|
|
TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
|
|
|
|
remove = 1;
|
2006-03-23 08:46:42 +00:00
|
|
|
} else if (cbe->jobstate == JOBST_JOBQSYNC) {
|
|
|
|
TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
|
|
|
|
remove = 1;
|
2006-01-24 07:24:24 +00:00
|
|
|
}
|
|
|
|
mtx_unlock(&aio_job_mtx);
|
|
|
|
|
|
|
|
if (remove) {
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
|
2000-02-23 07:44:25 +00:00
|
|
|
cbe->uaiocb._aiocb_private.status = -1;
|
|
|
|
cbe->uaiocb._aiocb_private.error = ECANCELED;
|
2006-01-22 05:59:27 +00:00
|
|
|
aio_bio_done_notify(p, cbe, DONE_QUEUE);
|
|
|
|
cancelled++;
|
2000-02-23 07:44:25 +00:00
|
|
|
} else {
|
|
|
|
notcancelled++;
|
|
|
|
}
|
2006-01-24 07:24:24 +00:00
|
|
|
if (uap->aiocbp != NULL)
|
|
|
|
break;
|
2000-02-23 07:44:25 +00:00
|
|
|
}
|
|
|
|
}
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2002-08-11 19:04:17 +00:00
|
|
|
done:
|
2005-11-08 17:43:05 +00:00
|
|
|
fdrop(fp, td);
|
2006-01-24 07:24:24 +00:00
|
|
|
|
|
|
|
if (uap->aiocbp != NULL) {
|
|
|
|
if (cancelled) {
|
|
|
|
td->td_retval[0] = AIO_CANCELED;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-02-23 07:44:25 +00:00
|
|
|
if (notcancelled) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_NOTCANCELED;
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
2000-02-23 07:44:25 +00:00
|
|
|
}
|
2006-01-24 07:24:24 +00:00
|
|
|
|
2000-02-23 07:44:25 +00:00
|
|
|
if (cancelled) {
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_CANCELED;
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
2000-02-23 07:44:25 +00:00
|
|
|
}
|
2006-01-24 07:24:24 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = AIO_ALLDONE;
|
2000-02-23 07:44:25 +00:00
|
|
|
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2007-03-05 13:10:58 +00:00
|
|
|
* aio_error is implemented in the kernel level for compatibility purposes
|
|
|
|
* only. For a user mode async implementation, it would be best to do it in
|
|
|
|
* a userland subroutine.
|
1997-06-16 00:27:26 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_error(struct thread *td, struct aio_error_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocblist *cb;
|
|
|
|
struct kaioinfo *ki;
|
2006-01-22 05:59:27 +00:00
|
|
|
int status;
|
1997-06-16 00:27:26 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
ki = p->p_aioinfo;
|
2006-01-22 05:59:27 +00:00
|
|
|
if (ki == NULL) {
|
|
|
|
td->td_retval[0] = EINVAL;
|
|
|
|
return (0);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
|
|
|
|
if (cb->uuaiocb == uap->aiocbp) {
|
|
|
|
if (cb->jobstate == JOBST_JOBFINISHED)
|
|
|
|
td->td_retval[0] =
|
|
|
|
cb->uaiocb._aiocb_private.error;
|
|
|
|
else
|
|
|
|
td->td_retval[0] = EINPROGRESS;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (0);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
}
|
|
|
|
}
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
1997-07-06 02:40:43 +00:00
|
|
|
/*
|
2006-01-23 02:49:34 +00:00
|
|
|
* Hack for failure of aio_aqueue.
|
1997-07-06 02:40:43 +00:00
|
|
|
*/
|
|
|
|
status = fuword(&uap->aiocbp->_aiocb_private.status);
|
2006-01-22 05:59:27 +00:00
|
|
|
if (status == -1) {
|
|
|
|
td->td_retval[0] = fuword(&uap->aiocbp->_aiocb_private.error);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
td->td_retval[0] = EINVAL;
|
|
|
|
return (0);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* syscall - asynchronous read from a file (REALTIME) */
|
2005-10-30 02:12:49 +00:00
|
|
|
int
|
|
|
|
oaio_read(struct thread *td, struct oaio_read_args *uap)
|
|
|
|
{
|
|
|
|
|
2006-03-24 00:50:06 +00:00
|
|
|
return aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 1);
|
2005-10-30 02:12:49 +00:00
|
|
|
}
|
|
|
|
|
1997-06-16 00:27:26 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_read(struct thread *td, struct aio_read_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-12-29 07:13:47 +00:00
|
|
|
|
2006-03-24 00:50:06 +00:00
|
|
|
return aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, 0);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* syscall - asynchronous write to a file (REALTIME) */
|
2005-10-30 02:12:49 +00:00
|
|
|
int
|
|
|
|
oaio_write(struct thread *td, struct oaio_write_args *uap)
|
|
|
|
{
|
|
|
|
|
2006-03-24 00:50:06 +00:00
|
|
|
return aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 1);
|
2005-10-30 02:12:49 +00:00
|
|
|
}
|
|
|
|
|
1997-06-16 00:27:26 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_write(struct thread *td, struct aio_write_args *uap)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-12-29 07:13:47 +00:00
|
|
|
|
2006-03-24 00:50:06 +00:00
|
|
|
return aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, 0);
|
2005-10-30 02:12:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* syscall - list directed I/O (REALTIME) */
int
olio_listio(struct thread *td, struct olio_listio_args *uap)
{

	/* Old (osigevent) variant: last argument selects legacy sigevent. */
	return do_lio_listio(td, (struct lio_listio_args *)uap, 1);
}
|
|
|
|
|
2003-01-12 09:33:16 +00:00
|
|
|
/* syscall - list directed I/O (REALTIME) */
int
lio_listio(struct thread *td, struct lio_listio_args *uap)
{

	return do_lio_listio(td, uap, 0);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
do_lio_listio(struct thread *td, struct lio_listio_args *uap, int oldsigev)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
1997-07-06 02:40:43 +00:00
|
|
|
struct aiocb *iocb, * const *cbptr;
|
|
|
|
struct kaioinfo *ki;
|
2006-01-22 05:59:27 +00:00
|
|
|
struct aioliojob *lj;
|
2005-10-12 17:51:31 +00:00
|
|
|
struct kevent kev;
|
2006-01-22 05:59:27 +00:00
|
|
|
int nent;
|
|
|
|
int error;
|
1997-11-29 01:33:10 +00:00
|
|
|
int nerror;
|
1997-06-16 00:27:26 +00:00
|
|
|
int i;
|
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
|
|
|
nent = uap->nent;
|
2003-01-12 09:40:23 +00:00
|
|
|
if (nent < 0 || nent > AIO_LISTIO_MAX)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (EINVAL);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
if (p->p_aioinfo == NULL)
|
1997-07-06 02:40:43 +00:00
|
|
|
aio_init_aioinfo(p);
|
|
|
|
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
|
2003-02-19 05:47:46 +00:00
|
|
|
lj = uma_zalloc(aiolio_zone, M_WAITOK);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
lj->lioj_flags = 0;
|
2006-01-22 05:59:27 +00:00
|
|
|
lj->lioj_count = 0;
|
|
|
|
lj->lioj_finished_count = 0;
|
2006-05-09 00:10:11 +00:00
|
|
|
knlist_init(&lj->klist, AIO_MTX(ki), NULL, NULL, NULL);
|
2005-11-03 05:25:26 +00:00
|
|
|
ksiginfo_init(&lj->lioj_ksi);
|
2005-10-12 17:51:31 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
2000-01-14 02:53:29 +00:00
|
|
|
* Setup signal.
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
|
|
|
if (uap->sig && (uap->mode == LIO_NOWAIT)) {
|
2005-10-30 02:12:49 +00:00
|
|
|
bzero(&lj->lioj_signal, sizeof(&lj->lioj_signal));
|
2000-01-14 02:53:29 +00:00
|
|
|
error = copyin(uap->sig, &lj->lioj_signal,
|
2005-10-30 02:12:49 +00:00
|
|
|
oldsigev ? sizeof(struct osigevent) :
|
|
|
|
sizeof(struct sigevent));
|
2001-04-18 22:18:39 +00:00
|
|
|
if (error) {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiolio_zone, lj);
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
2001-04-18 22:18:39 +00:00
|
|
|
}
|
2005-10-12 17:51:31 +00:00
|
|
|
|
|
|
|
if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
|
|
|
|
/* Assume only new style KEVENT */
|
|
|
|
kev.filter = EVFILT_LIO;
|
|
|
|
kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
|
2006-06-02 17:45:48 +00:00
|
|
|
kev.ident = (uintptr_t)uap->acb_list; /* something unique */
|
2005-10-12 17:51:31 +00:00
|
|
|
kev.data = (intptr_t)lj;
|
2006-01-22 05:59:27 +00:00
|
|
|
/* pass user defined sigval data */
|
|
|
|
kev.udata = lj->lioj_signal.sigev_value.sival_ptr;
|
2006-09-24 04:47:47 +00:00
|
|
|
error = kqfd_register(
|
|
|
|
lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1);
|
2005-10-12 17:51:31 +00:00
|
|
|
if (error) {
|
|
|
|
uma_zfree(aiolio_zone, lj);
|
|
|
|
return (error);
|
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
} else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) {
|
|
|
|
;
|
2006-01-23 10:27:15 +00:00
|
|
|
} else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
|
|
|
|
lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) {
|
|
|
|
if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
|
|
|
|
uma_zfree(aiolio_zone, lj);
|
|
|
|
return EINVAL;
|
|
|
|
}
|
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL;
|
|
|
|
} else {
|
2002-03-20 04:09:59 +00:00
|
|
|
uma_zfree(aiolio_zone, lj);
|
2005-10-12 17:51:31 +00:00
|
|
|
return EINVAL;
|
2001-04-18 22:18:39 +00:00
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
}
|
2005-10-12 17:51:31 +00:00
|
|
|
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2001-04-18 22:18:39 +00:00
|
|
|
TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
|
2006-01-22 05:59:27 +00:00
|
|
|
/*
|
|
|
|
* Add extra aiocb count to avoid the lio to be freed
|
|
|
|
* by other threads doing aio_waitcomplete or aio_return,
|
|
|
|
* and prevent event from being sent until we have queued
|
|
|
|
* all tasks.
|
|
|
|
*/
|
|
|
|
lj->lioj_count = 1;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
/*
|
|
|
|
* Get pointers to the list of I/O requests.
|
|
|
|
*/
|
1997-11-29 01:33:10 +00:00
|
|
|
nerror = 0;
|
1997-07-06 02:40:43 +00:00
|
|
|
cbptr = uap->acb_list;
|
2000-01-14 02:53:29 +00:00
|
|
|
for (i = 0; i < uap->nent; i++) {
|
2002-05-25 18:39:42 +00:00
|
|
|
iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
|
2002-08-22 21:24:01 +00:00
|
|
|
if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
|
2006-03-24 00:50:06 +00:00
|
|
|
error = aio_aqueue(td, iocb, lj, LIO_NOP, oldsigev);
|
2006-01-22 05:59:27 +00:00
|
|
|
if (error != 0)
|
1997-11-29 01:33:10 +00:00
|
|
|
nerror++;
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
error = 0;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
1997-07-06 02:40:43 +00:00
|
|
|
if (uap->mode == LIO_WAIT) {
|
2006-01-22 05:59:27 +00:00
|
|
|
while (lj->lioj_count - 1 != lj->lioj_finished_count) {
|
1997-11-29 01:33:10 +00:00
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2006-05-09 00:10:11 +00:00
|
|
|
error = msleep(&p->p_aioinfo, AIO_MTX(ki),
|
2006-01-22 05:59:27 +00:00
|
|
|
PRIBIO | PCATCH, "aiospn", 0);
|
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (lj->lioj_count - 1 == lj->lioj_finished_count) {
|
|
|
|
if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
|
|
|
|
lj->lioj_flags |= LIOJ_KEVENT_POSTED;
|
|
|
|
KNOTE_LOCKED(&lj->klist, 1);
|
|
|
|
}
|
|
|
|
if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED))
|
|
|
|
== LIOJ_SIGNAL
|
|
|
|
&& (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL ||
|
|
|
|
lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) {
|
|
|
|
aio_sendsig(p, &lj->lioj_signal,
|
|
|
|
&lj->lioj_ksi);
|
|
|
|
lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
|
|
|
|
}
|
1997-07-06 02:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
lj->lioj_count--;
|
|
|
|
if (lj->lioj_count == 0) {
|
|
|
|
TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
|
|
|
|
knlist_delete(&lj->klist, curthread, 1);
|
2006-05-09 00:10:11 +00:00
|
|
|
PROC_LOCK(p);
|
2006-01-22 05:59:27 +00:00
|
|
|
sigqueue_take(&lj->lioj_ksi);
|
|
|
|
PROC_UNLOCK(p);
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
uma_zfree(aiolio_zone, lj);
|
|
|
|
} else
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
1997-07-06 02:40:43 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
if (nerror)
|
|
|
|
return (EIO);
|
|
|
|
return (error);
|
1997-06-16 00:27:26 +00:00
|
|
|
}
|
1997-11-29 01:33:10 +00:00
|
|
|
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
/*
|
2006-01-22 05:59:27 +00:00
|
|
|
* Called from interrupt thread for physio, we should return as fast
|
|
|
|
* as possible, so we schedule a biohelper task.
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
*/
|
1997-11-29 01:33:10 +00:00
|
|
|
static void
|
2000-01-14 02:53:29 +00:00
|
|
|
aio_physwakeup(struct buf *bp)
|
1997-11-29 01:33:10 +00:00
|
|
|
{
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
struct aiocblist *aiocbe;
|
|
|
|
|
2003-09-10 15:48:51 +00:00
|
|
|
aiocbe = (struct aiocblist *)bp->b_caller1;
|
2006-01-22 05:59:27 +00:00
|
|
|
taskqueue_enqueue(taskqueue_aiod_bio, &aiocbe->biotask);
|
|
|
|
}
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
/*
|
|
|
|
* Task routine to perform heavy tasks, process wakeup, and signals.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
biohelper(void *context, int pending)
|
|
|
|
{
|
|
|
|
struct aiocblist *aiocbe = context;
|
|
|
|
struct buf *bp;
|
|
|
|
struct proc *userp;
|
2006-02-26 12:56:23 +00:00
|
|
|
struct kaioinfo *ki;
|
2006-01-22 05:59:27 +00:00
|
|
|
int nblks;
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
bp = aiocbe->bp;
|
|
|
|
userp = aiocbe->userproc;
|
2006-02-26 12:56:23 +00:00
|
|
|
ki = userp->p_aioinfo;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = 0;
|
|
|
|
if (bp->b_ioflags & BIO_ERROR)
|
|
|
|
aiocbe->uaiocb._aiocb_private.error = bp->b_error;
|
|
|
|
nblks = btodb(aiocbe->uaiocb.aio_nbytes);
|
|
|
|
if (aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE)
|
|
|
|
aiocbe->outputcharge += nblks;
|
|
|
|
else
|
|
|
|
aiocbe->inputcharge += nblks;
|
|
|
|
aiocbe->bp = NULL;
|
|
|
|
TAILQ_REMOVE(&userp->p_aioinfo->kaio_bufqueue, aiocbe, plist);
|
2006-02-26 12:56:23 +00:00
|
|
|
ki->kaio_buffer_count--;
|
2006-01-22 05:59:27 +00:00
|
|
|
aio_bio_done_notify(userp, aiocbe, DONE_BUF);
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
Finish up the vast majority of the AIO/LIO functionality. Proper signal
support was missing in the previous version of the AIO code. More
tunables added, and very efficient support for VCHR files has been added.
Kernel threads are not used for VCHR files, all work for such files is
done for the requesting process directly. Some attempt has been made to
charge the requesting process for resource utilization, but more work
is needed. aio_fsync is still missing (but the original fsync system
call can be used for now.) aio_cancel is essentially a noop, but that
is okay per POSIX. More aio_cancel functionality can be added later,
if it is found to be needed.
The functions implemented include:
aio_read, aio_write, lio_listio, aio_error, aio_return,
aio_cancel, aio_suspend.
The code has been implemented to support the POSIX spec 1003.1b
(formerly known as POSIX 1003.4 spec) features of the above. The
async I/O features are truly async, with the VCHR mode of operation
being essentially the same as physio (for appropriate files) for
maximum efficiency. This code also supports the signal capability,
is highly tunable, allowing management of resource usage, and
has been written to allow a per process usage quota.
Both the O'Reilly POSIX.4 book and the actual POSIX 1003.1b document
were the reference specs used. Any filedescriptor can be used with
these new system calls. I know of no exceptions where these
system calls will not work. (TTY's will also probably work.)
1997-11-30 04:36:31 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
/* Release mapping into kernel space. */
|
|
|
|
vunmapbuf(bp);
|
|
|
|
relpbuf(bp, NULL);
|
|
|
|
atomic_subtract_int(&num_buf_aio, 1);
|
1997-11-29 01:33:10 +00:00
|
|
|
}
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* syscall - wait for the next completion of an aio request */
|
2000-01-14 02:53:29 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
|
2000-01-14 02:53:29 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2000-01-14 02:53:29 +00:00
|
|
|
struct timeval atv;
|
|
|
|
struct timespec ts;
|
|
|
|
struct kaioinfo *ki;
|
2006-01-22 05:59:27 +00:00
|
|
|
struct aiocblist *cb;
|
|
|
|
struct aiocb *uuaiocb;
|
|
|
|
int error, status, timo;
|
2004-08-13 17:43:53 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
suword(uap->aiocbp, (long)NULL);
|
2000-02-23 07:44:25 +00:00
|
|
|
|
2000-01-14 02:53:29 +00:00
|
|
|
timo = 0;
|
|
|
|
if (uap->timeout) {
|
|
|
|
/* Get timespec struct. */
|
2001-03-05 01:30:23 +00:00
|
|
|
error = copyin(uap->timeout, &ts, sizeof(ts));
|
2000-01-14 02:53:29 +00:00
|
|
|
if (error)
|
2003-01-13 15:06:05 +00:00
|
|
|
return (error);
|
2000-01-14 02:53:29 +00:00
|
|
|
|
|
|
|
if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
TIMESPEC_TO_TIMEVAL(&atv, &ts);
|
|
|
|
if (itimerfix(&atv))
|
|
|
|
return (EINVAL);
|
|
|
|
timo = tvtohz(&atv);
|
|
|
|
}
|
|
|
|
|
2006-01-15 01:55:45 +00:00
|
|
|
if (p->p_aioinfo == NULL)
|
2005-11-08 23:48:32 +00:00
|
|
|
aio_init_aioinfo(p);
|
2006-01-15 01:55:45 +00:00
|
|
|
ki = p->p_aioinfo;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2006-01-22 05:59:27 +00:00
|
|
|
error = 0;
|
|
|
|
cb = NULL;
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_LOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
while ((cb = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
|
2000-01-14 02:53:29 +00:00
|
|
|
ki->kaio_flags |= KAIO_WAKEUP;
|
2006-05-09 00:10:11 +00:00
|
|
|
error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
|
2006-01-22 05:59:27 +00:00
|
|
|
"aiowc", timo);
|
2006-02-26 12:56:23 +00:00
|
|
|
if (timo && error == ERESTART)
|
2006-01-22 05:59:27 +00:00
|
|
|
error = EINTR;
|
|
|
|
if (error)
|
|
|
|
break;
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
2006-01-22 05:59:27 +00:00
|
|
|
|
|
|
|
if (cb != NULL) {
|
|
|
|
MPASS(cb->jobstate == JOBST_JOBFINISHED);
|
|
|
|
uuaiocb = cb->uuaiocb;
|
|
|
|
status = cb->uaiocb._aiocb_private.status;
|
|
|
|
error = cb->uaiocb._aiocb_private.error;
|
|
|
|
td->td_retval[0] = status;
|
|
|
|
if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
|
2007-06-01 01:12:45 +00:00
|
|
|
td->td_ru.ru_oublock += cb->outputcharge;
|
2006-01-22 05:59:27 +00:00
|
|
|
cb->outputcharge = 0;
|
|
|
|
} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
|
2007-06-01 01:12:45 +00:00
|
|
|
td->td_ru.ru_inblock += cb->inputcharge;
|
2006-01-22 05:59:27 +00:00
|
|
|
cb->inputcharge = 0;
|
|
|
|
}
|
|
|
|
aio_free_entry(cb);
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
suword(uap->aiocbp, (long)uuaiocb);
|
|
|
|
suword(&uuaiocb->_aiocb_private.error, error);
|
|
|
|
suword(&uuaiocb->_aiocb_private.status, status);
|
|
|
|
} else
|
2006-05-09 00:10:11 +00:00
|
|
|
AIO_UNLOCK(ki);
|
2006-01-22 05:59:27 +00:00
|
|
|
|
|
|
|
return (error);
|
2000-01-14 02:53:29 +00:00
|
|
|
}
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2006-03-23 08:46:42 +00:00
|
|
|
int
|
|
|
|
aio_fsync(struct thread *td, struct aio_fsync_args *uap)
|
|
|
|
{
|
|
|
|
struct proc *p = td->td_proc;
|
|
|
|
struct kaioinfo *ki;
|
|
|
|
|
|
|
|
if (uap->op != O_SYNC) /* XXX lack of O_DSYNC */
|
|
|
|
return (EINVAL);
|
|
|
|
ki = p->p_aioinfo;
|
|
|
|
if (ki == NULL)
|
|
|
|
aio_init_aioinfo(p);
|
2006-03-24 00:50:06 +00:00
|
|
|
return aio_aqueue(td, uap->aiocbp, NULL, LIO_SYNC, 0);
|
2006-03-23 08:46:42 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* kqueue attach function */
|
2000-04-16 18:53:38 +00:00
|
|
|
static int
|
|
|
|
filt_aioattach(struct knote *kn)
|
|
|
|
{
|
2002-08-06 19:01:08 +00:00
|
|
|
struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The aiocbe pointer must be validated before using it, so
|
|
|
|
* registration is restricted to the kernel; the user cannot
|
|
|
|
* set EV_FLAG1.
|
|
|
|
*/
|
|
|
|
if ((kn->kn_flags & EV_FLAG1) == 0)
|
|
|
|
return (EPERM);
|
2008-01-24 17:10:19 +00:00
|
|
|
kn->kn_ptr.p_aio = aiocbe;
|
2000-04-16 18:53:38 +00:00
|
|
|
kn->kn_flags &= ~EV_FLAG1;
|
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
knlist_add(&aiocbe->klist, kn, 0);
|
2000-04-16 18:53:38 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* kqueue detach function */
|
2000-04-16 18:53:38 +00:00
|
|
|
static void
|
|
|
|
filt_aiodetach(struct knote *kn)
|
|
|
|
{
|
2008-01-24 17:10:19 +00:00
|
|
|
struct aiocblist *aiocbe = kn->kn_ptr.p_aio;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2005-10-12 17:51:31 +00:00
|
|
|
if (!knlist_empty(&aiocbe->klist))
|
|
|
|
knlist_remove(&aiocbe->klist, kn, 0);
|
2000-04-16 18:53:38 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/* kqueue filter function */
|
2000-04-16 18:53:38 +00:00
|
|
|
/*ARGSUSED*/
|
|
|
|
static int
|
|
|
|
filt_aio(struct knote *kn, long hint)
|
|
|
|
{
|
2008-01-24 17:10:19 +00:00
|
|
|
struct aiocblist *aiocbe = kn->kn_ptr.p_aio;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2001-12-09 08:16:36 +00:00
|
|
|
kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
|
2006-01-22 05:59:27 +00:00
|
|
|
if (aiocbe->jobstate != JOBST_JOBFINISHED)
|
2000-04-16 18:53:38 +00:00
|
|
|
return (0);
|
2004-08-13 17:43:53 +00:00
|
|
|
kn->kn_flags |= EV_EOF;
|
2000-04-16 18:53:38 +00:00
|
|
|
return (1);
|
|
|
|
}
|
2005-10-12 17:51:31 +00:00
|
|
|
|
|
|
|
/* kqueue attach function */
|
|
|
|
static int
|
|
|
|
filt_lioattach(struct knote *kn)
|
|
|
|
{
|
2006-01-22 05:59:27 +00:00
|
|
|
struct aioliojob * lj = (struct aioliojob *)kn->kn_sdata;
|
2005-10-12 17:51:31 +00:00
|
|
|
|
|
|
|
/*
|
2006-01-22 05:59:27 +00:00
|
|
|
* The aioliojob pointer must be validated before using it, so
|
2005-10-12 17:51:31 +00:00
|
|
|
* registration is restricted to the kernel; the user cannot
|
|
|
|
* set EV_FLAG1.
|
|
|
|
*/
|
|
|
|
if ((kn->kn_flags & EV_FLAG1) == 0)
|
|
|
|
return (EPERM);
|
2008-01-24 17:10:19 +00:00
|
|
|
kn->kn_ptr.p_lio = lj;
|
2005-10-12 17:51:31 +00:00
|
|
|
kn->kn_flags &= ~EV_FLAG1;
|
|
|
|
|
|
|
|
knlist_add(&lj->klist, kn, 0);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* kqueue detach function */
|
|
|
|
static void
|
|
|
|
filt_liodetach(struct knote *kn)
|
|
|
|
{
|
2008-01-24 17:10:19 +00:00
|
|
|
struct aioliojob * lj = kn->kn_ptr.p_lio;
|
2005-10-12 17:51:31 +00:00
|
|
|
|
|
|
|
if (!knlist_empty(&lj->klist))
|
|
|
|
knlist_remove(&lj->klist, kn, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* kqueue filter function */
|
|
|
|
/*ARGSUSED*/
|
|
|
|
static int
|
|
|
|
filt_lio(struct knote *kn, long hint)
|
|
|
|
{
|
2008-01-24 17:10:19 +00:00
|
|
|
struct aioliojob * lj = kn->kn_ptr.p_lio;
|
2006-01-22 05:59:27 +00:00
|
|
|
|
2005-10-12 17:51:31 +00:00
|
|
|
return (lj->lioj_flags & LIOJ_KEVENT_POSTED);
|
|
|
|
}
|