freebsd-dev/zfs_config.h.in


/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* invalidate_bdev() wants 1 arg */
#undef HAVE_1ARG_INVALIDATE_BDEV
/* bio_end_io_t wants 2 args */
#undef HAVE_2ARGS_BIO_END_IO_T
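/*
 * Editorial sketch (not emitted by autoheader): at build time configure
 * rewrites each "#undef HAVE_FOO" in this template into a real definition
 * or a commented-out line, and the kernel compat code then selects the
 * matching interface.  For HAVE_2ARGS_BIO_END_IO_T that selection might
 * look roughly like the following; the completion-handler name is
 * hypothetical.
 *
 *   #ifdef HAVE_2ARGS_BIO_END_IO_T
 *   static void zio_bio_completion(struct bio *bio, int error);
 *   #else
 *   static int zio_bio_completion(struct bio *bio, unsigned int size, int error);
 *   #endif
 */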
/* security_inode_init_security wants 6 args */
#undef HAVE_6ARGS_SECURITY_INODE_INIT_SECURITY
/* struct block_device_operations uses bdevs */
#undef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
/* bdev_logical_block_size() is available */
#undef HAVE_BDEV_LOGICAL_BLOCK_SIZE
/* struct super_block has s_bdi */
#undef HAVE_BDI
/* bdi_setup_and_register() is available */
#undef HAVE_BDI_SETUP_AND_REGISTER
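/*
 * Illustrative sketch (assumption, not part of the generated template):
 * when bdi_setup_and_register() is available it can register a
 * per-filesystem backing_dev_info at mount time, roughly as below.
 * The zsb/z_bdi field names are hypothetical.
 *
 *   #ifdef HAVE_BDI_SETUP_AND_REGISTER
 *   error = bdi_setup_and_register(&zsb->z_bdi, "zfs", BDI_CAP_MAP_COPY);
 *   if (error == 0)
 *           sb->s_bdi = &zsb->z_bdi;
 *   #endif
 */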
/* bio_empty_barrier() is defined */
#undef HAVE_BIO_EMPTY_BARRIER
/* REQ_FAILFAST_MASK is defined */
#undef HAVE_BIO_REQ_FAILFAST_MASK
/* BIO_RW_FAILFAST is defined */
#undef HAVE_BIO_RW_FAILFAST
/* BIO_RW_FAILFAST_* are defined */
#undef HAVE_BIO_RW_FAILFAST_DTD
/* BIO_RW_SYNC is defined */
#undef HAVE_BIO_RW_SYNC
/* BIO_RW_SYNCIO is defined */
#undef HAVE_BIO_RW_SYNCIO
/* blkdev_get_by_path() is available */
#undef HAVE_BLKDEV_GET_BY_PATH
/* blk_end_request() is available */
#undef HAVE_BLK_END_REQUEST
/* blk_end_request() is GPL-only */
#undef HAVE_BLK_END_REQUEST_GPL_ONLY
/* blk_fetch_request() is available */
#undef HAVE_BLK_FETCH_REQUEST
/* blk_queue_discard() is available */
#undef HAVE_BLK_QUEUE_DISCARD
/* blk_queue_flush() is available */
#undef HAVE_BLK_QUEUE_FLUSH
/* blk_queue_flush() is GPL-only */
#undef HAVE_BLK_QUEUE_FLUSH_GPL_ONLY
/* blk_queue_io_opt() is available */
#undef HAVE_BLK_QUEUE_IO_OPT
/* blk_queue_max_hw_sectors() is available */
#undef HAVE_BLK_QUEUE_MAX_HW_SECTORS
/* blk_queue_max_segments() is available */
#undef HAVE_BLK_QUEUE_MAX_SEGMENTS
/* blk_queue_nonrot() is available */
#undef HAVE_BLK_QUEUE_NONROT
/* blk_queue_physical_block_size() is available */
#undef HAVE_BLK_QUEUE_PHYSICAL_BLOCK_SIZE
/* blk_requeue_request() is available */
#undef HAVE_BLK_REQUEUE_REQUEST
/* blk_rq_bytes() is available */
#undef HAVE_BLK_RQ_BYTES
/* blk_rq_bytes() is GPL-only */
#undef HAVE_BLK_RQ_BYTES_GPL_ONLY
/* blk_rq_pos() is available */
#undef HAVE_BLK_RQ_POS
/* blk_rq_sectors() is available */
#undef HAVE_BLK_RQ_SECTORS
/* security_inode_init_security wants callback */
#undef HAVE_CALLBACK_SECURITY_INODE_INIT_SECURITY
/* check_disk_size_change() is available */
#undef HAVE_CHECK_DISK_SIZE_CHANGE
/* super_block uses const struct xattr_handler */
#undef HAVE_CONST_XATTR_HANDLER
/* xattr_handler->get() wants dentry */
#undef HAVE_DENTRY_XATTR_GET
/* xattr_handler->set() wants dentry */
#undef HAVE_DENTRY_XATTR_SET
/* Define to 1 if you have the <dlfcn.h> header file. */
#undef HAVE_DLFCN_H
/* d_obtain_alias() is available */
#undef HAVE_D_OBTAIN_ALIAS
/* sops->evict_inode() exists */
#undef HAVE_EVICT_INODE
/* fops->fallocate() exists */
#undef HAVE_FILE_FALLOCATE
/* kernel defines fmode_t */
#undef HAVE_FMODE_T
/* sops->free_cached_objects() exists */
#undef HAVE_FREE_CACHED_OBJECTS
/* fops->fsync() with range */
#undef HAVE_FSYNC_RANGE
/* fops->fsync() without dentry */
#undef HAVE_FSYNC_WITHOUT_DENTRY
/* fops->fsync() with dentry */
#undef HAVE_FSYNC_WITH_DENTRY
/* get_disk_ro() is available */
#undef HAVE_GET_DISK_RO
/* Define to 1 if licensed under the GPL */
#undef HAVE_GPL_ONLY_SYMBOLS
/* iops->fallocate() exists */
#undef HAVE_INODE_FALLOCATE
/* insert_inode_locked() is available */
#undef HAVE_INSERT_INODE_LOCKED
/* Define to 1 if you have the <inttypes.h> header file. */
#undef HAVE_INTTYPES_H
/* Define to 1 if ioctl() defined in <stropts.h> */
#undef HAVE_IOCTL_IN_STROPTS_H
/* Define to 1 if ioctl() defined in <sys/ioctl.h> */
#undef HAVE_IOCTL_IN_SYS_IOCTL_H
/* Define to 1 if ioctl() defined in <unistd.h> */
#undef HAVE_IOCTL_IN_UNISTD_H
/* kernel defines KOBJ_NAME_LEN */
#undef HAVE_KOBJ_NAME_LEN
/* Define if you have libblkid */
#undef HAVE_LIBBLKID
/* Define if you have selinux */
#undef HAVE_LIBSELINUX
/* Define if you have libuuid */
#undef HAVE_LIBUUID
/* Define to 1 if you have the `z' library (-lz). */
#undef HAVE_LIBZ
/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H
/* mount_nodev() is available */
#undef HAVE_MOUNT_NODEV
/* sops->nr_cached_objects() exists */
#undef HAVE_NR_CACHED_OBJECTS
/* open_bdev_exclusive() is available */
#undef HAVE_OPEN_BDEV_EXCLUSIVE
/* REQ_SYNC is defined */
#undef HAVE_REQ_SYNC
/* rq_for_each_segment() is available */
#undef HAVE_RQ_FOR_EACH_SEGMENT
/* rq_is_sync() is available */
#undef HAVE_RQ_IS_SYNC
/* set_nlink() is available */
#undef HAVE_SET_NLINK
/* sops->show_options() with dentry */
#undef HAVE_SHOW_OPTIONS_WITH_DENTRY
/* struct super_block has s_shrink */
#undef HAVE_SHRINK
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H
/* Define to 1 if you have the <stdlib.h> header file. */
#undef HAVE_STDLIB_H
/* Define to 1 if you have the <strings.h> header file. */
#undef HAVE_STRINGS_H
/* Define to 1 if you have the <string.h> header file. */
#undef HAVE_STRING_H
/* Define to 1 if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H
/* Define to 1 if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H
/* truncate_setsize() is available */
#undef HAVE_TRUNCATE_SETSIZE
/* Define to 1 if you have the <unistd.h> header file. */
#undef HAVE_UNISTD_H
/* Define if you have zlib */
#undef HAVE_ZLIB
/* Define to the sub-directory in which libtool stores uninstalled libraries.
*/
#undef LT_OBJDIR
/* Define to 1 if NPTL threading implementation includes guard area in stack
allocation */
#undef NPTL_GUARD_WITHIN_STACK
/* Name of package */
#undef PACKAGE
/* Define to the address where bug reports for this package should be sent. */
#undef PACKAGE_BUGREPORT
/* Define to the full name of this package. */
#undef PACKAGE_NAME
/* Define to the full name and version of this package. */
#undef PACKAGE_STRING
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the version of this package. */
#undef PACKAGE_VERSION
/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
/* Version number of package */
#undef VERSION
/* Define the project alias string. */
#undef ZFS_META_ALIAS
/* Define the project author. */
#undef ZFS_META_AUTHOR
/* Define the project release date. */
#undef ZFS_META_DATA
/* Define the project license. */
#undef ZFS_META_LICENSE
/* Define the libtool library 'age' version information. */
#undef ZFS_META_LT_AGE
/* Define the libtool library 'current' version information. */
#undef ZFS_META_LT_CURRENT
/* Define the libtool library 'revision' version information. */
#undef ZFS_META_LT_REVISION
/* Define the project name. */
#undef ZFS_META_NAME
/* Define the project release. */
#undef ZFS_META_RELEASE
/* Define the project version. */
#undef ZFS_META_VERSION