Add a vop_stdbmap(), and make it part of the default vop vector.

Make 7 filesystems that don't really know about VOP_BMAP rely on
the default vector rather than on more or less complete local
vop_nopbmap() implementations.
phk 2001-04-29 11:48:41 +00:00
parent fb70f28dbe
commit 4d26864fcf
24 changed files with 33 additions and 414 deletions
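
For orientation, a minimal sketch of what the change means for the per-filesystem
operation tables (not part of the diff; the filesystem name examplefs is made up).
A filesystem with no meaningful block mapping now simply omits its vop_bmap_desc
entry, and VOP_BMAP falls through vop_default_desc / vop_defaultop to the default
vector, where vop_stdbmap supplies the usual no-op translation:

static struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	/* unhandled operations, VOP_BMAP included, fall through here */
	{ &vop_default_desc, (vop_t *) vop_defaultop },
	/* ... filesystem-specific entries, but no vop_bmap_desc ... */
	{ NULL, NULL }
};

The new vop_stdbmap in the default vector (see the hunk adding it below) returns
the vnode itself as the device vnode, scales the logical block number by
btodb(f_iosize), and reports zero read-ahead/read-behind runs.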

View File

@ -70,7 +70,6 @@ extern int procfs_kmemaccess __P((struct proc *));
static int linprocfs_access __P((struct vop_access_args *));
static int linprocfs_badop __P((void));
static int linprocfs_bmap __P((struct vop_bmap_args *));
static int linprocfs_close __P((struct vop_close_args *));
static int linprocfs_getattr __P((struct vop_getattr_args *));
static int linprocfs_inactive __P((struct vop_inactive_args *));
@ -330,36 +329,6 @@ linprocfs_ioctl(ap)
return 0;
}
/*
* do block mapping for pfsnode (vp).
* since we don't use the buffer cache
* for procfs this function should never
* be called. in any case, it's not clear
* what part of the kernel ever makes use
* of this function. for sanity, this is the
* usual no-op bmap, although returning
* (EIO) would be a reasonable alternative.
*/
static int
linprocfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
return (0);
}
/*
* linprocfs_inactive is called when the pfsnode
* is vrele'd and the reference count goes
@ -851,7 +820,6 @@ static struct vnodeopv_entry_desc linprocfs_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) linprocfs_access },
{ &vop_advlock_desc, (vop_t *) linprocfs_badop },
{ &vop_bmap_desc, (vop_t *) linprocfs_bmap },
{ &vop_close_desc, (vop_t *) linprocfs_close },
{ &vop_create_desc, (vop_t *) linprocfs_badop },
{ &vop_getattr_desc, (vop_t *) linprocfs_getattr },

View File

@ -685,7 +685,6 @@ static vop_t **devfs_vnodeop_p;
static struct vnodeopv_entry_desc devfs_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) devfs_access },
{ &vop_bmap_desc, (vop_t *) devfs_badop },
{ &vop_getattr_desc, (vop_t *) devfs_getattr },
{ &vop_lookup_desc, (vop_t *) devfs_lookup },
{ &vop_pathconf_desc, (vop_t *) vop_stdpathconf },
@ -705,31 +704,9 @@ static struct vnodeopv_desc devfs_vnodeop_opv_desc =
VNODEOP_SET(devfs_vnodeop_opv_desc);
#if 0
int
foo(ap)
struct vop_generic_args *ap;
{
int i;
i = spec_vnoperate(ap);
printf("foo(%s) = %d\n", ap->a_desc->vdesc_name, i);
return (i);
}
#endif
static vop_t **devfs_specop_p;
static struct vnodeopv_entry_desc devfs_specop_entries[] = {
#if 1
{ &vop_default_desc, (vop_t *) spec_vnoperate },
#else
{ &vop_default_desc, (vop_t *) foo },
{ &vop_lock_desc, (vop_t *) spec_vnoperate },
{ &vop_unlock_desc, (vop_t *) spec_vnoperate },
{ &vop_lease_desc, (vop_t *) spec_vnoperate },
{ &vop_strategy_desc, (vop_t *) spec_vnoperate },
{ &vop_bmap_desc, (vop_t *) spec_vnoperate },
#endif
{ &vop_access_desc, (vop_t *) devfs_access },
{ &vop_getattr_desc, (vop_t *) devfs_getattr },
{ &vop_print_desc, (vop_t *) devfs_print },

View File

@ -557,7 +557,6 @@ fdesc_badop()
static struct vnodeopv_entry_desc fdesc_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) vop_null },
{ &vop_bmap_desc, (vop_t *) fdesc_badop },
{ &vop_getattr_desc, (vop_t *) fdesc_getattr },
{ &vop_inactive_desc, (vop_t *) fdesc_inactive },
{ &vop_lookup_desc, (vop_t *) fdesc_lookup },

View File

@ -72,7 +72,6 @@ static int fifo_write __P((struct vop_write_args *));
static int fifo_ioctl __P((struct vop_ioctl_args *));
static int fifo_poll __P((struct vop_poll_args *));
static int fifo_kqfilter __P((struct vop_kqfilter_args *));
static int fifo_bmap __P((struct vop_bmap_args *));
static int fifo_pathconf __P((struct vop_pathconf_args *));
static int fifo_advlock __P((struct vop_advlock_args *));
@ -91,7 +90,6 @@ static struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) vop_ebadf },
{ &vop_advlock_desc, (vop_t *) fifo_advlock },
{ &vop_bmap_desc, (vop_t *) fifo_bmap },
{ &vop_close_desc, (vop_t *) fifo_close },
{ &vop_create_desc, (vop_t *) fifo_badop },
{ &vop_getattr_desc, (vop_t *) vop_ebadf },
@ -462,32 +460,6 @@ fifo_poll(ap)
return (revents);
}
/*
* This is a noop, simply returning what one has been given.
*/
static int
fifo_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
/*
* Device close routine
*/

View File

@ -121,6 +121,7 @@ ntfs_putpages(ap)
}
#endif
#if defined(__NetBSD__)
/*
* This is a noop, simply returning what one has been given.
*/
@ -142,12 +143,9 @@ ntfs_bmap(ap)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
#if !defined(__NetBSD__)
if (ap->a_runb != NULL)
*ap->a_runb = 0;
#endif
return (0);
}
#endif
static int
ntfs_read(ap)
@ -871,7 +869,6 @@ struct vnodeopv_entry_desc ntfs_vnodeop_entries[] = {
{ &vop_readdir_desc, (vop_t *)ntfs_readdir },
{ &vop_fsync_desc, (vop_t *)ntfs_fsync },
{ &vop_bmap_desc, (vop_t *)ntfs_bmap },
{ &vop_getpages_desc, (vop_t *) ntfs_getpages },
{ &vop_putpages_desc, (vop_t *) ntfs_putpages },
{ &vop_strategy_desc, (vop_t *)ntfs_strategy },

View File

@ -78,7 +78,6 @@ static int nwfs_mkdir(struct vop_mkdir_args *);
static int nwfs_rmdir(struct vop_rmdir_args *);
static int nwfs_symlink(struct vop_symlink_args *);
static int nwfs_readdir(struct vop_readdir_args *);
static int nwfs_bmap(struct vop_bmap_args *);
static int nwfs_strategy(struct vop_strategy_args *);
static int nwfs_print(struct vop_print_args *);
static int nwfs_pathconf(struct vop_pathconf_args *ap);
@ -88,7 +87,6 @@ vop_t **nwfs_vnodeop_p;
static struct vnodeopv_entry_desc nwfs_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) nwfs_access },
{ &vop_bmap_desc, (vop_t *) nwfs_bmap },
{ &vop_open_desc, (vop_t *) nwfs_open },
{ &vop_close_desc, (vop_t *) nwfs_close },
{ &vop_create_desc, (vop_t *) nwfs_create },
@ -826,29 +824,6 @@ static int nwfs_strategy (ap)
return (error);
}
static int
nwfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
if (ap->a_vpp != NULL)
*ap->a_vpp = vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
/*
* How to keep the brain busy ...

View File

@ -575,7 +575,6 @@ vop_t **portal_vnodeop_p;
static struct vnodeopv_entry_desc portal_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) vop_null },
{ &vop_bmap_desc, (vop_t *) portal_badop },
{ &vop_getattr_desc, (vop_t *) portal_getattr },
{ &vop_lookup_desc, (vop_t *) portal_lookup },
{ &vop_open_desc, (vop_t *) portal_open },

View File

@ -317,36 +317,6 @@ procfs_ioctl(ap)
return 0;
}
/*
* do block mapping for pfsnode (vp).
* since we don't use the buffer cache
* for procfs this function should never
* be called. in any case, it's not clear
* what part of the kernel ever makes use
* of this function. for sanity, this is the
* usual no-op bmap, although returning
* (EIO) would be a reasonable alternative.
*/
static int
procfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
return (0);
}
/*
* _reclaim is called when getnewvnode()
* wants to make use of an entry on the vnode
@ -1019,7 +989,6 @@ static struct vnodeopv_entry_desc procfs_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) procfs_access },
{ &vop_advlock_desc, (vop_t *) procfs_badop },
{ &vop_bmap_desc, (vop_t *) procfs_bmap },
{ &vop_close_desc, (vop_t *) procfs_close },
{ &vop_create_desc, (vop_t *) procfs_badop },
{ &vop_getattr_desc, (vop_t *) procfs_getattr },

View File

@ -78,7 +78,6 @@ static int smbfs_mkdir(struct vop_mkdir_args *);
static int smbfs_rmdir(struct vop_rmdir_args *);
static int smbfs_symlink(struct vop_symlink_args *);
static int smbfs_readdir(struct vop_readdir_args *);
static int smbfs_bmap(struct vop_bmap_args *);
static int smbfs_strategy(struct vop_strategy_args *);
static int smbfs_print(struct vop_print_args *);
static int smbfs_pathconf(struct vop_pathconf_args *ap);
@ -92,7 +91,6 @@ static struct vnodeopv_entry_desc smbfs_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) smbfs_access },
{ &vop_advlock_desc, (vop_t *) smbfs_advlock },
{ &vop_bmap_desc, (vop_t *) smbfs_bmap },
{ &vop_close_desc, (vop_t *) smbfs_close },
{ &vop_create_desc, (vop_t *) smbfs_create },
{ &vop_fsync_desc, (vop_t *) smbfs_fsync },
@ -900,30 +898,6 @@ smbfs_strategy (ap)
return error;
}
static int
smbfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
if (ap->a_vpp != NULL)
*ap->a_vpp = vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
int
smbfs_ioctl(ap)
struct vop_ioctl_args /* {

View File

@ -73,7 +73,6 @@ static struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) vop_ebadf },
{ &vop_advlock_desc, (vop_t *) spec_advlock },
{ &vop_bmap_desc, (vop_t *) spec_bmap },
{ &vop_close_desc, (vop_t *) spec_close },
{ &vop_create_desc, (vop_t *) vop_panic },
{ &vop_freeblks_desc, (vop_t *) spec_freeblks },

View File

@ -69,7 +69,6 @@ SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
static int union_access __P((struct vop_access_args *ap));
static int union_advlock __P((struct vop_advlock_args *ap));
static int union_bmap __P((struct vop_bmap_args *ap));
static int union_close __P((struct vop_close_args *ap));
static int union_create __P((struct vop_create_args *ap));
static int union_fsync __P((struct vop_fsync_args *ap));
@ -1823,31 +1822,6 @@ union_unlock(ap)
return(error);
}
/*
* union_bmap:
*
* There isn't much we can do. We cannot push through to the real vnode
* to get to the underlying device because this will bypass data
* cached by the real vnode.
*
* For some reason we cannot return the 'real' vnode either, it seems
* to blow up memory maps.
*/
static int
union_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
return(EOPNOTSUPP);
}
static int
union_print(ap)
struct vop_print_args /* {
@ -1941,7 +1915,7 @@ static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) union_access },
{ &vop_advlock_desc, (vop_t *) union_advlock },
{ &vop_bmap_desc, (vop_t *) union_bmap },
{ &vop_bmap_desc, (vop_t *) vop_eopnotsupp },
{ &vop_close_desc, (vop_t *) union_close },
{ &vop_create_desc, (vop_t *) union_create },
{ &vop_fsync_desc, (vop_t *) union_fsync },

View File

@ -80,6 +80,7 @@ vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_eopnotsupp },
{ &vop_advlock_desc, (vop_t *) vop_einval },
{ &vop_bmap_desc, (vop_t *) vop_stdbmap },
{ &vop_close_desc, (vop_t *) vop_null },
{ &vop_createvobject_desc, (vop_t *) vop_stdcreatevobject },
{ &vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject },
@ -609,6 +610,30 @@ vop_stdgetvobject(ap)
return (vp->v_object ? 0 : EINVAL);
}
int
vop_stdbmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
/*
* vfs default ops
 * used to fill the vfs function table to get reasonable default return values.

View File

@ -557,7 +557,6 @@ fdesc_badop()
static struct vnodeopv_entry_desc fdesc_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) vop_null },
{ &vop_bmap_desc, (vop_t *) fdesc_badop },
{ &vop_getattr_desc, (vop_t *) fdesc_getattr },
{ &vop_inactive_desc, (vop_t *) fdesc_inactive },
{ &vop_lookup_desc, (vop_t *) fdesc_lookup },

View File

@ -72,7 +72,6 @@ static int fifo_write __P((struct vop_write_args *));
static int fifo_ioctl __P((struct vop_ioctl_args *));
static int fifo_poll __P((struct vop_poll_args *));
static int fifo_kqfilter __P((struct vop_kqfilter_args *));
static int fifo_bmap __P((struct vop_bmap_args *));
static int fifo_pathconf __P((struct vop_pathconf_args *));
static int fifo_advlock __P((struct vop_advlock_args *));
@ -91,7 +90,6 @@ static struct vnodeopv_entry_desc fifo_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) vop_ebadf },
{ &vop_advlock_desc, (vop_t *) fifo_advlock },
{ &vop_bmap_desc, (vop_t *) fifo_bmap },
{ &vop_close_desc, (vop_t *) fifo_close },
{ &vop_create_desc, (vop_t *) fifo_badop },
{ &vop_getattr_desc, (vop_t *) vop_ebadf },
@ -462,32 +460,6 @@ fifo_poll(ap)
return (revents);
}
/*
* This is a noop, simply returning what one has been given.
*/
static int
fifo_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
/*
* Device close routine
*/

View File

@ -575,7 +575,6 @@ vop_t **portal_vnodeop_p;
static struct vnodeopv_entry_desc portal_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) vop_null },
{ &vop_bmap_desc, (vop_t *) portal_badop },
{ &vop_getattr_desc, (vop_t *) portal_getattr },
{ &vop_lookup_desc, (vop_t *) portal_lookup },
{ &vop_open_desc, (vop_t *) portal_open },

View File

@ -317,36 +317,6 @@ procfs_ioctl(ap)
return 0;
}
/*
* do block mapping for pfsnode (vp).
* since we don't use the buffer cache
* for procfs this function should never
* be called. in any case, it's not clear
* what part of the kernel ever makes use
* of this function. for sanity, this is the
* usual no-op bmap, although returning
* (EIO) would be a reasonable alternative.
*/
static int
procfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
return (0);
}
/*
* _reclaim is called when getnewvnode()
* wants to make use of an entry on the vnode
@ -1019,7 +989,6 @@ static struct vnodeopv_entry_desc procfs_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) procfs_access },
{ &vop_advlock_desc, (vop_t *) procfs_badop },
{ &vop_bmap_desc, (vop_t *) procfs_bmap },
{ &vop_close_desc, (vop_t *) procfs_close },
{ &vop_create_desc, (vop_t *) procfs_badop },
{ &vop_getattr_desc, (vop_t *) procfs_getattr },

View File

@ -73,7 +73,6 @@ static struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) vop_ebadf },
{ &vop_advlock_desc, (vop_t *) spec_advlock },
{ &vop_bmap_desc, (vop_t *) spec_bmap },
{ &vop_close_desc, (vop_t *) spec_close },
{ &vop_create_desc, (vop_t *) vop_panic },
{ &vop_freeblks_desc, (vop_t *) spec_freeblks },

View File

@ -69,7 +69,6 @@ SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
static int union_access __P((struct vop_access_args *ap));
static int union_advlock __P((struct vop_advlock_args *ap));
static int union_bmap __P((struct vop_bmap_args *ap));
static int union_close __P((struct vop_close_args *ap));
static int union_create __P((struct vop_create_args *ap));
static int union_fsync __P((struct vop_fsync_args *ap));
@ -1823,31 +1822,6 @@ union_unlock(ap)
return(error);
}
/*
* union_bmap:
*
* There isn't much we can do. We cannot push through to the real vnode
* to get to the underlying device because this will bypass data
* cached by the real vnode.
*
* For some reason we cannot return the 'real' vnode either, it seems
* to blow up memory maps.
*/
static int
union_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
return(EOPNOTSUPP);
}
static int
union_print(ap)
struct vop_print_args /* {
@ -1941,7 +1915,7 @@ static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) union_access },
{ &vop_advlock_desc, (vop_t *) union_advlock },
{ &vop_bmap_desc, (vop_t *) union_bmap },
{ &vop_bmap_desc, (vop_t *) vop_eopnotsupp },
{ &vop_close_desc, (vop_t *) union_close },
{ &vop_create_desc, (vop_t *) union_create },
{ &vop_fsync_desc, (vop_t *) union_fsync },

View File

@ -121,7 +121,6 @@ static int nfs_mkdir __P((struct vop_mkdir_args *));
static int nfs_rmdir __P((struct vop_rmdir_args *));
static int nfs_symlink __P((struct vop_symlink_args *));
static int nfs_readdir __P((struct vop_readdir_args *));
static int nfs_bmap __P((struct vop_bmap_args *));
static int nfs_strategy __P((struct vop_strategy_args *));
static int nfs_lookitup __P((struct vnode *, const char *, int,
struct ucred *, struct proc *, struct nfsnode **));
@ -138,7 +137,6 @@ static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) nfs_access },
{ &vop_advlock_desc, (vop_t *) nfs_advlock },
{ &vop_bmap_desc, (vop_t *) nfs_bmap },
{ &vop_close_desc, (vop_t *) nfs_close },
{ &vop_create_desc, (vop_t *) nfs_create },
{ &vop_fsync_desc, (vop_t *) nfs_fsync },
@ -2681,39 +2679,6 @@ nfs_commit(vp, offset, cnt, cred, procp)
return (error);
}
/*
* Kludge City..
* - make nfs_bmap() essentially a no-op that does no translation
* - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
* (Maybe I could use the process's page mapping, but I was concerned that
* Kernel Write might not be enabled and also figured copyout() would do
* a lot more work than bcopy() and also it currently happens in the
* context of the swapper process (2).
*/
static int
nfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
if (ap->a_vpp != NULL)
*ap->a_vpp = vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
/*
* Strategy routine.
* For async requests when nfsiod(s) are running, queue the request by

View File

@ -121,7 +121,6 @@ static int nfs_mkdir __P((struct vop_mkdir_args *));
static int nfs_rmdir __P((struct vop_rmdir_args *));
static int nfs_symlink __P((struct vop_symlink_args *));
static int nfs_readdir __P((struct vop_readdir_args *));
static int nfs_bmap __P((struct vop_bmap_args *));
static int nfs_strategy __P((struct vop_strategy_args *));
static int nfs_lookitup __P((struct vnode *, const char *, int,
struct ucred *, struct proc *, struct nfsnode **));
@ -138,7 +137,6 @@ static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) nfs_access },
{ &vop_advlock_desc, (vop_t *) nfs_advlock },
{ &vop_bmap_desc, (vop_t *) nfs_bmap },
{ &vop_close_desc, (vop_t *) nfs_close },
{ &vop_create_desc, (vop_t *) nfs_create },
{ &vop_fsync_desc, (vop_t *) nfs_fsync },
@ -2681,39 +2679,6 @@ nfs_commit(vp, offset, cnt, cred, procp)
return (error);
}
/*
* Kludge City..
* - make nfs_bmap() essentially a no-op that does no translation
* - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
* (Maybe I could use the process's page mapping, but I was concerned that
* Kernel Write might not be enabled and also figured copyout() would do
* a lot more work than bcopy() and also it currently happens in the
* context of the swapper process (2).
*/
static int
nfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
if (ap->a_vpp != NULL)
*ap->a_vpp = vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
/*
* Strategy routine.
* For async requests when nfsiod(s) are running, queue the request by

View File

@ -121,6 +121,7 @@ ntfs_putpages(ap)
}
#endif
#if defined(__NetBSD__)
/*
* This is a noop, simply returning what one has been given.
*/
@ -142,12 +143,9 @@ ntfs_bmap(ap)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
#if !defined(__NetBSD__)
if (ap->a_runb != NULL)
*ap->a_runb = 0;
#endif
return (0);
}
#endif
static int
ntfs_read(ap)
@ -871,7 +869,6 @@ struct vnodeopv_entry_desc ntfs_vnodeop_entries[] = {
{ &vop_readdir_desc, (vop_t *)ntfs_readdir },
{ &vop_fsync_desc, (vop_t *)ntfs_fsync },
{ &vop_bmap_desc, (vop_t *)ntfs_bmap },
{ &vop_getpages_desc, (vop_t *) ntfs_getpages },
{ &vop_putpages_desc, (vop_t *) ntfs_putpages },
{ &vop_strategy_desc, (vop_t *)ntfs_strategy },

View File

@ -78,7 +78,6 @@ static int nwfs_mkdir(struct vop_mkdir_args *);
static int nwfs_rmdir(struct vop_rmdir_args *);
static int nwfs_symlink(struct vop_symlink_args *);
static int nwfs_readdir(struct vop_readdir_args *);
static int nwfs_bmap(struct vop_bmap_args *);
static int nwfs_strategy(struct vop_strategy_args *);
static int nwfs_print(struct vop_print_args *);
static int nwfs_pathconf(struct vop_pathconf_args *ap);
@ -88,7 +87,6 @@ vop_t **nwfs_vnodeop_p;
static struct vnodeopv_entry_desc nwfs_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_defaultop },
{ &vop_access_desc, (vop_t *) nwfs_access },
{ &vop_bmap_desc, (vop_t *) nwfs_bmap },
{ &vop_open_desc, (vop_t *) nwfs_open },
{ &vop_close_desc, (vop_t *) nwfs_close },
{ &vop_create_desc, (vop_t *) nwfs_create },
@ -826,29 +824,6 @@ static int nwfs_strategy (ap)
return (error);
}
static int
nwfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
if (ap->a_vpp != NULL)
*ap->a_vpp = vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
/*
* How to keep the brain busy ...

View File

@ -620,6 +620,7 @@ int vfs_object_create __P((struct vnode *vp, struct proc *p,
void vfs_timestamp __P((struct timespec *));
void vfs_write_resume __P((struct mount *mp));
void vfs_write_suspend __P((struct mount *mp));
int vop_stdbmap __P((struct vop_bmap_args *));
int vop_stdgetwritemount __P((struct vop_getwritemount_args *));
int vop_stdinactive __P((struct vop_inactive_args *));
int vop_stdislocked __P((struct vop_islocked_args *));

View File

@ -53,7 +53,6 @@
#include <ufs/mfs/mfs_extern.h>
static int mfs_badop __P((struct vop_generic_args *));
static int mfs_bmap __P((struct vop_bmap_args *));
static int mfs_close __P((struct vop_close_args *));
static int mfs_fsync __P((struct vop_fsync_args *));
static int mfs_freeblks __P((struct vop_freeblks_args *));
@ -69,7 +68,7 @@ static int mfs_getpages __P((struct vop_getpages_args *)); /* XXX */
vop_t **mfs_vnodeop_p;
static struct vnodeopv_entry_desc mfs_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) mfs_badop },
{ &vop_bmap_desc, (vop_t *) mfs_bmap },
{ &vop_bmap_desc, (vop_t *) vop_stdbmap },
{ &vop_close_desc, (vop_t *) mfs_close },
{ &vop_createvobject_desc, (vop_t *) vop_stdcreatevobject },
{ &vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject },
@ -291,29 +290,6 @@ mfs_doio(bp, mfsp)
bufdone(bp);
}
/*
* This is a noop, simply returning what one has been given.
*/
static int
mfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
ufs_daddr_t a_bn;
struct vnode **a_vpp;
ufs_daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
return (0);
}
/*
* Memory filesystem close routine
*/