Currently the Linux character device mmap handling only supports mmap
operations that map a single page that has an associated vm_page_t.
This does not permit mapping larger regions (such as a PCI memory
BAR) and it does not permit mapping addresses beyond the top of RAM
(such as a 64-bit BAR located above the top of RAM).

Instead of using a single OBJT_DEVICE object and passing the physaddr via
the offset as a hack, create a new sglist and OBJT_SG object for each
mmap request. The requested memory attribute is applied to the object
thus affecting all pages mapped by the request.

Reviewed by:	hselasky, np
MFC after:	1 week
Sponsored by:	Chelsio
Differential Revision:	https://reviews.freebsd.org/D3386
This commit is contained in:
John Baldwin 2015-09-03 18:27:39 +00:00
parent e6aa141248
commit 188458ea7c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=287440
2 changed files with 22 additions and 25 deletions

View File

@@ -33,6 +33,7 @@
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
@@ -415,16 +416,6 @@ linux_dev_poll(struct cdev *dev, int events, struct thread *td)
return (revents);
}
static int
linux_dev_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	/*
	 * d_mmap handler removed by this commit: it simply treated the
	 * mmap file offset as the physical address of the single page to
	 * map (the physaddr was smuggled in via the offset by
	 * linux_dev_mmap_single below).
	 */
	/* XXX memattr not honored. */
	*paddr = offset;
	return (0);
}
static int
linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
vm_size_t size, struct vm_object **object, int nprot)
@@ -433,36 +424,41 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
struct linux_file *filp;
struct file *file;
struct vm_area_struct vma;
vm_paddr_t paddr;
vm_page_t m;
int error;
file = curthread->td_fpop;
ldev = dev->si_drv1;
if (ldev == NULL)
return (ENODEV);
if (size != PAGE_SIZE)
return (EINVAL);
if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
return (error);
filp->f_flags = file->f_flag;
vma.vm_start = 0;
vma.vm_end = PAGE_SIZE;
vma.vm_end = size;
vma.vm_pgoff = *offset / PAGE_SIZE;
vma.vm_pfn = 0;
vma.vm_page_prot = 0;
if (filp->f_op->mmap) {
error = -filp->f_op->mmap(filp, &vma);
if (error == 0) {
paddr = (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT;
*offset = paddr;
m = PHYS_TO_VM_PAGE(paddr);
*object = vm_pager_allocate(OBJT_DEVICE, dev,
PAGE_SIZE, nprot, *offset, curthread->td_ucred);
if (*object == NULL)
return (EINVAL);
if (vma.vm_page_prot != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, vma.vm_page_prot);
struct sglist *sg;
sg = sglist_alloc(1, M_WAITOK);
sglist_append_phys(sg,
(vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len);
*object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len,
nprot, 0, curthread->td_ucred);
if (*object == NULL) {
sglist_free(sg);
return (EINVAL);
}
*offset = 0;
if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) {
VM_OBJECT_WLOCK(*object);
vm_object_set_memattr(*object,
vma.vm_page_prot);
VM_OBJECT_WUNLOCK(*object);
}
}
} else
error = ENODEV;
@@ -479,7 +475,6 @@ struct cdevsw linuxcdevsw = {
.d_write = linux_dev_write,
.d_ioctl = linux_dev_ioctl,
.d_mmap_single = linux_dev_mmap_single,
.d_mmap = linux_dev_mmap,
.d_poll = linux_dev_poll,
};

View File

@@ -40,6 +40,7 @@ struct vm_area_struct {
vm_offset_t vm_end;
vm_offset_t vm_pgoff;
vm_paddr_t vm_pfn; /* PFN For mmap. */
vm_size_t vm_len; /* length for mmap. */
vm_memattr_t vm_page_prot;
};
@@ -78,6 +79,7 @@ io_remap_pfn_range(struct vm_area_struct *vma,
{
vma->vm_page_prot = prot;
vma->vm_pfn = pfn;
vma->vm_len = size;
return (0);
}