Add support for shared memory functions to the LinuxKPI.
Obtained from:	kmacy @
MFC after:	1 week
Sponsored by:	Mellanox Technologies
commit c05238a681
parent 2d347b2ef8
@@ -2,7 +2,7 @@
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -288,4 +288,25 @@ noop_llseek(struct linux_file *file, loff_t offset, int whence)
 	return (file->_file->f_offset);
 }
 
+/* Shared memory support */
+unsigned long linux_invalidate_mapping_pages(vm_object_t, pgoff_t, pgoff_t);
+struct page *linux_shmem_read_mapping_page_gfp(vm_object_t, int, gfp_t);
+struct linux_file *linux_shmem_file_setup(const char *, loff_t, unsigned long);
+void linux_shmem_truncate_range(vm_object_t, loff_t, loff_t);
+
+#define	invalidate_mapping_pages(...) \
+	linux_invalidate_mapping_pages(__VA_ARGS__)
+
+#define	shmem_read_mapping_page(...) \
+	linux_shmem_read_mapping_page_gfp(__VA_ARGS__, 0)
+
+#define	shmem_read_mapping_page_gfp(...) \
+	linux_shmem_read_mapping_page_gfp(__VA_ARGS__)
+
+#define	shmem_file_setup(...) \
+	linux_shmem_file_setup(__VA_ARGS__)
+
+#define	shmem_truncate_range(...) \
+	linux_shmem_truncate_range(__VA_ARGS__)
+
 #endif /* _LINUX_FS_H_ */
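For context, here is a consumer-side sketch of how the new header shims are meant to be called (illustrative only, not part of this commit). In this emulation the Linux "mapping" argument is the backing VM object, which the implementation below stores in filp->f_shmem, so a ported driver passes that object directly; the function name and the "lkpi-example" string are hypothetical, and error unwinding/teardown is elided.

#include <linux/err.h>
#include <linux/fs.h>

/* Hypothetical caller of the new shmem shims (sketch only). */
static struct linux_file *
example_shmem_backing(loff_t size)
{
	struct linux_file *filp;
	struct page *page;

	/* Create an anonymous, pager-backed object of `size' bytes. */
	filp = shmem_file_setup("lkpi-example", size, 0);
	if (IS_ERR(filp))
		return (filp);

	/* Fault in (or zero-fill) the first page of the backing object. */
	page = shmem_read_mapping_page(filp->f_shmem, 0);
	if (IS_ERR(page))
		return (ERR_PTR(PTR_ERR(page)));

	return (filp);
}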
@@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/preempt.h>
+#include <linux/fs.h>
 
 #if defined(__amd64__) || defined(__aarch64__) || defined(__riscv)
 #define	LINUXKPI_HAVE_DMAP
@@ -289,3 +290,107 @@ is_vmalloc_addr(const void *addr)
 {
 	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
 }
+
+struct page *
+linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
+{
+	vm_page_t page;
+	int rv;
+
+	if ((gfp & GFP_NOWAIT) != 0)
+		panic("GFP_NOWAIT is unimplemented");
+
+	VM_OBJECT_WLOCK(obj);
+	page = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+	    VM_ALLOC_WIRED);
+	if (page->valid != VM_PAGE_BITS_ALL) {
+		vm_page_xbusy(page);
+		if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
+			rv = vm_pager_get_pages(obj, &page, 1, NULL, NULL);
+			if (rv != VM_PAGER_OK) {
+				vm_page_lock(page);
+				vm_page_unwire(page, PQ_NONE);
+				vm_page_free(page);
+				vm_page_unlock(page);
+				VM_OBJECT_WUNLOCK(obj);
+				return (ERR_PTR(-EINVAL));
+			}
+			MPASS(page->valid == VM_PAGE_BITS_ALL);
+		} else {
+			pmap_zero_page(page);
+			page->valid = VM_PAGE_BITS_ALL;
+			page->dirty = 0;
+		}
+		vm_page_xunbusy(page);
+	}
+	vm_page_lock(page);
+	vm_page_hold(page);
+	vm_page_unlock(page);
+	VM_OBJECT_WUNLOCK(obj);
+	return (page);
+}
+
+struct linux_file *
+linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+	struct fileobj {
+		struct linux_file file __aligned(sizeof(void *));
+		struct vnode vnode __aligned(sizeof(void *));
+	};
+	struct fileobj *fileobj;
+	struct linux_file *filp;
+	struct vnode *vp;
+	int error;
+
+	fileobj = kzalloc(sizeof(*fileobj), GFP_KERNEL);
+	if (fileobj == NULL) {
+		error = -ENOMEM;
+		goto err_0;
+	}
+	filp = &fileobj->file;
+	vp = &fileobj->vnode;
+
+	filp->f_count = 1;
+	filp->f_vnode = vp;
+	filp->f_shmem = vm_pager_allocate(OBJT_DEFAULT, NULL, size,
+	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
+	if (filp->f_shmem == NULL) {
+		error = -ENOMEM;
+		goto err_1;
+	}
+	return (filp);
+err_1:
+	kfree(filp);
+err_0:
+	return (ERR_PTR(error));
+}
+
+static vm_ooffset_t
+linux_invalidate_mapping_pages_sub(vm_object_t obj, vm_pindex_t start,
+    vm_pindex_t end, int flags)
+{
+	int start_count, end_count;
+
+	VM_OBJECT_WLOCK(obj);
+	start_count = obj->resident_page_count;
+	vm_object_page_remove(obj, start, end, flags);
+	end_count = obj->resident_page_count;
+	VM_OBJECT_WUNLOCK(obj);
+	return (start_count - end_count);
+}
+
+unsigned long
+linux_invalidate_mapping_pages(vm_object_t obj, pgoff_t start, pgoff_t end)
+{
+
+	return (linux_invalidate_mapping_pages_sub(obj, start, end, OBJPR_CLEANONLY));
+}
+
+void
+linux_shmem_truncate_range(vm_object_t obj, loff_t lstart, loff_t lend)
+{
+	vm_pindex_t start = OFF_TO_IDX(lstart + PAGE_SIZE - 1);
+	vm_pindex_t end = OFF_TO_IDX(lend + 1);
+
+	(void) linux_invalidate_mapping_pages_sub(obj, start, end, 0);
+}
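A quick worked example of the byte-offset-to-page-index conversion used by linux_shmem_truncate_range() above, assuming 4 KB pages (numbers are illustrative):

/*
 * Truncating the inclusive byte range [4096, 12287] with PAGE_SIZE = 4096:
 *
 *	start = OFF_TO_IDX(4096 + 4096 - 1) = OFF_TO_IDX(8191)  = 1
 *	end   = OFF_TO_IDX(12287 + 1)       = OFF_TO_IDX(12288) = 3
 *
 * linux_invalidate_mapping_pages_sub(obj, 1, 3, 0) then removes the pages at
 * indices 1 and 2 (the end index is exclusive): only pages lying entirely
 * within the range are dropped, since rounding lstart up leaves a partially
 * covered leading page untouched.
 */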