LinuxKPI: Import linux_page.c and some dependent code from drm-kmod

No functional changes intended

Reviewed by:	hselasky, manu, markj
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D32167
commit c072f6e856 (parent 88531adbfb)
Vladimir Kondratyev, 2021-09-29 23:15:37 +03:00
5 changed files with 236 additions and 0 deletions

linux/highmem.h (new file)

@@ -0,0 +1,118 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _LINUX_HIGHMEM_H_
#define _LINUX_HIGHMEM_H_

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <linux/page.h>

#define PageHighMem(p)  (0)

static inline vm_page_t
kmap_to_page(void *addr)
{

        return (virt_to_page(addr));
}

static inline void *
kmap(vm_page_t page)
{
        struct sf_buf *sf;

        if (PMAP_HAS_DMAP) {
                return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page)));
        } else {
                sched_pin();
                sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
                if (sf == NULL) {
                        sched_unpin();
                        return (NULL);
                }
                return ((void *)sf_buf_kva(sf));
        }
}

static inline void *
kmap_atomic_prot(vm_page_t page, pgprot_t prot)
{
        vm_memattr_t attr = pgprot2cachemode(prot);

        if (attr != VM_MEMATTR_DEFAULT) {
                vm_page_lock(page);
                page->flags |= PG_FICTITIOUS;
                vm_page_unlock(page);
                pmap_page_set_memattr(page, attr);
        }
        return (kmap(page));
}

static inline void *
kmap_atomic(vm_page_t page)
{

        return (kmap_atomic_prot(page, VM_PROT_ALL));
}

static inline void
kunmap(vm_page_t page)
{
        struct sf_buf *sf;

        if (!PMAP_HAS_DMAP) {
                /* lookup SF buffer in list */
                sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);

                /*
                 * Double-free: drop both the reference taken by the
                 * lookup above and the one taken by the original kmap().
                 */
                sf_buf_free(sf);
                sf_buf_free(sf);

                sched_unpin();
        }
}

static inline void
kunmap_atomic(void *vaddr)
{

        if (!PMAP_HAS_DMAP)
                kunmap(virt_to_page(vaddr));
}

#endif /* _LINUX_HIGHMEM_H_ */
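
For reference, a consumer of this header must pair every successful kmap() with a kunmap() on the same page: on platforms without a direct map, the mapping borrows an sf_buf and pins the thread. A minimal sketch, not part of the commit (copy_one_page() is a hypothetical helper, and len is assumed to be at most PAGE_SIZE):

static int
copy_one_page(vm_page_t page, void *dst, size_t len)
{
        void *va;

        /* kmap() can return NULL on sf_buf-backed platforms. */
        va = kmap(page);
        if (va == NULL)
                return (ENOMEM);
        memcpy(dst, va, len);
        kunmap(page);
        return (0);
}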

linux/mm.h

@@ -290,4 +290,8 @@ vmalloc_to_page(const void *addr)
extern int is_vmalloc_addr(const void *addr);
void si_meminfo(struct sysinfo *si);

#define unmap_mapping_range(...)        lkpi_unmap_mapping_range(__VA_ARGS__)
void lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused);

#endif /* _LINUX_MM_H_ */

linux/page.h

@@ -41,6 +41,10 @@
#include <vm/vm_page.h>
#include <vm/pmap.h>
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif
typedef unsigned long linux_pte_t;
typedef unsigned long linux_pmd_t;
typedef unsigned long linux_pgd_t;
@@ -53,6 +57,8 @@ typedef unsigned long pgprot_t;
CTASSERT((VM_PROT_ALL & -LINUXKPI_PROT_VALID) == 0);
#define PAGE_KERNEL_IO 0x0000
static inline pgprot_t
cachemode2protval(vm_memattr_t attr)
{
@@ -72,6 +78,7 @@ pgprot2cachemode(pgprot_t prot)
#define page_to_pfn(pp) (VM_PAGE_TO_PHYS(pp) >> PAGE_SHIFT)
#define pfn_to_page(pfn) (PHYS_TO_VM_PAGE((pfn) << PAGE_SHIFT))
#define nth_page(page,n) pfn_to_page(page_to_pfn(page) + (n))
#define page_to_phys(page) VM_PAGE_TO_PHYS(page)
#define clear_page(page) memset(page, 0, PAGE_SIZE)
#define pgprot_noncached(prot) \
@@ -93,4 +100,19 @@ pgprot2cachemode(pgprot_t prot)
#undef trunc_page
#define trunc_page(x) ((uintptr_t)(x) & ~(PAGE_SIZE - 1))
#if defined(__i386__) || defined(__amd64__)
#undef clflushopt
static inline void
lkpi_clflushopt(unsigned long addr)
{

        if (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT)
                clflushopt(addr);
        else if (cpu_feature & CPUID_CLFSH)
                clflush(addr);
        else
                pmap_invalidate_cache();
}
#define clflushopt(x)   lkpi_clflushopt((unsigned long)(x))
#endif

#endif /* _LINUX_PAGE_H_ */
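
lkpi_clflushopt() picks the strongest cache-flush primitive the CPU advertises: CLFLUSHOPT where available, CLFLUSH as a fallback, and a full cache invalidation otherwise. A hedged sketch of flushing a whole buffer line by line (flush_dcache_range_sketch() is illustrative; cpu_clflush_line_size is the cache-line size exported by the x86 machine-dependent code, and mb() is the LinuxKPI barrier shim):

static void
flush_dcache_range_sketch(void *buf, size_t len)
{
        char *p, *end;

        end = (char *)buf + len;
        /* Align down to a cache-line boundary, then flush each line. */
        for (p = (char *)((uintptr_t)buf &
            ~((uintptr_t)cpu_clflush_line_size - 1));
            p < end; p += cpu_clflush_line_size)
                clflushopt(p);
        mb();   /* order the flushes against later accesses */
}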

linux/scatterlist.h

@@ -4,6 +4,7 @@
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -534,4 +535,61 @@ sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
        return (copied);
}

static inline size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{

        return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
}

static inline size_t
sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    void *buf, size_t buflen, off_t offset)
{
        struct sg_page_iter iter;
        struct scatterlist *sg;
        struct page *page;
        struct sf_buf *sf;
        char *vaddr;
        size_t total = 0;
        size_t len;

        if (!PMAP_HAS_DMAP)
                sched_pin();
        for_each_sg_page(sgl, &iter, nents, 0) {
                sg = iter.sg;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                }
                len = ulmin(buflen, sg->length - offset);
                if (len == 0)
                        break;

                page = sg_page_iter_page(&iter);
                if (!PMAP_HAS_DMAP) {
                        sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
                        if (sf == NULL)
                                break;
                        vaddr = (char *)sf_buf_kva(sf);
                } else
                        vaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page));
                memcpy(buf, vaddr + sg->offset + offset, len);
                if (!PMAP_HAS_DMAP)
                        sf_buf_free(sf);

                /* start at beginning of next page */
                offset = 0;

                /* advance buffer */
                buf = (char *)buf + len;
                buflen -= len;
                total += len;
        }
        if (!PMAP_HAS_DMAP)
                sched_unpin();
        return (total);
}

#endif /* _LINUX_SCATTERLIST_H_ */
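
Together with sg_pcopy_from_buffer() above, these complete the usual Linux round-trip between a flat buffer and a scatterlist. A sketch under the assumption that sgl was already populated (e.g. via sg_alloc_table()) and holds at least len bytes; sg_roundtrip_sketch() is an illustrative name:

static bool
sg_roundtrip_sketch(struct scatterlist *sgl, unsigned int nents,
    void *scratch, size_t len)
{

        /* Scatter the buffer into the list... */
        if (sg_copy_from_buffer(sgl, nents, scratch, len) != len)
                return (false);
        /* ...and gather it back, starting 8 bytes into the payload. */
        return (sg_pcopy_to_buffer(sgl, nents, scratch, len - 8, 8) ==
            len - 8);
}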

linux_page.c

@@ -334,3 +334,37 @@ lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
        return (VM_FAULT_NOPAGE);
}

/*
 * Although the FreeBSD version of unmap_mapping_range has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * differ:
 * @obj must match the vm_private_data field of the vm_area_struct returned
 *      by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused)
{
        vm_object_t devobj;
        vm_page_t page;
        int i, page_count;

        devobj = cdev_pager_lookup(obj);
        if (devobj != NULL) {
                page_count = OFF_TO_IDX(holelen);

                VM_OBJECT_WLOCK(devobj);
retry:
                for (i = 0; i < page_count; i++) {
                        page = vm_page_lookup(devobj, i);
                        if (page == NULL)
                                continue;
                        if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
                                goto retry;
                        cdev_pager_free_page(devobj, page);
                }
                VM_OBJECT_WUNLOCK(devobj);
                vm_object_deallocate(devobj);
        }
}
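
From a driver's point of view this function is reached through the unmap_mapping_range() macro added to linux/mm.h above. A sketch of the intended call pattern (illustrative only; priv stands for the pointer that the mmap handler installed as vm_private_data):

static void
zap_object_mappings_sketch(void *priv, size_t size)
{

        /* holebegin and even_cows are ignored by the FreeBSD version. */
        unmap_mapping_range(priv, 0, size, 1);
}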