/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * cvmx-tlb supplies per-core TLB access functions for Simple Executive
 * applications.
 *
 * <hr>$Revision: 41586 $<hr>
 */
#include "cvmx.h"
#include "cvmx-tlb.h"

//#define DEBUG

/**
 * @INTERNAL
 * Convert a page mask to a human-readable string
 */
static inline const char* __mask_to_str(uint64_t mask)
{
    /* Most OCTEON processors do not support the 1KB page size */
    uint64_t non_1k_mask = mask + CVMX_TLB_PAGEMASK_4K;

    switch (non_1k_mask) {
    case CVMX_TLB_PAGEMASK_4K:   return "4kb";
    case CVMX_TLB_PAGEMASK_16K:  return "16kb";
    case CVMX_TLB_PAGEMASK_64K:  return "64kb";
    case CVMX_TLB_PAGEMASK_256K: return "256kb";
    case CVMX_TLB_PAGEMASK_1M:   return "1Mb";
    case CVMX_TLB_PAGEMASK_4M:   return "4Mb";
    case CVMX_TLB_PAGEMASK_16M:  return "16Mb";
    case CVMX_TLB_PAGEMASK_64M:  return "64Mb";
    case CVMX_TLB_PAGEMASK_256M: return "256Mb";
    }

    return "";
}

/**
 * @INTERNAL
 * issue the tlb read instruction
 */
static inline void __tlb_read(void) {
    CVMX_EHB;
    CVMX_TLBR;
    CVMX_EHB;
}

/**
 * @INTERNAL
 * issue the tlb write instruction
 */
static inline void __tlb_write(void) {
    CVMX_EHB;
    CVMX_TLBWI;
    CVMX_EHB;
}

/**
 * @INTERNAL
 * issue the tlb probe instruction
 */
static inline int __tlb_probe(uint64_t hi) {
    int index;

    CVMX_EHB;
    CVMX_MT_ENTRY_HIGH(hi);
    CVMX_TLBP;
    CVMX_EHB;

    CVMX_MF_TLB_INDEX(index);

    /* TLBP sets the probe-failure bit (bit 31 of Index) when no entry
       matches, so any negative index means "not found" */
    if (index < 0) index = -1;

    return index;
}

/**
 * @INTERNAL
 * read a single tlb entry
 *
 * return  0: tlb entry is read
 *        -1: index is invalid
 */
static inline int __tlb_read_index(uint32_t tlbi) {

    if (tlbi >= cvmx_tlb_size_limit()) {
        return -1;
    }

    CVMX_MT_TLB_INDEX(tlbi);
    __tlb_read();

    return 0;
}

/**
 * @INTERNAL
 * write a single tlb entry
 *
 * return  0: tlb entry is written
 *        -1: index is invalid
 */
static inline int __tlb_write_index(uint32_t tlbi,
                                    uint64_t hi, uint64_t lo0,
                                    uint64_t lo1, uint64_t pagemask)
{
    if (tlbi >= cvmx_tlb_size_limit()) {
        return -1;
    }

#ifdef DEBUG
    cvmx_dprintf("cvmx-tlb-dbg: "
                 "write TLB %d: hi %lx, lo0 %lx, lo1 %lx, pagemask %lx\n",
                 tlbi, hi, lo0, lo1, pagemask);
#endif

    CVMX_MT_TLB_INDEX(tlbi);
    CVMX_MT_ENTRY_HIGH(hi);
    CVMX_MT_ENTRY_LO_0(lo0);
    CVMX_MT_ENTRY_LO_1(lo1);
    CVMX_MT_PAGEMASK(pagemask);
    __tlb_write();

    return 0;
}

/**
 * @INTERNAL
 * Determine if a TLB entry is free to use
 */
static inline int __tlb_entry_is_free(uint32_t tlbi) {
    int ret = 0;
    uint64_t lo0 = 0, lo1 = 0;

    if (tlbi < cvmx_tlb_size_limit()) {

        __tlb_read_index(tlbi);

        /* Unused entries have neither even nor odd page mapped */
        CVMX_MF_ENTRY_LO_0(lo0);
        CVMX_MF_ENTRY_LO_1(lo1);

        if (!(lo0 & TLB_VALID) && !(lo1 & TLB_VALID)) {
            ret = 1;
        }
    }

    return ret;
}

/**
 * @INTERNAL
 * dump a single tlb entry
 */
static inline void __tlb_dump_index(uint32_t tlbi)
{
    if (tlbi < cvmx_tlb_size_limit()) {

        if (__tlb_entry_is_free(tlbi)) {
#ifdef DEBUG
            cvmx_dprintf("Index: %3d Free \n", tlbi);
#endif
        } else {
            uint64_t lo0, lo1, pgmask;
            uint32_t hi, c0, c1;
#ifdef DEBUG
            int width = 13;
#endif

            __tlb_read_index(tlbi);

            CVMX_MF_ENTRY_HIGH(hi);
            CVMX_MF_ENTRY_LO_0(lo0);
            CVMX_MF_ENTRY_LO_1(lo1);
            CVMX_MF_PAGEMASK(pgmask);

#ifdef DEBUG
            cvmx_dprintf("Index: %3d pgmask=%s ", tlbi, __mask_to_str(pgmask));
#endif

            c0 = (lo0 >> 3) & 7;
            c1 = (lo1 >> 3) & 7;

#ifdef DEBUG
            cvmx_dprintf("va=%0*lx asid=%02x\n",
                         width, (hi & ~0x1fffUL), hi & 0xff);

            cvmx_dprintf("\t[pa=%0*lx c=%d d=%d v=%d g=%d] ",
                         width,
                         (lo0 << 6) & PAGE_MASK, c0,
                         (lo0 & 4) ? 1 : 0,
                         (lo0 & 2) ? 1 : 0,
                         (lo0 & 1) ? 1 : 0);
            cvmx_dprintf("[pa=%0*lx c=%d d=%d v=%d g=%d]\n",
                         width,
                         (lo1 << 6) & PAGE_MASK, c1,
                         (lo1 & 4) ? 1 : 0,
                         (lo1 & 2) ? 1 : 0,
                         (lo1 & 1) ? 1 : 0);
#endif
        }
    }
}

/**
 * @INTERNAL
 * Get the current value of the C0 wired index register, which marks the
 * boundary between the wired (fixed) entries and the run-time entries
 */
static inline uint32_t __tlb_wired_index(void) {
    uint32_t tlbi;

    CVMX_MF_TLB_WIRED(tlbi);
    return tlbi;
}

/**
 * Set up a wired entry. This function is designed to be used by the Simple
 * Executive to set up its virtual to physical address mapping at start up
 * time. After the mapping is set up, the remaining unused TLB entries can
 * be used for run time shared memory mapping.
 *
 * Calling this function causes the C0 wired index register to increase.
 * The wired index register points to the separation between the fixed TLB
 * mapping and the run time shared memory mapping.
 *
 * @param hi       Entry Hi
 * @param lo0      Entry Low0
 * @param lo1      Entry Low1
 * @param pagemask Pagemask
 *
 * @return  0: the entry is added
 * @return -1: out of TLB entries
 */
int cvmx_tlb_add_wired_entry(uint64_t hi, uint64_t lo0,
                             uint64_t lo1, uint64_t pagemask)
{
    uint64_t index;
    int ret = -1;

    index = __tlb_wired_index();

    /* Check to make sure the index is free to use */
    if (index < cvmx_tlb_size_limit() && __tlb_entry_is_free(index)) {
        __tlb_write_index(index, hi, lo0, lo1, pagemask);
        /* Increase the wired index by 1 */
        CVMX_MT_TLB_WIRED(index + 1);
        ret = 0;
    }

    return ret;
}
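
/*
 * Illustrative usage sketch (not part of the original file): adding a wired
 * mapping for an 8KB region at start-up by building the raw EntryHi/EntryLo
 * values directly. The EntryLo layout follows what this file itself uses
 * (PFN = paddr >> 12 placed at bit 6; G, V, D in bits 0..2; cache attribute
 * in bits 5:3, see __tlb_dump_index()). The cache attribute value 3 and the
 * addresses are assumptions for the example only.
 *
 *   uint64_t vaddr = 0x10000000ULL;                            // 8KB-aligned virtual base (example value)
 *   uint64_t paddr = 0x20000000ULL;                            // physical base (example value)
 *   uint64_t flags = (3ULL << 3) | (1 << 2) | (1 << 1) | 1;    // C=3 (assumed), D=1, V=1, G=1
 *   uint64_t hi    = (vaddr >> 13) << 13;                      // base of the even/odd page pair
 *   uint64_t lo0   = ((paddr >> 12) << 6) | flags;             // even 4KB page
 *   uint64_t lo1   = (((paddr + 0x1000) >> 12) << 6) | flags;  // odd 4KB page
 *
 *   if (cvmx_tlb_add_wired_entry(hi, lo0, lo1, 0) != 0)        // pagemask 0 selects 4KB pages
 *       cvmx_dprintf("cvmx-tlb: no free entry for the wired mapping\n");
 */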

/**
 * Find a free entry that can be used for shared memory mapping.
 *
 * @return -1: no free entry found
 * @return >= 0: index of a free entry
 */
int cvmx_tlb_allocate_runtime_entry(void)
{
    uint32_t i;
    int ret = -1;

    for (i = __tlb_wired_index(); i < cvmx_tlb_size_limit(); i++) {

        /* Check to make sure the index is free to use */
        if (__tlb_entry_is_free(i)) {
            /* Found one, return it */
            ret = i;
            break;
        }
    }

    return ret;
}

/**
 * Invalidate the TLB entry. Remove previous mapping if one was set up.
 */
void cvmx_tlb_free_runtime_entry(uint32_t tlbi)
{
    /* Invalidate an unwired TLB entry */
    if ((tlbi < cvmx_tlb_size_limit()) && (tlbi >= __tlb_wired_index())) {
        __tlb_write_index(tlbi, 0xffffffff80000000ULL, 0, 0, 0);
    }
}

/**
 * Program a single TLB entry to enable the provided vaddr to paddr mapping.
 *
 * @param index     Index of the TLB entry
 * @param vaddr     The virtual address for this mapping
 * @param paddr     The physical address for this mapping
 * @param size      Size of the mapping
 * @param tlb_flags Entry mapping flags
 */
void cvmx_tlb_write_entry(int index, uint64_t vaddr, uint64_t paddr,
                          uint64_t size, uint64_t tlb_flags) {
    uint64_t lo0, lo1, hi, pagemask;

    if (__is_power_of_two(size)) {
        if ((__log2(size) & 1) == 0) {
            /* log2(size) is even, so size matches a single page size.
               Only one page of the even/odd pair needs to be mapped;
               figure out whether it is the even or the odd page */
            if ((vaddr >> __log2(size)) & 1) {
                lo0 = 0;
                lo1 = ((paddr >> 12) << 6) | tlb_flags;
                hi  = ((vaddr - size) >> 12) << 12;
            } else {
                lo0 = ((paddr >> 12) << 6) | tlb_flags;
                lo1 = 0;
                hi  = ((vaddr) >> 12) << 12;
            }
            pagemask = (size - 1) & (~1 << 11);
        } else {
            /* size covers both pages of the pair; each page is size/2 */
            lo0 = ((paddr >> 12) << 6) | tlb_flags;
            lo1 = (((paddr + size / 2) >> 12) << 6) | tlb_flags;
            hi  = ((vaddr) >> 12) << 12;
            pagemask = ((size / 2) - 1) & (~1 << 11);
        }

        __tlb_write_index(index, hi, lo0, lo1, pagemask);
    }
}
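
/*
 * Worked example (illustrative, not part of the original file) of what
 * cvmx_tlb_write_entry() computes for an 8KB mapping, i.e. size = 0x2000.
 * log2(0x2000) = 13 is odd, so both pages of the even/odd pair are used,
 * each covering size/2 = 4KB:
 *
 *   lo0      = ((paddr >> 12) << 6) | tlb_flags             // even page at paddr
 *   lo1      = (((paddr + 0x1000) >> 12) << 6) | tlb_flags  // odd page at paddr + 4KB
 *   hi       = (vaddr >> 12) << 12                          // page-aligned vaddr
 *   pagemask = (0x1000 - 1) & (~1 << 11) = 0                // 0 selects 4KB pages
 *
 * For size = 0x1000 (a single 4KB page, even log2), only lo0 or lo1 is
 * filled in, depending on bit 12 of vaddr, and the other half of the pair
 * is left invalid (0).
 */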

/**
 * Program a single TLB entry to enable the provided vaddr to paddr mapping.
 * This version adds a wired entry that should not be changed at run time.
 *
 * @param vaddr     The virtual address for this mapping
 * @param paddr     The physical address for this mapping
 * @param size      Size of the mapping
 * @param tlb_flags Entry mapping flags
 *
 * @return 1: fixed entry added
 *         0: TLB out of entries, or the entry could not be written
 */
int cvmx_tlb_add_fixed_entry(uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t tlb_flags) {

    uint64_t index;
    int ret = 0;

    CVMX_MF_TLB_WIRED(index);

    /* Check to make sure the index is free to use */
    if (index < cvmx_tlb_size_limit() && __tlb_entry_is_free(index)) {
        cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);

        if (!__tlb_entry_is_free(index)) {
            /* Bump up the wired register */
            CVMX_MT_TLB_WIRED(index + 1);
            ret = 1;
        }
    }
    return ret;
}
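
/*
 * Illustrative usage sketch (not part of the original file): wiring a fixed
 * boot-time mapping with cvmx_tlb_add_fixed_entry(). The flag encoding
 * mirrors the EntryLo bits this file decodes in __tlb_dump_index() (cache
 * attribute in bits 5:3; D, V, G in bits 2:0); the cache attribute value 3
 * and the addresses/size are assumptions for the example only.
 *
 *   uint64_t flags = (3ULL << 3) | (1 << 2) | (1 << 1) | 1;   // C=3 (assumed), D=1, V=1, G=1
 *
 *   // Map an 8KB region 1:1 before any run-time entries are handed out
 *   if (!cvmx_tlb_add_fixed_entry(0x10000000ULL, 0x10000000ULL, 0x2000, flags))
 *       cvmx_dprintf("cvmx-tlb: could not add fixed mapping\n");
 */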

/**
 * Program a single TLB entry to enable the provided vaddr to paddr mapping.
 * This version writes a runtime entry. It will check the index to make sure
 * not to overwrite any fixed entries.
 *
 * @param index     Index of the TLB entry
 * @param vaddr     The virtual address for this mapping
 * @param paddr     The physical address for this mapping
 * @param size      Size of the mapping
 * @param tlb_flags Entry mapping flags
 */
void cvmx_tlb_write_runtime_entry(int index, uint64_t vaddr, uint64_t paddr,
                                  uint64_t size, uint64_t tlb_flags)
{
    int wired_index;
    CVMX_MF_TLB_WIRED(wired_index);

    if (index >= wired_index) {
        cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);
    }
}
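
/*
 * Illustrative run-time lifecycle sketch (not part of the original file):
 * a shared memory mapping is typically created by picking a free entry
 * above the wired boundary, programming it, and invalidating it when the
 * mapping is no longer needed. The vaddr, paddr and tlb_flags values are
 * assumptions for the example only.
 *
 *   int idx = cvmx_tlb_allocate_runtime_entry();
 *   if (idx >= 0) {
 *       cvmx_tlb_write_runtime_entry(idx, vaddr, paddr, 0x2000, tlb_flags);
 *
 *       // ... use the mapping; cvmx_tlb_lookup(vaddr) finds this entry again ...
 *
 *       cvmx_tlb_free_runtime_entry(idx);
 *   }
 */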

/**
 * Find the TLB index of a given virtual address
 *
 * @param vaddr The virtual address to look up
 * @return -1: not TLB mapped
 *         >= 0: TLB index
 */
int cvmx_tlb_lookup(uint64_t vaddr) {
    uint64_t hi = (vaddr >> 12) << 12; /* We always use ASID 0 */

    return __tlb_probe(hi);
}

/**
 * Debug routine to show all shared memory mappings
 */
void cvmx_tlb_dump_shared_mapping(void) {
    uint32_t tlbi;

    for (tlbi = __tlb_wired_index(); tlbi < cvmx_tlb_size_limit(); tlbi++) {
        __tlb_dump_index(tlbi);
    }
}

/**
 * Debug routine to show all TLB entries of this core
 */
void cvmx_tlb_dump_all(void) {
    uint32_t tlbi;

    for (tlbi = 0; tlbi < cvmx_tlb_size_limit(); tlbi++) {
        __tlb_dump_index(tlbi);
    }
}