Enable ib_dev.mmap function

Removed the #ifdef __linux__ guard from this function.
Added stub functions for contiguous pages to avoid compilation
errors.

Submitted by:	Orit Moskovich (oritm mellanox.com)
Approved by:	re
Author:	Alfred Perlstein
Date:	2013-10-01 15:42:38 +00:00
Commit:	e18c176d9d
Parent:	92c6196caa

3 changed files with 73 additions and 2 deletions


@@ -530,3 +530,46 @@ int ib_umem_page_count(struct ib_umem *umem)
 	return n;
 }
 EXPORT_SYMBOL(ib_umem_page_count);
+
+/**********************************************/
+/*
+ * Stub functions for contiguous pages -
+ * We currently do not support this feature.
+ */
+/**********************************************/
+
+/**
+ * ib_cmem_release_contiguous_pages - release memory allocated by
+ * ib_cmem_alloc_contiguous_pages.
+ * @cmem: cmem struct to release
+ */
+void ib_cmem_release_contiguous_pages(struct ib_cmem *cmem)
+{
+}
+EXPORT_SYMBOL(ib_cmem_release_contiguous_pages);
+
+/**
+ * ib_cmem_alloc_contiguous_pages - allocate contiguous pages
+ * @context: userspace context to allocate memory for
+ * @total_size: total required size for the allocation.
+ * @page_size_order: order of one contiguous page.
+ */
+struct ib_cmem *ib_cmem_alloc_contiguous_pages(struct ib_ucontext *context,
+					       unsigned long total_size,
+					       unsigned long page_size_order)
+{
+	return NULL;
+}
+EXPORT_SYMBOL(ib_cmem_alloc_contiguous_pages);
+
+/**
+ * ib_cmem_map_contiguous_pages_to_vma - map contiguous pages into VMA
+ * @ib_cmem: cmem structure returned by ib_cmem_alloc_contiguous_pages
+ * @vma: VMA to inject pages into.
+ */
+int ib_cmem_map_contiguous_pages_to_vma(struct ib_cmem *ib_cmem,
+					struct vm_area_struct *vma)
+{
+	return 0;
+}
+EXPORT_SYMBOL(ib_cmem_map_contiguous_pages_to_vma);
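Because the allocator stub above always returns NULL, any consumer of this
API has to treat allocation failure as the normal case for now. A minimal
sketch of a hypothetical caller (map_contig_region is illustrative and not
part of this commit):

	/*
	 * Hypothetical caller: try to back a VMA with contiguous pages,
	 * failing cleanly while the feature is unsupported.
	 */
	static int map_contig_region(struct ib_ucontext *context,
				     struct vm_area_struct *vma)
	{
		struct ib_cmem *cmem;

		/* The stub always returns NULL until the feature lands. */
		cmem = ib_cmem_alloc_contiguous_pages(context,
		    vma->vm_end - vma->vm_start, 0 /* page_size_order */);
		if (cmem == NULL)
			return -ENOMEM;

		/* The map stub returns 0, so this path is a no-op for now. */
		if (ib_cmem_map_contiguous_pages_to_vma(cmem, vma)) {
			ib_cmem_release_contiguous_pages(cmem);
			return -EAGAIN;
		}
		return 0;
	}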


@@ -726,6 +726,7 @@ static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
 		addr = ALIGN(vma->vm_end, 1 << page_size_order);
 	}
 }
+#endif
 
 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
@@ -780,7 +781,6 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	return 0;
 }
-#endif
 
 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
 				      struct ib_ucontext *context,
@@ -1984,8 +1984,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
 	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
 	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
-#ifdef __linux__
 	ibdev->ib_dev.mmap = mlx4_ib_mmap;
+#ifdef __linux__
 	ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
 #endif
 	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
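
With the #ifdef moved below the mmap assignment, ibdev->ib_dev.mmap is now
set on FreeBSD as well, while get_unmapped_area stays Linux-only. The uverbs
layer reaches the driver through exactly this pointer when userspace calls
mmap() on the device file; roughly (a simplified sketch of that dispatch,
not the literal FreeBSD source):

	static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		struct ib_uverbs_file *file = filp->private_data;

		/* No user context yet: nothing to map against. */
		if (!file->ucontext)
			return -ENODEV;

		/* Before this commit the mmap pointer was never set on
		 * FreeBSD, so userspace mmap() on the device was unusable. */
		return file->device->ib_dev->mmap(file->ucontext, vma);
	}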


@@ -57,6 +57,24 @@ struct ib_umem {
 	unsigned long	diff;
 };
 
+struct ib_cmem {
+	struct ib_ucontext	*context;
+	size_t			length;
+	/* Linked list of the contiguous blocks that make up this cmem */
+	struct list_head	ib_cmem_block;
+	/* Order of a cmem block: 2^block_order equals the number of
+	 * physical pages per block.
+	 */
+	unsigned long		block_order;
+	/* Reference counter for this memory area; when the value
+	 * becomes 0, the pages are returned to the kernel.
+	 */
+	struct kref		refcount;
+};
+
 struct ib_umem_chunk {
 	struct list_head	list;
 	int			nents;
@@ -70,4 +88,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 void ib_umem_release(struct ib_umem *umem);
 int ib_umem_page_count(struct ib_umem *umem);
 
+int ib_cmem_map_contiguous_pages_to_vma(struct ib_cmem *ib_cmem,
+					struct vm_area_struct *vma);
+struct ib_cmem *ib_cmem_alloc_contiguous_pages(struct ib_ucontext *context,
+					       unsigned long total_size,
+					       unsigned long page_size_order);
+void ib_cmem_release_contiguous_pages(struct ib_cmem *cmem);
+int ib_umem_map_to_vma(struct ib_umem *umem,
+		       struct vm_area_struct *vma);
+
 #endif /* IB_UMEM_H */
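
The refcount field in struct ib_cmem follows the kernel's standard kref
pattern, which is what the "pages are returned to the kernel" comment refers
to. A sketch of how a non-stub implementation could wire it up
(ib_cmem_release and ib_cmem_put are hypothetical helpers, not declared by
this commit):

	#include <linux/kref.h>
	#include <linux/slab.h>

	/* Hypothetical release callback: runs only once the last
	 * reference is dropped; a real implementation would walk
	 * cmem->ib_cmem_block and free each contiguous block here. */
	static void ib_cmem_release(struct kref *ref)
	{
		struct ib_cmem *cmem =
			container_of(ref, struct ib_cmem, refcount);

		kfree(cmem);
	}

	/* Lifecycle: kref_init() once at allocation, kref_get() for
	 * each additional user (e.g. every VMA that maps the area),
	 * and kref_put() on release; the count reaching zero invokes
	 * ib_cmem_release. */
	static void ib_cmem_put(struct ib_cmem *cmem)
	{
		kref_put(&cmem->refcount, ib_cmem_release);
	}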