gpu: ion: Use alloc_pages instead of vmalloc from the system heap
With this change the ion_system_heap will only use kernel address space when the memory is mapped into the kernel (rare case). Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com> [jstultz: modified patch to apply to staging directory] Signed-off-by: John Stultz <john.stultz@linaro.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent commit: b892bf75b2
This commit:   b15934b62d
@ -27,74 +27,102 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
|
||||
unsigned long size, unsigned long align,
|
||||
unsigned long flags)
|
||||
{
|
||||
buffer->priv_virt = vmalloc_user(size);
|
||||
if (!buffer->priv_virt)
|
||||
struct sg_table *table;
|
||||
struct scatterlist *sg;
|
||||
int i, j;
|
||||
int npages = PAGE_ALIGN(size) / PAGE_SIZE;
|
||||
|
||||
table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
|
||||
if (!table)
|
||||
return -ENOMEM;
|
||||
i = sg_alloc_table(table, npages, GFP_KERNEL);
|
||||
if (i)
|
||||
goto err0;
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
struct page *page;
|
||||
page = alloc_page(GFP_KERNEL);
|
||||
if (!page)
|
||||
goto err1;
|
||||
sg_set_page(sg, page, PAGE_SIZE, 0);
|
||||
}
|
||||
buffer->priv_virt = table;
|
||||
return 0;
|
||||
err1:
|
||||
for_each_sg(table->sgl, sg, i, j)
|
||||
__free_page(sg_page(sg));
|
||||
sg_free_table(table);
|
||||
err0:
|
||||
kfree(table);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void ion_system_heap_free(struct ion_buffer *buffer)
|
||||
{
|
||||
vfree(buffer->priv_virt);
|
||||
}
|
||||
|
||||
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
struct sg_table *table;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
|
||||
void *vaddr = buffer->priv_virt;
|
||||
int ret;
|
||||
struct scatterlist *sg;
|
||||
struct sg_table *table = buffer->priv_virt;
|
||||
|
||||
table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
|
||||
if (!table)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ret = sg_alloc_table(table, npages, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto err0;
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
struct page *page;
|
||||
page = vmalloc_to_page(vaddr);
|
||||
if (!page) {
|
||||
ret = -ENOMEM;
|
||||
goto err1;
|
||||
}
|
||||
sg_set_page(sg, page, PAGE_SIZE, 0);
|
||||
vaddr += PAGE_SIZE;
|
||||
}
|
||||
return table;
|
||||
err1:
|
||||
sg_free_table(table);
|
||||
err0:
|
||||
kfree(table);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
void ion_system_heap_unmap_dma(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
for_each_sg(table->sgl, sg, table->nents, i)
|
||||
__free_page(sg_page(sg));
|
||||
if (buffer->sg_table)
|
||||
sg_free_table(buffer->sg_table);
|
||||
kfree(buffer->sg_table);
|
||||
}
|
||||
|
||||
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
return buffer->priv_virt;
|
||||
}
|
||||
|
||||
/*
 * ion_system_heap_unmap_dma - intentionally empty: the sg_table
 * returned by map_dma is owned by the buffer and released in
 * ion_system_heap_free, so there is nothing to undo per-map.
 */
void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}
|
||||
|
||||
void *ion_system_heap_map_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
return buffer->priv_virt;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
void *vaddr;
|
||||
struct sg_table *table = buffer->priv_virt;
|
||||
struct page **pages = kmalloc(sizeof(struct page *) * table->nents,
|
||||
GFP_KERNEL);
|
||||
|
||||
for_each_sg(table->sgl, sg, table->nents, i)
|
||||
pages[i] = sg_page(sg);
|
||||
vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
|
||||
kfree(pages);
|
||||
|
||||
return vaddr;
|
||||
}
|
||||
|
||||
void ion_system_heap_unmap_kernel(struct ion_heap *heap,
|
||||
struct ion_buffer *buffer)
|
||||
{
|
||||
vunmap(buffer->vaddr);
|
||||
}
|
||||
|
||||
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
|
||||
struct sg_table *table = buffer->priv_virt;
|
||||
unsigned long addr = vma->vm_start;
|
||||
unsigned long offset = vma->vm_pgoff;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
if (offset) {
|
||||
offset--;
|
||||
continue;
|
||||
}
|
||||
vm_insert_page(vma, addr, sg_page(sg));
|
||||
addr += PAGE_SIZE;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ion_heap_ops vmalloc_ops = {
|
||||
|
Loading…
x
Reference in New Issue
Block a user