swiotlb: Refactor swiotlb init functions
Add a new function, swiotlb_init_io_tlb_mem, for the io_tlb_mem struct initialization to make the code reusable.

Signed-off-by: Claire Chang <tientzu@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Stefano Stabellini <sstabellini@kernel.org>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Tested-by: Will Deacon <will@kernel.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
commit 0a65579cdd
parent e73f0f0ee7
@@ -168,9 +168,28 @@ void __init swiotlb_update_mem_attributes(void)
 	memset(vaddr, 0, bytes);
 }
 
+static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
+				    unsigned long nslabs, bool late_alloc)
+{
+	void *vaddr = phys_to_virt(start);
+	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
+
+	mem->nslabs = nslabs;
+	mem->start = start;
+	mem->end = mem->start + bytes;
+	mem->index = 0;
+	mem->late_alloc = late_alloc;
+	spin_lock_init(&mem->lock);
+	for (i = 0; i < mem->nslabs; i++) {
+		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
+		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
+		mem->slots[i].alloc_size = 0;
+	}
+	memset(vaddr, 0, bytes);
+}
+
 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
-	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
 	struct io_tlb_mem *mem;
 	size_t alloc_size;
 
@@ -186,16 +205,8 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	if (!mem)
 		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
 		      __func__, alloc_size, PAGE_SIZE);
-	mem->nslabs = nslabs;
-	mem->start = __pa(tlb);
-	mem->end = mem->start + bytes;
-	mem->index = 0;
-	spin_lock_init(&mem->lock);
-	for (i = 0; i < mem->nslabs; i++) {
-		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
-		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
-		mem->slots[i].alloc_size = 0;
-	}
+
+	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
 
 	io_tlb_default_mem = mem;
 	if (verbose)
@@ -282,8 +293,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
-	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
 	struct io_tlb_mem *mem;
+	unsigned long bytes = nslabs << IO_TLB_SHIFT;
 
 	if (swiotlb_force == SWIOTLB_NO_FORCE)
 		return 0;
@@ -297,20 +308,9 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!mem)
 		return -ENOMEM;
 
-	mem->nslabs = nslabs;
-	mem->start = virt_to_phys(tlb);
-	mem->end = mem->start + bytes;
-	mem->index = 0;
-	mem->late_alloc = 1;
-	spin_lock_init(&mem->lock);
-	for (i = 0; i < mem->nslabs; i++) {
-		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
-		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
-		mem->slots[i].alloc_size = 0;
-	}
-
+	memset(mem, 0, sizeof(*mem));
 	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
-	memset(tlb, 0, bytes);
+	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
 
 	io_tlb_default_mem = mem;
 	swiotlb_print_info();
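The point of the refactor is that code which needs its own io_tlb_mem pool can now call swiotlb_init_io_tlb_mem() instead of open-coding the field initialization. Below is a minimal sketch of such a caller, not part of this patch: the function name and allocation path are hypothetical, it is assumed to live in kernel/dma/swiotlb.c (the helper is static there), and start is assumed to already refer to a usable bounce buffer of nslabs << IO_TLB_SHIFT bytes in the linear map.

/*
 * Hypothetical caller (illustration only, not part of this commit):
 * allocate an io_tlb_mem descriptor and let the new helper fill it in
 * and zero the backing buffer.
 */
static struct io_tlb_mem *example_io_tlb_pool_create(phys_addr_t start,
						      unsigned long nslabs)
{
	struct io_tlb_mem *mem;

	/* Descriptor plus one slot entry per IO_TLB_SHIFT-sized slab. */
	mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
	if (!mem)
		return NULL;

	/* late_alloc = true: the bounce buffer was not a memblock allocation. */
	swiotlb_init_io_tlb_mem(mem, start, nslabs, true);
	return mem;
}

The two existing callers in this patch follow the same shape: swiotlb_init_with_tbl() passes late_alloc = false for the memblock-allocated boot-time buffer, while swiotlb_late_init_with_tbl() passes late_alloc = true for the vmalloc'd one.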