// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/dma-mapping.h>
#include <linux/hisi_acc_qm.h>
#include <linux/module.h>
#include <linux/slab.h>

#define HISI_ACC_SGL_SGE_NR_MIN		1
#define HISI_ACC_SGL_NR_MAX		256
#define HISI_ACC_SGL_ALIGN_SIZE		64
#define HISI_ACC_MEM_BLOCK_NR		5

struct acc_hw_sge {
	dma_addr_t buf;
	void *page_ctrl;
	__le32 len;
	__le32 pad;
	__le32 pad0;
	__le32 pad1;
};

/* use default sgl head size 64B */
struct hisi_acc_hw_sgl {
	dma_addr_t next_dma;
	__le16 entry_sum_in_chain;
	__le16 entry_sum_in_sgl;
	__le16 entry_length_in_sgl;
	__le16 pad0;
	__le64 pad1[5];
	struct hisi_acc_hw_sgl *next;
	struct acc_hw_sge sge_entries[];
} __aligned(1);
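
/*
 * Quick size check (a sketch assuming a 64-bit build, where dma_addr_t and
 * a pointer are both 8 bytes): 8 (next_dma) + 4 * 2 (the four __le16
 * fields) + 5 * 8 (pad1) + 8 (next) = 64 bytes, matching the "64B" head
 * size noted above, so sge_entries[] starts 64 bytes into each hw sgl slot.
 */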

struct hisi_acc_sgl_pool {
	struct mem_block {
		struct hisi_acc_hw_sgl *sgl;
		dma_addr_t sgl_dma;
		size_t size;
	} mem_block[HISI_ACC_MEM_BLOCK_NR];
	u32 sgl_num_per_block;
	u32 block_num;
	u32 count;
	u32 sge_nr;
	size_t sgl_size;
};
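
/*
 * A pool spreads its hw sgls across at most HISI_ACC_MEM_BLOCK_NR
 * DMA-coherent blocks: each full block holds sgl_num_per_block sgls, and a
 * trailing partial block holds the remainder, as laid out by
 * hisi_acc_create_sgl_pool() below.
 */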

/**
 * hisi_acc_create_sgl_pool() - Create a hw sgl pool.
 * @dev: The device which hw sgl pool belongs to.
 * @count: Count of hisi_acc_hw_sgl in pool.
 * @sge_nr: The count of sge in hw_sgl.
 *
 * This function creates a hw sgl pool; after this, the user can get hw sgl
 * memory from it.
 */
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr)
{
	u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
	struct hisi_acc_sgl_pool *pool;
	struct mem_block *block;
	u32 i, j;

	if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
		return ERR_PTR(-EINVAL);

	sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
		   sizeof(struct hisi_acc_hw_sgl);

	/*
	 * the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1),
	 * block size may exceed 2^31 on ia64, so the max of block size is 2^31
	 */
	block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
			   PAGE_SHIFT + MAX_ORDER - 1 : 31);
	sgl_num_per_block = block_size / sgl_size;
	block_num = count / sgl_num_per_block;
	remain_sgl = count % sgl_num_per_block;

	if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) ||
	    (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1))
		return ERR_PTR(-EINVAL);
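
	/*
	 * Worked sizing example with hypothetical but typical numbers
	 * (4 KiB pages, MAX_ORDER == 11, so PAGE_SHIFT + MAX_ORDER == 23 <= 32):
	 * block_size = 1 << 22 = 4 MiB. With, say, sge_nr == 255, sgl_size is
	 * 255 * 32 + 64 = 8224 bytes, giving 510 sgls per block and at most
	 * 5 * 510 = 2550 sgls in one pool.
	 */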

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);
	block = pool->mem_block;

	for (i = 0; i < block_num; i++) {
		block[i].sgl = dma_alloc_coherent(dev, block_size,
						  &block[i].sgl_dma,
						  GFP_KERNEL);
		if (!block[i].sgl) {
			dev_err(dev, "Fail to allocate hw SG buffer!\n");
			goto err_free_mem;
		}

		block[i].size = block_size;
	}

	if (remain_sgl > 0) {
		block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
						  &block[i].sgl_dma,
						  GFP_KERNEL);
		if (!block[i].sgl) {
			dev_err(dev, "Fail to allocate remaining hw SG buffer!\n");
			goto err_free_mem;
		}

		block[i].size = remain_sgl * sgl_size;
	}

	pool->sgl_num_per_block = sgl_num_per_block;
	pool->block_num = remain_sgl ? block_num + 1 : block_num;
	pool->count = count;
	pool->sgl_size = sgl_size;
	pool->sge_nr = sge_nr;

	return pool;

err_free_mem:
	for (j = 0; j < i; j++) {
		dma_free_coherent(dev, block_size, block[j].sgl,
				  block[j].sgl_dma);
		memset(block + j, 0, sizeof(*block));
	}
	kfree(pool);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);

/**
 * hisi_acc_free_sgl_pool() - Free a hw sgl pool.
 * @dev: The device which hw sgl pool belongs to.
 * @pool: Pointer of pool.
 *
 * This function frees memory of a hw sgl pool.
 */
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
	struct mem_block *block;
	int i;

	if (!dev || !pool)
		return;

	block = pool->mem_block;

	for (i = 0; i < pool->block_num; i++)
		dma_free_coherent(dev, block[i].size, block[i].sgl,
				  block[i].sgl_dma);

	kfree(pool);
}
EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);
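
/*
 * Example pool lifecycle (a minimal sketch; the 512-entry count and the
 * probe/remove context are hypothetical, not mandated by this API):
 *
 *	pool = hisi_acc_create_sgl_pool(dev, 512, HISI_ACC_SGL_SGE_NR_MAX);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	hisi_acc_free_sgl_pool(dev, pool);
 */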

static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
					   u32 index, dma_addr_t *hw_sgl_dma)
{
	struct mem_block *block;
	u32 block_index, offset;

	if (!pool || !hw_sgl_dma || index >= pool->count)
		return ERR_PTR(-EINVAL);

	block = pool->mem_block;
	block_index = index / pool->sgl_num_per_block;
	offset = index % pool->sgl_num_per_block;

	*hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
	return (void *)block[block_index].sgl + pool->sgl_size * offset;
}
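
/*
 * Worked lookup example for acc_get_sgl() above (hypothetical numbers):
 * with sgl_num_per_block == 510 and sgl_size == 8224, index 1000 resolves
 * to block_index = 1000 / 510 = 1 and offset = 1000 % 510 = 490, so both
 * the returned virtual and DMA addresses are the block-1 base plus
 * 490 * 8224 bytes.
 */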

static void sg_map_to_hw_sg(struct scatterlist *sgl,
			    struct acc_hw_sge *hw_sge)
{
	hw_sge->buf = sg_dma_address(sgl);
	hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
	hw_sge->page_ctrl = sg_virt(sgl);
}

static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
	u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl);

	var++;
	hw_sgl->entry_sum_in_sgl = cpu_to_le16(var);
}

static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
{
	hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}

static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
	struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
	int i;

	for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
		hw_sge[i].page_ctrl = NULL;
		hw_sge[i].buf = 0;
		hw_sge[i].len = 0;
	}
}

/**
 * hisi_acc_sg_buf_map_to_hw_sgl() - Map a scatterlist to a hw sgl.
 * @dev: The device which hw sgl belongs to.
 * @sgl: Scatterlist which will be mapped to hw sgl.
 * @pool: Pool which hw sgl memory will be allocated in.
 * @index: Index of hisi_acc_hw_sgl in pool.
 * @hw_sgl_dma: The dma address of allocated hw sgl.
 *
 * This function builds a hw sgl according to the input sgl; the user can use
 * hw_sgl_dma as src/dst in its BD. Only a single hw sgl is supported
 * currently.
 */
struct hisi_acc_hw_sgl *
hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
			      struct scatterlist *sgl,
			      struct hisi_acc_sgl_pool *pool,
			      u32 index, dma_addr_t *hw_sgl_dma)
{
	struct hisi_acc_hw_sgl *curr_hw_sgl;
	dma_addr_t curr_sgl_dma = 0;
	struct acc_hw_sge *curr_hw_sge;
	struct scatterlist *sg;
	int i, sg_n, sg_n_mapped;

	if (!dev || !sgl || !pool || !hw_sgl_dma)
		return ERR_PTR(-EINVAL);

	sg_n = sg_nents(sgl);

	sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
	if (!sg_n_mapped) {
		dev_err(dev, "DMA mapping for SG error!\n");
		return ERR_PTR(-EINVAL);
	}

	if (sg_n_mapped > pool->sge_nr) {
		dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
		/* undo the mapping before bailing out so it is not leaked */
		dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
		return ERR_PTR(-EINVAL);
	}

	curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
	if (IS_ERR(curr_hw_sgl)) {
		dev_err(dev, "Get SGL error!\n");
		dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
		return ERR_PTR(-ENOMEM);
	}
	curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
	curr_hw_sge = curr_hw_sgl->sge_entries;

	for_each_sg(sgl, sg, sg_n_mapped, i) {
		sg_map_to_hw_sg(sg, curr_hw_sge);
		inc_hw_sgl_sge(curr_hw_sgl);
		curr_hw_sge++;
	}

	update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr);
	*hw_sgl_dma = curr_sgl_dma;

	return curr_hw_sgl;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);

/**
 * hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl.
 * @dev: The device which hw sgl belongs to.
 * @sgl: Related scatterlist.
 * @hw_sgl: Virtual address of hw sgl.
 *
 * This function unmaps allocated hw sgl.
 */
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl)
{
	if (!dev || !sgl || !hw_sgl)
		return;

	dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
	clear_hw_sgl_sge(hw_sgl);
	hw_sgl->entry_sum_in_chain = 0;
	hw_sgl->entry_sum_in_sgl = 0;
	hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
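
/*
 * Example request path (a minimal sketch; "req", the "index" choice and the
 * BD field name are hypothetical, not part of this API):
 *
 *	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool, index,
 *					       &hw_sgl_dma);
 *	if (IS_ERR(hw_sgl))
 *		return PTR_ERR(hw_sgl);
 *	bd->src_addr = hw_sgl_dma;	(program the DMA address into the BD)
 *	...
 *	hisi_acc_sg_buf_unmap(dev, req->src, hw_sgl);
 */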