/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
        struct ib_device *ibdev;
        struct mm_struct *owning_mm;
        u64 iova;
        size_t length;
        unsigned long address;
        u32 writable : 1;
        u32 is_odp : 1;
        u32 is_dmabuf : 1;
        struct work_struct work;
        struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
        struct ib_umem umem;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *first_sg;
        struct scatterlist *last_sg;
        unsigned long first_sg_offset;
        unsigned long last_sg_trim;
        void *private;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
        return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
        return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
                                               unsigned long pgsz)
{
        return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
               (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
                                            unsigned long pgsz)
{
        return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
                         ALIGN_DOWN(umem->iova, pgsz))) /
               pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
        return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
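
/*
 * Worked example (illustrative values only): with umem->iova = 0x5000,
 * umem->length = 0x4000 and pgsz = 0x4000, ib_umem_num_dma_blocks() is
 * (ALIGN(0x9000, 0x4000) - ALIGN_DOWN(0x5000, 0x4000)) / 0x4000 =
 * (0xc000 - 0x4000) / 0x4000 = 2 blocks, while ib_umem_num_pages() with a
 * 4K PAGE_SIZE is (0x9000 - 0x5000) / 0x1000 = 4 pages.
 */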

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
                                                struct ib_umem *umem,
                                                unsigned long pgsz)
{
        __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
                                umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
        for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
             __rdma_block_iter_next(biter);)
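
/*
 * Usage sketch (illustrative only, not part of this header): a driver that
 * supports 4K, 64K and 2M HW page sizes could pick the largest usable block
 * size with ib_umem_find_best_pgsz() and then walk the umem in blocks of
 * that size. The pbl[] array and the surrounding error handling are
 * hypothetical.
 *
 *	struct ib_block_iter biter;
 *	unsigned long pgsz;
 *	unsigned int i = 0;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M, virt);
 *	if (!pgsz)
 *		return -EINVAL;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		pbl[i++] = rdma_block_iter_dma_address(&biter);
 */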

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                            size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
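
/*
 * Usage sketch (illustrative only): a driver's reg_user_mr path typically
 * pins the user range with ib_umem_get() and drops it again with
 * ib_umem_release() when the MR is destroyed. The mr, pd, start, length and
 * access_flags names stand in for the hypothetical reg_user_mr arguments
 * and driver state.
 *
 *	mr->umem = ib_umem_get(pd->device, start, length, access_flags);
 *	if (IS_ERR(mr->umem))
 *		return PTR_ERR(mr->umem);
 *	...
 *	ib_umem_release(mr->umem);
 */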
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                                     unsigned long pgsz_bitmap,
                                     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance, if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032, then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                                                    unsigned long pgsz_bitmap,
                                                    u64 pgoff_bitmask)
{
        struct scatterlist *sg = umem->sgt_append.sgt.sgl;
        dma_addr_t dma_addr;

        dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
        return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
                                      dma_addr & pgoff_bitmask);
}
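
/*
 * Illustrative sketch for the example above (64 byte alignment, aligned
 * offsets up to 4032): pgoff_bitmask covers bits 6..11, i.e. 0xfc0.
 * mydrv_pgsz_bitmap is a hypothetical bitmap of HW-supported page sizes.
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, mydrv_pgsz_bitmap, 0xfc0);
 *	if (!pgsz)
 *		return -EINVAL;
 */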

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                                          unsigned long offset, size_t size,
                                          int fd, int access,
                                          const struct dma_buf_attach_ops *ops);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
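
/*
 * Usage sketch (illustrative only): importing a dma-buf fd as the backing
 * store of an MR. mydrv_attach_ops is a hypothetical struct
 * dma_buf_attach_ops whose move_notify callback invalidates the HW mapping;
 * ib_umem_dmabuf_map_pages() must be called with the dma-buf reservation
 * lock held.
 *
 *	umem_dmabuf = ib_umem_dmabuf_get(ibdev, offset, length, fd,
 *					 access_flags, &mydrv_attach_ops);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 */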

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
                                          unsigned long addr, size_t size,
                                          int access)
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                                    size_t length)
{
        return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                                                   unsigned long pgsz_bitmap,
                                                   unsigned long virt)
{
        return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                                                    unsigned long pgsz_bitmap,
                                                    u64 pgoff_bitmask)
{
        return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                                          unsigned long offset,
                                          size_t size, int fd,
                                          int access,
                                          const struct dma_buf_attach_ops *ops)
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */