/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
struct ib_umem_odp {
	struct ib_umem umem;
	struct mmu_interval_notifier notifier;
	struct pid *tgid;

	/* An array of the pfns included in the on-demand paging umem. */
	unsigned long *pfn_list;

	/*
	 * An array with DMA addresses mapped for pfns in pfn_list.
	 * The lower two bits designate access permissions.
	 * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the pfn_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int npages;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	unsigned int page_shift;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
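
/*
 * Illustrative sketch (not part of the kernel API): given a user virtual
 * address "addr" known to fall inside [ib_umem_start(), ib_umem_end()),
 * the index into pfn_list/dma_list could be derived roughly as follows.
 * The variable names here are hypothetical.
 *
 *	size_t npages = ib_umem_odp_num_pages(umem_odp);
 *	unsigned long idx = (addr - ib_umem_start(umem_odp)) >>
 *			    umem_odp->page_shift;
 *	WARN_ON(idx >= npages);
 */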

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to ib_umem_odp_map_dma_and_lock().
 *
 * Be aware that upgrading a mapped address might result in change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
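
/*
 * Illustrative sketch (hypothetical; assumes umem_mutex is held and "idx"
 * is a valid page index): splitting a dma_list entry into its DMA address
 * and permission bits.
 *
 *	dma_addr_t entry = umem_odp->dma_list[idx];
 *	dma_addr_t dma_addr = entry & ODP_DMA_ADDR_MASK;
 *	bool writable = entry & ODP_WRITE_ALLOWED_BIT;
 */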

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bcnt, u64 access_mask, bool fault);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);
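
/*
 * Sketch of a typical driver flow (illustration only; error handling, the
 * mmu_interval_notifier_ops and the "device", "addr", "size", "access" and
 * "ops" values are assumed to come from the caller). On success,
 * ib_umem_odp_map_dma_and_lock() returns with umem_mutex held, so the
 * caller unlocks after programming its hardware translation tables.
 *
 *	struct ib_umem_odp *odp;
 *	int np;
 *
 *	odp = ib_umem_odp_get(device, addr, size, access, ops);
 *	if (IS_ERR(odp))
 *		return PTR_ERR(odp);
 *
 *	np = ib_umem_odp_map_dma_and_lock(odp, addr, size,
 *					  ODP_READ_ALLOWED_BIT |
 *					  ODP_WRITE_ALLOWED_BIT, true);
 *	if (np >= 0)
 *		mutex_unlock(&odp->umem_mutex);
 *
 *	ib_umem_odp_release(odp);
 */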
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops)
{
	return ERR_PTR(-EINVAL);
}
static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
#endif /* IB_UMEM_ODP_H */