/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct ib_umem_odp {
	struct ib_umem umem;
	struct mmu_interval_notifier notifier;
	struct pid *tgid;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page **page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate access
	 * permissions. See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int npages;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	unsigned int page_shift;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
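
/*
 * Illustrative sketch, not part of this header's API: counting how many
 * pages of an ODP umem are currently mapped into the device. The helper
 * name is hypothetical; the locking rule (hold umem_mutex while touching
 * page_list) comes from the struct documentation above.
 */
static inline int example_count_mapped_pages(struct ib_umem_odp *umem_odp)
{
	size_t i, nr = ib_umem_odp_num_pages(umem_odp);
	int mapped = 0;

	mutex_lock(&umem_odp->umem_mutex);
	for (i = 0; i < nr; i++)
		if (umem_odp->page_list[i])	/* NULL == not mapped */
			mapped++;
	mutex_unlock(&umem_odp->umem_mutex);

	return mapped;
}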

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
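
/*
 * Illustrative sketch, not part of this header: splitting a dma_list entry
 * into the device DMA address and its permission bits, per the comment
 * above. Both helper names are hypothetical.
 */
static inline dma_addr_t example_odp_dma_addr(dma_addr_t entry)
{
	/* Clear the low permission bits to recover the bus address. */
	return entry & ODP_DMA_ADDR_MASK;
}

static inline bool example_odp_write_allowed(dma_addr_t entry)
{
	return entry & ODP_WRITE_ALLOWED_BIT;
}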

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp,
				 u64 start_offset, u64 bound);
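
/*
 * Illustrative call sequence, a sketch under assumptions rather than kernel
 * documentation: how a driver page-fault handler might fault a range into
 * the device. Notifier sequencing and the HW programming step are elided;
 * the function name is hypothetical.
 */
static inline int example_fault_range(struct ib_umem_odp *umem_odp,
				      u64 offset, u64 length,
				      unsigned long seq)
{
	int npages;

	/* DMA-map the pages in the range, asking for read and write access. */
	npages = ib_umem_odp_map_dma_pages(umem_odp, offset, length,
					   ODP_READ_ALLOWED_BIT |
					   ODP_WRITE_ALLOWED_BIT, seq);
	if (npages < 0)
		return npages;

	/* ... program the device translation tables from dma_list ... */

	return 0;
}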

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
#endif /* IB_UMEM_ODP_H */