2014-12-11 17:04:17 +02:00
/*
* Copyright ( c ) 2014 Mellanox Technologies . All rights reserved .
*
* This software is available to you under a choice of one of two
* licenses . You may choose to be licensed under the terms of the GNU
* General Public License ( GPL ) Version 2 , available from the file
* COPYING in the main directory of this source tree , or the
* OpenIB . org BSD license below :
*
* Redistribution and use in source and binary forms , with or
* without modification , are permitted provided that the following
* conditions are met :
*
* - Redistributions of source code must retain the above
* copyright notice , this list of conditions and the following
* disclaimer .
*
* - Redistributions in binary form must reproduce the above
* copyright notice , this list of conditions and the following
* disclaimer in the documentation and / or other materials
* provided with the distribution .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
* EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN
* ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE .
*/
# include <linux/types.h>
# include <linux/sched.h>
2017-02-08 18:51:29 +01:00
# include <linux/sched/mm.h>
2017-02-05 15:30:50 +01:00
# include <linux/sched/task.h>
2014-12-11 17:04:17 +02:00
# include <linux/pid.h>
# include <linux/slab.h>
# include <linux/export.h>
# include <linux/vmalloc.h>
2017-04-05 09:23:57 +03:00
# include <linux/hugetlb.h>
2017-10-25 18:56:49 +03:00
# include <linux/interval_tree_generic.h>
2014-12-11 17:04:17 +02:00
# include <rdma/ib_verbs.h>
# include <rdma/ib_umem.h>
# include <rdma/ib_umem_odp.h>
2017-10-25 18:56:49 +03:00
/*
* The ib_umem list keeps track of memory regions for which the HW
* device request to receive notification when the related memory
* mapping is changed .
*
* ib_umem_lock protects the list .
*/
static u64 node_start ( struct umem_odp_node * n )
{
struct ib_umem_odp * umem_odp =
container_of ( n , struct ib_umem_odp , interval_tree ) ;
2018-09-16 20:48:05 +03:00
return ib_umem_start ( & umem_odp - > umem ) ;
2017-10-25 18:56:49 +03:00
}
/* Note that the representation of the intervals in the interval tree
* considers the ending point as contained in the interval , while the
* function ib_umem_end returns the first address which is not contained
* in the umem .
*/
static u64 node_last ( struct umem_odp_node * n )
{
struct ib_umem_odp * umem_odp =
container_of ( n , struct ib_umem_odp , interval_tree ) ;
2018-09-16 20:48:05 +03:00
return ib_umem_end ( & umem_odp - > umem ) - 1 ;
2017-10-25 18:56:49 +03:00
}
/* Instantiate the static rbt_ib_umem_* interval-tree operations. */
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
		     node_start, node_last, static, rbt_ib_umem)
2018-09-16 20:48:04 +03:00
static void ib_umem_notifier_start_account ( struct ib_umem_odp * umem_odp )
2014-12-11 17:04:18 +02:00
{
2018-09-16 20:48:04 +03:00
mutex_lock ( & umem_odp - > umem_mutex ) ;
2018-09-16 20:48:09 +03:00
if ( umem_odp - > notifiers_count + + = = 0 )
/*
* Initialize the completion object for waiting on
* notifiers . Since notifier_count is zero , no one should be
* waiting right now .
*/
reinit_completion ( & umem_odp - > notifier_completion ) ;
2018-09-16 20:48:04 +03:00
mutex_unlock ( & umem_odp - > umem_mutex ) ;
2014-12-11 17:04:18 +02:00
}
2018-09-16 20:48:04 +03:00
static void ib_umem_notifier_end_account ( struct ib_umem_odp * umem_odp )
2014-12-11 17:04:18 +02:00
{
2018-09-16 20:48:04 +03:00
mutex_lock ( & umem_odp - > umem_mutex ) ;
2018-09-16 20:48:09 +03:00
/*
* This sequence increase will notify the QP page fault that the page
* that is going to be mapped in the spte could have been freed .
*/
+ + umem_odp - > notifiers_seq ;
if ( - - umem_odp - > notifiers_count = = 0 )
complete_all ( & umem_odp - > notifier_completion ) ;
2018-09-16 20:48:04 +03:00
mutex_unlock ( & umem_odp - > umem_mutex ) ;
2014-12-11 17:04:18 +02:00
}
2018-09-16 20:48:04 +03:00
/*
 * Per-umem callback invoked when the owning mm is released: mark the
 * umem dying, unblock waiters, and invalidate its whole range.
 * Always returns 0 so iteration over the interval tree continues.
 */
static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
					       u64 start, u64 end, void *cookie)
{
	struct ib_umem *umem = &umem_odp->umem;

	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(umem_odp);
	umem_odp->dying = 1;
	/* Make sure that the fact the umem is dying is out before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&umem_odp->notifier_completion);
	umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
					ib_umem_end(umem));
	return 0;
}
static void ib_umem_notifier_release ( struct mmu_notifier * mn ,
struct mm_struct * mm )
{
2018-09-16 20:48:07 +03:00
struct ib_ucontext_per_mm * per_mm =
container_of ( mn , struct ib_ucontext_per_mm , mn ) ;
2014-12-11 17:04:18 +02:00
2018-09-16 20:48:07 +03:00
down_read ( & per_mm - > umem_rwsem ) ;
2018-09-16 20:48:10 +03:00
if ( per_mm - > active )
rbt_ib_umem_for_each_in_range (
& per_mm - > umem_tree , 0 , ULLONG_MAX ,
ib_umem_notifier_release_trampoline , true , NULL ) ;
2018-09-16 20:48:07 +03:00
up_read ( & per_mm - > umem_rwsem ) ;
2014-12-11 17:04:18 +02:00
}
2018-09-16 20:48:04 +03:00
/*
 * Per-umem callback for invalidate_range_start: begin notifier
 * accounting and push the invalidation down to the HW driver.
 */
static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
					     u64 start, u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->umem.context->invalidate_range(item, start, end);
	return 0;
}
2018-08-21 21:52:33 -07:00
static int ib_umem_notifier_invalidate_range_start ( struct mmu_notifier * mn ,
2018-12-28 00:38:05 -08:00
const struct mmu_notifier_range * range )
2014-12-11 17:04:18 +02:00
{
2018-09-16 20:48:07 +03:00
struct ib_ucontext_per_mm * per_mm =
container_of ( mn , struct ib_ucontext_per_mm , mn ) ;
2018-08-21 21:52:33 -07:00
2018-12-28 00:38:05 -08:00
if ( range - > blockable )
2018-09-16 20:48:07 +03:00
down_read ( & per_mm - > umem_rwsem ) ;
else if ( ! down_read_trylock ( & per_mm - > umem_rwsem ) )
2018-08-21 21:52:33 -07:00
return - EAGAIN ;
2014-12-11 17:04:18 +02:00
2018-09-16 20:48:10 +03:00
if ( ! per_mm - > active ) {
up_read ( & per_mm - > umem_rwsem ) ;
/*
* At this point active is permanently set and visible to this
* CPU without a lock , that fact is relied on to skip the unlock
* in range_end .
*/
return 0 ;
}
2018-12-28 00:38:05 -08:00
return rbt_ib_umem_for_each_in_range ( & per_mm - > umem_tree , range - > start ,
range - > end ,
2018-09-16 20:48:09 +03:00
invalidate_range_start_trampoline ,
2018-12-28 00:38:05 -08:00
range - > blockable , NULL ) ;
2014-12-11 17:04:18 +02:00
}
2018-09-16 20:48:04 +03:00
/* Per-umem callback for invalidate_range_end: finish notifier accounting. */
static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}
static void ib_umem_notifier_invalidate_range_end ( struct mmu_notifier * mn ,
2018-12-28 00:38:05 -08:00
const struct mmu_notifier_range * range )
2014-12-11 17:04:18 +02:00
{
2018-09-16 20:48:07 +03:00
struct ib_ucontext_per_mm * per_mm =
container_of ( mn , struct ib_ucontext_per_mm , mn ) ;
2014-12-11 17:04:18 +02:00
2018-09-16 20:48:10 +03:00
if ( unlikely ( ! per_mm - > active ) )
2014-12-11 17:04:18 +02:00
return ;
2018-12-28 00:38:05 -08:00
rbt_ib_umem_for_each_in_range ( & per_mm - > umem_tree , range - > start ,
range - > end ,
2018-08-21 21:52:33 -07:00
invalidate_range_end_trampoline , true , NULL ) ;
2018-09-16 20:48:07 +03:00
up_read ( & per_mm - > umem_rwsem ) ;
2014-12-11 17:04:18 +02:00
}
2015-11-29 23:02:51 +01:00
static const struct mmu_notifier_ops ib_umem_notifiers = {
2014-12-11 17:04:18 +02:00
. release = ib_umem_notifier_release ,
. invalidate_range_start = ib_umem_notifier_invalidate_range_start ,
. invalidate_range_end = ib_umem_notifier_invalidate_range_end ,
} ;
2018-09-16 20:48:08 +03:00
static void add_umem_to_per_mm ( struct ib_umem_odp * umem_odp )
{
struct ib_ucontext_per_mm * per_mm = umem_odp - > per_mm ;
struct ib_umem * umem = & umem_odp - > umem ;
down_write ( & per_mm - > umem_rwsem ) ;
if ( likely ( ib_umem_start ( umem ) ! = ib_umem_end ( umem ) ) )
rbt_ib_umem_insert ( & umem_odp - > interval_tree ,
& per_mm - > umem_tree ) ;
up_write ( & per_mm - > umem_rwsem ) ;
}
static void remove_umem_from_per_mm ( struct ib_umem_odp * umem_odp )
{
struct ib_ucontext_per_mm * per_mm = umem_odp - > per_mm ;
struct ib_umem * umem = & umem_odp - > umem ;
down_write ( & per_mm - > umem_rwsem ) ;
if ( likely ( ib_umem_start ( umem ) ! = ib_umem_end ( umem ) ) )
rbt_ib_umem_remove ( & umem_odp - > interval_tree ,
& per_mm - > umem_tree ) ;
2018-09-16 20:48:09 +03:00
complete_all ( & umem_odp - > notifier_completion ) ;
2018-09-16 20:48:08 +03:00
up_write ( & per_mm - > umem_rwsem ) ;
}
static struct ib_ucontext_per_mm * alloc_per_mm ( struct ib_ucontext * ctx ,
struct mm_struct * mm )
2017-01-18 16:58:07 +02:00
{
2018-09-16 20:48:07 +03:00
struct ib_ucontext_per_mm * per_mm ;
2018-09-16 20:48:08 +03:00
int ret ;
per_mm = kzalloc ( sizeof ( * per_mm ) , GFP_KERNEL ) ;
if ( ! per_mm )
return ERR_PTR ( - ENOMEM ) ;
per_mm - > context = ctx ;
per_mm - > mm = mm ;
per_mm - > umem_tree = RB_ROOT_CACHED ;
init_rwsem ( & per_mm - > umem_rwsem ) ;
2018-09-16 20:48:10 +03:00
per_mm - > active = ctx - > invalidate_range ;
2018-09-16 20:48:08 +03:00
rcu_read_lock ( ) ;
per_mm - > tgid = get_task_pid ( current - > group_leader , PIDTYPE_PID ) ;
rcu_read_unlock ( ) ;
WARN_ON ( mm ! = current - > mm ) ;
per_mm - > mn . ops = & ib_umem_notifiers ;
ret = mmu_notifier_register ( & per_mm - > mn , per_mm - > mm ) ;
if ( ret ) {
dev_err ( & ctx - > device - > dev ,
" Failed to register mmu_notifier %d \n " , ret ) ;
goto out_pid ;
}
list_add ( & per_mm - > ucontext_list , & ctx - > per_mm_list ) ;
return per_mm ;
out_pid :
put_pid ( per_mm - > tgid ) ;
kfree ( per_mm ) ;
return ERR_PTR ( ret ) ;
}
static int get_per_mm ( struct ib_umem_odp * umem_odp )
{
struct ib_ucontext * ctx = umem_odp - > umem . context ;
struct ib_ucontext_per_mm * per_mm ;
/*
* Generally speaking we expect only one or two per_mm in this list ,
* so no reason to optimize this search today .
*/
mutex_lock ( & ctx - > per_mm_list_lock ) ;
list_for_each_entry ( per_mm , & ctx - > per_mm_list , ucontext_list ) {
if ( per_mm - > mm = = umem_odp - > umem . owning_mm )
goto found ;
}
per_mm = alloc_per_mm ( ctx , umem_odp - > umem . owning_mm ) ;
if ( IS_ERR ( per_mm ) ) {
mutex_unlock ( & ctx - > per_mm_list_lock ) ;
return PTR_ERR ( per_mm ) ;
}
found :
umem_odp - > per_mm = per_mm ;
per_mm - > odp_mrs_count + + ;
mutex_unlock ( & ctx - > per_mm_list_lock ) ;
return 0 ;
}
2018-09-16 20:48:11 +03:00
/* RCU callback: final kfree of a per_mm after the SRCU grace period. */
static void free_per_mm(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
}
2019-01-22 10:24:23 -08:00
static void put_per_mm ( struct ib_umem_odp * umem_odp )
2018-09-16 20:48:08 +03:00
{
struct ib_ucontext_per_mm * per_mm = umem_odp - > per_mm ;
struct ib_ucontext * ctx = umem_odp - > umem . context ;
bool need_free ;
mutex_lock ( & ctx - > per_mm_list_lock ) ;
umem_odp - > per_mm = NULL ;
per_mm - > odp_mrs_count - - ;
need_free = per_mm - > odp_mrs_count = = 0 ;
if ( need_free )
list_del ( & per_mm - > ucontext_list ) ;
mutex_unlock ( & ctx - > per_mm_list_lock ) ;
if ( ! need_free )
return ;
2018-09-16 20:48:10 +03:00
/*
* NOTE ! mmu_notifier_unregister ( ) can happen between a start / end
* callback , resulting in an start / end , and thus an unbalanced
* lock . This doesn ' t really matter to us since we are about to kfree
* the memory that holds the lock , however LOCKDEP doesn ' t like this .
*/
down_write ( & per_mm - > umem_rwsem ) ;
per_mm - > active = false ;
up_write ( & per_mm - > umem_rwsem ) ;
2018-09-16 20:48:11 +03:00
WARN_ON ( ! RB_EMPTY_ROOT ( & per_mm - > umem_tree . rb_root ) ) ;
mmu_notifier_unregister_no_release ( & per_mm - > mn , per_mm - > mm ) ;
2018-09-16 20:48:08 +03:00
put_pid ( per_mm - > tgid ) ;
2018-09-16 20:48:11 +03:00
mmu_notifier_call_srcu ( & per_mm - > rcu , free_per_mm ) ;
2018-09-16 20:48:08 +03:00
}
/*
 * ib_alloc_odp_umem - allocate an implicit-ODP child umem covering
 * [addr, addr + size) and attach it to an existing per_mm.
 * @per_mm: per-(ucontext, mm) tracking structure to attach to
 * @addr:   starting user virtual address
 * @size:   length in bytes (expected to be PAGE_SIZE aligned —
 *          NOTE(review): size >> PAGE_SHIFT truncates otherwise; confirm
 *          callers guarantee alignment)
 *
 * Returns the new ib_umem_odp or an ERR_PTR on allocation failure.
 */
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
				      unsigned long addr, size_t size)
{
	struct ib_ucontext *ctx = per_mm->context;
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int pages = size >> PAGE_SHIFT;
	int ret;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->context    = ctx;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = 1;
	umem->is_odp = 1;
	odp_data->per_mm = per_mm;

	mutex_init(&odp_data->umem_mutex);
	init_completion(&odp_data->notifier_completion);

	/* array_size() guards the pages * sizeof() product against overflow. */
	odp_data->page_list =
		vzalloc(array_size(pages, sizeof(*odp_data->page_list)));
	if (!odp_data->page_list) {
		ret = -ENOMEM;
		goto out_odp_data;
	}

	odp_data->dma_list =
		vzalloc(array_size(pages, sizeof(*odp_data->dma_list)));
	if (!odp_data->dma_list) {
		ret = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * Caller must ensure that the umem_odp that the per_mm came from
	 * cannot be freed during the call to ib_alloc_odp_umem.
	 */
	mutex_lock(&ctx->per_mm_list_lock);
	per_mm->odp_mrs_count++;
	mutex_unlock(&ctx->per_mm_list_lock);
	add_umem_to_per_mm(odp_data);

	return odp_data;

out_page_list:
	vfree(odp_data->page_list);
out_odp_data:
	kfree(odp_data);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
2018-09-16 20:48:05 +03:00
int ib_umem_odp_get ( struct ib_umem_odp * umem_odp , int access )
2014-12-11 17:04:17 +02:00
{
2018-09-16 20:48:05 +03:00
struct ib_umem * umem = & umem_odp - > umem ;
2018-09-16 20:48:08 +03:00
/*
* NOTE : This must called in a process context where umem - > owning_mm
* = = current - > mm
*/
struct mm_struct * mm = umem - > owning_mm ;
2014-12-11 17:04:17 +02:00
int ret_val ;
2017-04-05 09:23:57 +03:00
if ( access & IB_ACCESS_HUGETLB ) {
struct vm_area_struct * vma ;
struct hstate * h ;
2017-05-21 19:08:09 +03:00
down_read ( & mm - > mmap_sem ) ;
2017-04-05 09:23:57 +03:00
vma = find_vma ( mm , ib_umem_start ( umem ) ) ;
2017-05-21 19:08:09 +03:00
if ( ! vma | | ! is_vm_hugetlb_page ( vma ) ) {
up_read ( & mm - > mmap_sem ) ;
2017-04-05 09:23:57 +03:00
return - EINVAL ;
2017-05-21 19:08:09 +03:00
}
2017-04-05 09:23:57 +03:00
h = hstate_vma ( vma ) ;
umem - > page_shift = huge_page_shift ( h ) ;
2017-05-21 19:08:09 +03:00
up_read ( & mm - > mmap_sem ) ;
2017-04-05 09:23:57 +03:00
umem - > hugetlb = 1 ;
} else {
umem - > hugetlb = 0 ;
}
2018-09-16 20:48:05 +03:00
mutex_init ( & umem_odp - > umem_mutex ) ;
2014-12-11 17:04:17 +02:00
2018-09-16 20:48:05 +03:00
init_completion ( & umem_odp - > notifier_completion ) ;
2014-12-11 17:04:18 +02:00
2017-01-18 16:58:07 +02:00
if ( ib_umem_num_pages ( umem ) ) {
2018-09-16 20:48:05 +03:00
umem_odp - > page_list =
vzalloc ( array_size ( sizeof ( * umem_odp - > page_list ) ,
treewide: Use array_size() in vzalloc()
The vzalloc() function has no 2-factor argument form, so multiplication
factors need to be wrapped in array_size(). This patch replaces cases of:
vzalloc(a * b)
with:
vzalloc(array_size(a, b))
as well as handling cases of:
vzalloc(a * b * c)
with:
vzalloc(array3_size(a, b, c))
This does, however, attempt to ignore constant size factors like:
vzalloc(4 * 1024)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
vzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
vzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
vzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
vzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
vzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
vzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
vzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
vzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
vzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
vzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
vzalloc(
- sizeof(TYPE) * (COUNT_ID)
+ array_size(COUNT_ID, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * COUNT_ID
+ array_size(COUNT_ID, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * (COUNT_CONST)
+ array_size(COUNT_CONST, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * COUNT_CONST
+ array_size(COUNT_CONST, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(THING) * (COUNT_ID)
+ array_size(COUNT_ID, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * COUNT_ID
+ array_size(COUNT_ID, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * (COUNT_CONST)
+ array_size(COUNT_CONST, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * COUNT_CONST
+ array_size(COUNT_CONST, sizeof(THING))
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
vzalloc(
- SIZE * COUNT
+ array_size(COUNT, SIZE)
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
vzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
vzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
vzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
vzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
vzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
vzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
vzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
vzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
vzalloc(C1 * C2 * C3, ...)
|
vzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants.
@@
expression E1, E2;
constant C1, C2;
@@
(
vzalloc(C1 * C2, ...)
|
vzalloc(
- E1 * E2
+ array_size(E1, E2)
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 14:27:37 -07:00
ib_umem_num_pages ( umem ) ) ) ;
2018-09-16 20:48:08 +03:00
if ( ! umem_odp - > page_list )
return - ENOMEM ;
2014-12-11 17:04:17 +02:00
2018-09-16 20:48:05 +03:00
umem_odp - > dma_list =
vzalloc ( array_size ( sizeof ( * umem_odp - > dma_list ) ,
treewide: Use array_size() in vzalloc()
The vzalloc() function has no 2-factor argument form, so multiplication
factors need to be wrapped in array_size(). This patch replaces cases of:
vzalloc(a * b)
with:
vzalloc(array_size(a, b))
as well as handling cases of:
vzalloc(a * b * c)
with:
vzalloc(array3_size(a, b, c))
This does, however, attempt to ignore constant size factors like:
vzalloc(4 * 1024)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
vzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
vzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
vzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
vzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
vzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
vzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
vzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
vzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
vzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
vzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
vzalloc(
- sizeof(TYPE) * (COUNT_ID)
+ array_size(COUNT_ID, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * COUNT_ID
+ array_size(COUNT_ID, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * (COUNT_CONST)
+ array_size(COUNT_CONST, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * COUNT_CONST
+ array_size(COUNT_CONST, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(THING) * (COUNT_ID)
+ array_size(COUNT_ID, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * COUNT_ID
+ array_size(COUNT_ID, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * (COUNT_CONST)
+ array_size(COUNT_CONST, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * COUNT_CONST
+ array_size(COUNT_CONST, sizeof(THING))
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
vzalloc(
- SIZE * COUNT
+ array_size(COUNT, SIZE)
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
vzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
vzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
vzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
vzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
vzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
vzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
vzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
vzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
vzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
vzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
vzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
vzalloc(C1 * C2 * C3, ...)
|
vzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants.
@@
expression E1, E2;
constant C1, C2;
@@
(
vzalloc(C1 * C2, ...)
|
vzalloc(
- E1 * E2
+ array_size(E1, E2)
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 14:27:37 -07:00
ib_umem_num_pages ( umem ) ) ) ;
2018-09-16 20:48:05 +03:00
if ( ! umem_odp - > dma_list ) {
2017-01-18 16:58:07 +02:00
ret_val = - ENOMEM ;
goto out_page_list ;
}
2014-12-11 17:04:17 +02:00
}
2018-09-16 20:48:08 +03:00
ret_val = get_per_mm ( umem_odp ) ;
if ( ret_val )
goto out_dma_list ;
add_umem_to_per_mm ( umem_odp ) ;
2014-12-11 17:04:18 +02:00
2014-12-11 17:04:17 +02:00
return 0 ;
2018-09-16 20:48:08 +03:00
out_dma_list :
2018-09-16 20:48:05 +03:00
vfree ( umem_odp - > dma_list ) ;
2014-12-11 17:04:17 +02:00
out_page_list :
2018-09-16 20:48:05 +03:00
vfree ( umem_odp - > page_list ) ;
2014-12-11 17:04:17 +02:00
return ret_val ;
}
2018-09-16 20:48:04 +03:00
/*
 * ib_umem_odp_release - tear down an ODP umem: unmap all of its pages,
 * detach it from the per-mm tracking structures and free its bookkeeping
 * arrays. The order below matters: pages must be unmapped before the
 * dma_list/page_list arrays they index are freed.
 */
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	struct ib_umem *umem = &umem_odp->umem;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
				    ib_umem_end(umem));
	/* Remove from the per-mm interval tree, then drop the per_mm ref. */
	remove_umem_from_per_mm(umem_odp);
	put_per_mm(umem_odp);
	vfree(umem_odp->dma_list);
	vfree(umem_odp->page_list);
}
/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem_odp->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		int page_index,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_umem *umem = &umem_odp->umem;
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle case of a racing notifier. This check also allows us to bail
	 * early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem_odp->dma_list[page_index])) {
		/* No mapping yet at this index: DMA-map one full umem page. */
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, BIT(umem->page_shift),
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		/* Low bits of the DMA address double as access flags. */
		umem_odp->dma_list[page_index] = dma_addr | access_mask;
		umem_odp->page_list[page_index] = page;
		umem->npages++;
		stored_page = 1;
	} else if (umem_odp->page_list[page_index] == page) {
		/* Same page already mapped: just widen the access mask. */
		umem_odp->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem_odp->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		/* Invalidate exactly the one conflicting page's range. */
		ib_umem_notifier_start_account(umem_odp);
		umem->context->invalidate_range(
			umem_odp,
			ib_umem_start(umem) + (page_index << umem->page_shift),
			ib_umem_start(umem) +
				((page_index + 1) << umem->page_shift));
		ib_umem_notifier_end_account(umem_odp);
		ret = -EAGAIN;
	}

	return ret;
}
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages is updated in
 * umem_odp->dma_list.
 *
 * Returns the number of pages mapped in success, negative error code
 * for failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that userspace process is being terminated
 * and mm was already destroyed.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual pages mapped is returned in
 *        the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. the sequence number is read from
 *               umem_odp->notifiers_seq before calling this function
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq)
{
	struct ib_umem *umem = &umem_odp->umem;
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	struct page **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0, page_shift;
	unsigned int flags = 0;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	/* One page's worth of page pointers is the scratch gup buffer. */
	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	/* Align the requested range down to the umem page size. */
	page_shift = umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	/*
	 * owning_process is allowed to be NULL, this means somehow the mm is
	 * existing beyond the lifetime of the originating process.. Presumably
	 * mmget_not_zero will fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
	if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				(bcnt + BIT(page_shift) - 1) >> page_shift,
				PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0) {
			if (npages != -EAGAIN)
				pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
			else
				pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
			break;
		}

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem_odp->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				/*
				 * Tail CPU page of a larger umem page: it must
				 * be physically contiguous with the previous
				 * one; it was already covered by the head
				 * page's mapping, so just drop the reference.
				 */
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem_odp, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0) {
				if (ret != -EAGAIN)
					pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
				else
					pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
				break;
			}

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem_odp->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		/* Report the gup error only if nothing was mapped at all. */
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
2018-09-16 20:48:04 +03:00
/*
 * ib_umem_odp_unmap_dma_pages - DMA-unmap and release the pages covering
 * [virt, bound) in an ODP umem. The range is clamped to the umem's own
 * bounds, and @bound itself is exclusive. Dirties pages that were mapped
 * writable before releasing them.
 */
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	struct ib_umem *umem = &umem_odp->umem;
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completion. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem_odp->umem_mutex);
	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		if (umem_odp->page_list[idx]) {
			struct page *page = umem_odp->page_list[idx];
			/* dma_list stores the address with access bits OR'd
			 * into the low bits; mask them off before unmapping. */
			dma_addr_t dma = umem_odp->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem_odp->page_list[idx] = NULL;
			umem_odp->dma_list[idx] = 0;
			umem->npages--;
		}
	}
	mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
2017-10-25 18:56:49 +03:00
/* @last is not a part of the interval. See comment for function
* node_last .
*/
int rbt_ib_umem_for_each_in_range ( struct rb_root_cached * root ,
u64 start , u64 last ,
umem_call_back cb ,
2018-08-21 21:52:33 -07:00
bool blockable ,
2017-10-25 18:56:49 +03:00
void * cookie )
{
int ret_val = 0 ;
struct umem_odp_node * node , * next ;
struct ib_umem_odp * umem ;
if ( unlikely ( start = = last ) )
return ret_val ;
for ( node = rbt_ib_umem_iter_first ( root , start , last - 1 ) ;
node ; node = next ) {
2018-08-21 21:52:33 -07:00
/* TODO move the blockable decision up to the callback */
if ( ! blockable )
return - EAGAIN ;
2017-10-25 18:56:49 +03:00
next = rbt_ib_umem_iter_next ( node , start , last - 1 ) ;
umem = container_of ( node , struct ib_umem_odp , interval_tree ) ;
2018-09-16 20:48:04 +03:00
ret_val = cb ( umem , start , last , cookie ) | | ret_val ;
2017-10-25 18:56:49 +03:00
}
return ret_val ;
}
EXPORT_SYMBOL ( rbt_ib_umem_for_each_in_range ) ;
/*
 * Find the first ODP umem whose interval intersects
 * [addr, addr + length); NULL when the tree has no such entry.
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length)
{
	struct umem_odp_node *hit =
		rbt_ib_umem_iter_first(root, addr, addr + length - 1);

	return hit ? container_of(hit, struct ib_umem_odp, interval_tree) :
		     NULL;
}
EXPORT_SYMBOL(rbt_ib_umem_lookup);