/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic scatter and gather helpers.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
 * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H

#include <crypto/algapi.h>

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain ( struct scatterlist * head ,
2018-07-23 20:01:33 +03:00
struct scatterlist * sg , int num )
2010-11-22 13:25:50 +03:00
{
if ( sg )
2015-08-07 19:15:13 +03:00
sg_chain ( head , num , sg ) ;
2010-11-22 13:25:50 +03:00
else
sg_mark_end ( head ) ;
}
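
/*
 * Number of bytes that may be consumed from the current position
 * without leaving the current sg entry or the current page, whichever
 * ends first.  Note that offset_in_page(~walk->offset) + 1 is just
 * PAGE_SIZE - offset_in_page(walk->offset), i.e. the room left in the
 * page that walk->offset points into.
 */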
static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
        unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
        unsigned int len_this_page = offset_in_page(~walk->offset) + 1;

        return len_this_page > len ? len : len_this_page;
}

static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
                                             unsigned int nbytes)
{
        unsigned int len_this_page = scatterwalk_pagelen(walk);

        return nbytes > len_this_page ? len_this_page : nbytes;
}

static inline void scatterwalk_advance(struct scatter_walk *walk,
                                       unsigned int nbytes)
{
        walk->offset += nbytes;
}
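
/*
 * The page that walk->offset currently points into.  walk->offset is
 * relative to sg_page(walk->sg) and already includes the entry's
 * starting offset (see scatterwalk_start()), so the shift also indexes
 * correctly into multi-page entries.
 */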
static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
        return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
}

/*
 * scatterwalk_map()/scatterwalk_unmap() use kmap_local_page() rather
 * than kmap_atomic().  Both create short-lived mappings of pages that
 * may not be covered by the kernel direct map, but kmap_atomic()
 * disables preemption on all architectures, not just on 32-bit
 * CONFIG_HIGHMEM ones, which made every scatterwalk based crypto
 * routine run with preemption disabled.  kmap_local_page() avoids that
 * penalty where CONFIG_HIGHMEM is not needed.
 */
static inline void scatterwalk_unmap(void *vaddr)
{
        kunmap_local(vaddr);
}
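
/*
 * Position the walk at the beginning of an sg entry.  Also used by
 * scatterwalk_pagedone() to step to the next entry once the current
 * one is exhausted.
 */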
static inline void scatterwalk_start(struct scatter_walk *walk,
                                     struct scatterlist *sg)
{
        walk->sg = sg;
        walk->offset = sg->offset;
}
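
/*
 * Map the page containing the current walk position and return the
 * kernel virtual address of that position.  The mapping must be
 * released with scatterwalk_unmap() before the walk crosses into the
 * next page.
 */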
static inline void *scatterwalk_map(struct scatter_walk *walk)
{
        return kmap_local_page(scatterwalk_page(walk)) +
               offset_in_page(walk->offset);
}
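
/*
 * Finish with the current page: flush the dcache if data was written
 * to it (out != 0), and advance to the next sg entry if this one has
 * been fully consumed and more data follows.
 */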
static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
                                        unsigned int more)
{
        if (out) {
                struct page *page;

                page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
                flush_dcache_page(page);
        }

        if (more && walk->offset >= walk->sg->offset + walk->sg->length)
                scatterwalk_start(walk, sg_next(walk->sg));
}

static inline void scatterwalk_done(struct scatter_walk *walk, int out,
                                    int more)
{
        if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
            !(walk->offset & (PAGE_SIZE - 1)))
                scatterwalk_pagedone(walk, out, more);
}
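
/*
 * Typical walk loop (an illustrative sketch, not an API exported by
 * this header; process_chunk() is a hypothetical stand-in for whatever
 * the caller does with each mapped, page-safe chunk, and sg is assumed
 * to cover at least nbytes bytes):
 *
 *      struct scatter_walk walk;
 *
 *      scatterwalk_start(&walk, sg);
 *      while (nbytes) {
 *              unsigned int len = scatterwalk_clamp(&walk, nbytes);
 *              void *vaddr = scatterwalk_map(&walk);
 *
 *              process_chunk(vaddr, len);
 *              scatterwalk_unmap(vaddr);
 *              scatterwalk_advance(&walk, len);
 *              nbytes -= len;
 *              scatterwalk_done(&walk, 1, nbytes);
 *      }
 *
 * out == 1 in the scatterwalk_done() call makes it flush the dcache of
 * any page that was written to; a read-only walk would pass 0.
 */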

void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                            size_t nbytes, int out);
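
/*
 * For both scatterwalk_copychunks() and scatterwalk_map_and_copy(),
 * out != 0 copies from buf into the scatterlist and out == 0 copies
 * from the scatterlist into buf.  A common use of the latter (a
 * sketch; the offset and names are illustrative) is extracting an
 * authentication tag from the end of a request:
 *
 *      scatterwalk_map_and_copy(tag, req->src,
 *                               req->cryptlen - authsize, authsize, 0);
 */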
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
                              unsigned int start, unsigned int nbytes, int out);
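
/*
 * scatterwalk_ffwd() fast-forwards len bytes into src and returns a
 * scatterlist starting at that position, using dst[2] as scratch
 * entries when the position falls inside an entry.  A typical use (a
 * sketch, with hypothetical field names) is skipping an AEAD request's
 * associated data:
 *
 *      sg = scatterwalk_ffwd(rctx->src_tbl, req->src, req->assoclen);
 */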
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
                                     struct scatterlist *src,
                                     unsigned int len);

#endif  /* _CRYPTO_SCATTERWALK_H */