/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
2005-04-17 02:20:36 +04:00
2006-03-14 05:18:19 +03:00
/*
* General memory allocation interfaces
*/
2012-04-02 14:24:04 +04:00
/*
 * Allocation behavior modifiers for kmem_alloc() and friends; translated
 * to GFP flags by kmem_flags_convert() below.
 */
typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)	/* clears __GFP_FS: no fs recursion from reclaim */
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)	/* adds __GFP_RETRY_MAYFAIL: fail rather than retry forever */
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)	/* adds __GFP_ZERO: return zeroed memory */
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)	/* adds __GFP_NOLOCKDEP: suppress lockdep for this allocation */
2006-03-14 05:18:19 +03:00
/*
* We use a special process flag to avoid recursive callbacks into
* the filesystem during transactions . We will also issue our own
* warnings , so we explicitly skip any generic ones ( silly of us ) .
*/
static inline gfp_t
2012-04-02 14:24:04 +04:00
kmem_flags_convert ( xfs_km_flags_t flags )
2005-04-17 02:20:36 +04:00
{
2006-03-14 05:18:19 +03:00
gfp_t lflags ;
2005-04-17 02:20:36 +04:00
2020-05-26 19:33:11 +03:00
BUG_ON ( flags & ~ ( KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP ) ) ;
2005-04-17 02:20:36 +04:00
2019-08-26 22:06:22 +03:00
lflags = GFP_KERNEL | __GFP_NOWARN ;
if ( flags & KM_NOFS )
lflags & = ~ __GFP_FS ;
2013-11-04 14:21:05 +04:00
2017-07-13 00:36:49 +03:00
/*
* Default page / slab allocator behavior is to retry for ever
* for small allocations . We can override this behavior by using
* __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long
* as it is feasible but rather fail than retry forever for all
* request sizes .
*/
if ( flags & KM_MAYFAIL )
lflags | = __GFP_RETRY_MAYFAIL ;
2013-11-04 14:21:05 +04:00
if ( flags & KM_ZERO )
lflags | = __GFP_ZERO ;
2020-05-26 19:33:11 +03:00
if ( flags & KM_NOLOCKDEP )
lflags | = __GFP_NOLOCKDEP ;
2006-03-14 05:18:19 +03:00
return lflags ;
2005-04-17 02:20:36 +04:00
}
2012-04-02 14:24:04 +04:00
extern void * kmem_alloc ( size_t , xfs_km_flags_t ) ;
2015-02-02 01:54:18 +03:00
/*
 * Free memory obtained from kmem_alloc()/kmem_zalloc().  kvfree() copes
 * with both kmalloc'd and vmalloc'd addresses, so callers need not know
 * which allocator backed the buffer.
 */
static inline void
kmem_free(const void *ptr)
{
	kvfree(ptr);
}
2006-03-14 05:18:19 +03:00
2010-01-21 00:55:30 +03:00
2013-11-04 14:21:05 +04:00
/*
 * Zeroing variant of kmem_alloc(): identical semantics, but the returned
 * memory is cleared (KM_ZERO maps to __GFP_ZERO in kmem_flags_convert()).
 */
static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}
2006-03-14 05:18:19 +03:00
/*
* Zone interfaces
*/
2019-06-29 05:27:19 +03:00
/*
 * Return the struct page backing @addr, whether it lives in the vmalloc
 * area or in the kernel direct mapping.
 */
static inline struct page *
kmem_to_page(void *addr)
{
	return is_vmalloc_addr(addr) ? vmalloc_to_page(addr) :
				       virt_to_page(addr);
}
2005-04-17 02:20:36 +04:00
# endif /* __XFS_SUPPORT_KMEM_H__ */