/*
 * Implementation of the extensible bitmap type.
 *
 * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
 */
/*
 * Updated: Hewlett-Packard <paul@paul-moore.com>
 *
 *	Added support to import/export the NetLabel category bitmap
 *
 * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
 */
/*
 * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
 *	Applied standard bit operations to improve bitmap scanning.
*/

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <net/netlabel.h>
#include "ebitmap.h"
#include "policydb.h"

#define BITS_PER_U64	(sizeof(u64) * 8)
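
/* A policy file always encodes its ebitmap map units as 64-bit values; the
 * mapunit check in ebitmap_read() below rejects anything else.
 */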

int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
{
	struct ebitmap_node *n1, *n2;

	if (e1->highbit != e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;
	while (n1 && n2 &&
	       (n1->startbit == n2->startbit) &&
	       !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
		n1 = n1->next;
		n2 = n2->next;
	}

	if (n1 || n2)
		return 0;

	return 1;
}

int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
{
	struct ebitmap_node *n, *new, *prev;

	ebitmap_init(dst);
	n = src->node;
	prev = NULL;
	while (n) {
		new = kzalloc(sizeof(*new), GFP_ATOMIC);
		if (!new) {
			ebitmap_destroy(dst);
			return -ENOMEM;
		}
		new->startbit = n->startbit;
		memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
		new->next = NULL;
		if (prev)
			prev->next = new;
		else
			dst->node = new;
		prev = new;
		n = n->next;
	}

	dst->highbit = src->highbit;
	return 0;
}

#ifdef CONFIG_NETLABEL
/**
 * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
 * @ebmap: the ebitmap to export
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Export a SELinux extensible bitmap into a NetLabel category bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_export(struct ebitmap *ebmap,
			  struct netlbl_lsm_secattr_catmap **catmap)
{
	struct ebitmap_node *e_iter = ebmap->node;
	struct netlbl_lsm_secattr_catmap *c_iter;
	u32 cmap_idx, cmap_sft;
	int i;

	/* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
	 * however, it is not always compatible with an array of unsigned long
	 * in ebitmap_node.
	 * In addition, note that the implementation below assumes that
	 * unsigned long is no wider than 64 bits.
	 */
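
	/* Worked example of the index arithmetic in the loop below, assuming
	 * the usual NetLabel constants (NETLBL_CATMAP_SIZE == 256 and
	 * NETLBL_CATMAP_MAPSIZE == 64): an ebitmap unit with e_startbit 320
	 * falls in the catmap node whose startbit is 320 & ~(256 - 1) == 256,
	 * so delta == 64, cmap_idx == 1 and cmap_sft == 0.
	 */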

	if (e_iter == NULL) {
		*catmap = NULL;
		return 0;
	}

	c_iter = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
	if (c_iter == NULL)
		return -ENOMEM;
	*catmap = c_iter;
	c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1);

	while (e_iter) {
		for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
			unsigned int delta, e_startbit, c_endbit;

			e_startbit = e_iter->startbit + i * EBITMAP_UNIT_SIZE;
			c_endbit = c_iter->startbit + NETLBL_CATMAP_SIZE;
			if (e_startbit >= c_endbit) {
				c_iter->next
				  = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
				if (c_iter->next == NULL)
					goto netlbl_export_failure;
				c_iter = c_iter->next;
				c_iter->startbit
				  = e_startbit & ~(NETLBL_CATMAP_SIZE - 1);
			}
			delta = e_startbit - c_iter->startbit;
			cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
			cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
			c_iter->bitmap[cmap_idx]
				|= e_iter->maps[i] << cmap_sft;
		}
		e_iter = e_iter->next;
	}

	return 0;

netlbl_export_failure:
	netlbl_secattr_catmap_free(*catmap);
	return -ENOMEM;
}

/**
 * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
 * @ebmap: the ebitmap to import
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Import a NetLabel category bitmap into a SELinux extensible bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_import(struct ebitmap *ebmap,
			  struct netlbl_lsm_secattr_catmap *catmap)
{
	struct ebitmap_node *e_iter = NULL;
	struct ebitmap_node *emap_prev = NULL;
	struct netlbl_lsm_secattr_catmap *c_iter = catmap;
	u32 c_idx, c_pos, e_idx, e_sft;

	/* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
	 * however, it is not always compatible with an array of unsigned long
	 * in ebitmap_node.
	 * In addition, note that the implementation below assumes that
	 * unsigned long is no wider than 64 bits.
	 */
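
	/* Worked example of the reverse mapping below, assuming
	 * NETLBL_CATMAP_MAPSIZE == 64, EBITMAP_UNIT_SIZE == 64 and
	 * EBITMAP_SIZE == 128 (a 64-bit kernel): a non-zero entry at
	 * c_idx == 1 in a catmap node with startbit 256 gives c_pos == 320,
	 * which lands in the ebitmap node with startbit 256 at e_idx == 1,
	 * e_sft == 0.
	 */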

	do {
		for (c_idx = 0; c_idx < NETLBL_CATMAP_MAPCNT; c_idx++) {
			unsigned int delta;
			u64 map = c_iter->bitmap[c_idx];

			if (!map)
				continue;

			c_pos = c_iter->startbit
				+ c_idx * NETLBL_CATMAP_MAPSIZE;
			if (!e_iter
			    || c_pos >= e_iter->startbit + EBITMAP_SIZE) {
				e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
				if (!e_iter)
					goto netlbl_import_failure;
				e_iter->startbit
					= c_pos - (c_pos % EBITMAP_SIZE);
				if (emap_prev == NULL)
					ebmap->node = e_iter;
				else
					emap_prev->next = e_iter;
				emap_prev = e_iter;
			}
			delta = c_pos - e_iter->startbit;
			e_idx = delta / EBITMAP_UNIT_SIZE;
			e_sft = delta % EBITMAP_UNIT_SIZE;
			while (map) {
				e_iter->maps[e_idx++] |= map & (-1UL);
				map = EBITMAP_SHIFT_UNIT_SIZE(map);
			}
		}
		c_iter = c_iter->next;
	} while (c_iter);
	if (e_iter != NULL)
		ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
	else
		ebitmap_destroy(ebmap);

	return 0;

netlbl_import_failure:
	ebitmap_destroy(ebmap);
	return -ENOMEM;
}
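
/* Sketch of how this pair is typically called (by the MLS code in ss/mls.c,
 * reproduced from memory rather than verbatim; the field names shown are
 * assumptions):
 *
 *	rc = ebitmap_netlbl_export(&context->range.level[0].cat,
 *				   &secattr->attr.mls.cat);
 *	...
 *	rc = ebitmap_netlbl_import(&context->range.level[0].cat,
 *				   secattr->attr.mls.cat);
 */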
#endif /* CONFIG_NETLABEL */

/*
 * Check to see if all the bits set in e2 are also set in e1. Optionally,
 * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
 * last_e2bit.
 */
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
{
	struct ebitmap_node *n1, *n2;
	int i;

	if (e1->highbit < e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;

	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
		if (n1->startbit < n2->startbit) {
			n1 = n1->next;
			continue;
		}
		for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
			i--;	/* Skip trailing NULL map entries */
		if (last_e2bit && (i >= 0)) {
			u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
					 __fls(n2->maps[i]);
			if (lastsetbit > last_e2bit)
				return 0;
		}
		while (i >= 0) {
			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
				return 0;
			i--;
		}

		n1 = n1->next;
		n2 = n2->next;
	}

	if (n2)
		return 0;

	return 1;
}
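
/* Illustrative example of the last_e2bit argument (not taken from the
 * original sources): if e2 has bits 5 and 70 set, ebitmap_contains(e1, e2, 0)
 * returns 1 only when e1 also has bits 5 and 70 set, while
 * ebitmap_contains(e1, e2, 64) returns 0 regardless of e1, because e2's
 * highest set bit (70) exceeds last_e2bit.
 */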

int ebitmap_get_bit(struct ebitmap *e, unsigned long bit)
{
	struct ebitmap_node *n;

	if (e->highbit < bit)
		return 0;

	n = e->node;
	while (n && (n->startbit <= bit)) {
		if ((n->startbit + EBITMAP_SIZE) > bit)
			return ebitmap_node_get_bit(n, bit);
		n = n->next;
	}

	return 0;
}

int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
{
	struct ebitmap_node *n, *prev, *new;

	prev = NULL;
	n = e->node;
	while (n && n->startbit <= bit) {
		if ((n->startbit + EBITMAP_SIZE) > bit) {
			if (value) {
				ebitmap_node_set_bit(n, bit);
			} else {
				unsigned int s;

				ebitmap_node_clr_bit(n, bit);

				s = find_first_bit(n->maps, EBITMAP_SIZE);
				if (s < EBITMAP_SIZE)
					return 0;

				/* drop this node from the bitmap */
				if (!n->next) {
					/*
					 * this was the highest map
					 * within the bitmap
					 */
					if (prev)
						e->highbit = prev->startbit
							     + EBITMAP_SIZE;
					else
						e->highbit = 0;
				}
				if (prev)
					prev->next = n->next;
				else
					e->node = n->next;
				kfree(n);
			}
			return 0;
		}
		prev = n;
		n = n->next;
	}

	if (!value)
		return 0;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->startbit = bit - (bit % EBITMAP_SIZE);
	ebitmap_node_set_bit(new, bit);

	if (!n)
		/* this node will be the highest map within the bitmap */
		e->highbit = new->startbit + EBITMAP_SIZE;

	if (prev) {
		new->next = prev->next;
		prev->next = new;
	} else {
		new->next = e->node;
		e->node = new;
	}

	return 0;
}
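
/* Minimal usage sketch for the accessors above (hypothetical caller, not part
 * of the original file); ebitmap_set_bit() may fail with -ENOMEM:
 *
 *	struct ebitmap e;
 *
 *	ebitmap_init(&e);
 *	if (ebitmap_set_bit(&e, 300, 1) == 0 && ebitmap_get_bit(&e, 300))
 *		pr_info("bit 300 is set\n");
 *	ebitmap_destroy(&e);
 */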

void ebitmap_destroy(struct ebitmap *e)
{
	struct ebitmap_node *n, *temp;

	if (!e)
		return;

	n = e->node;
	while (n) {
		temp = n;
		n = n->next;
		kfree(temp);
	}

	e->highbit = 0;
	e->node = NULL;
	return;
}

int ebitmap_read(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n = NULL;
	u32 mapunit, count, startbit, index;
	u64 map;
	__le32 buf[3];
	int rc, i;

	ebitmap_init(e);

	rc = next_entry(buf, fp, sizeof buf);
	if (rc < 0)
		goto out;
	mapunit = le32_to_cpu(buf[0]);
	e->highbit = le32_to_cpu(buf[1]);
	count = le32_to_cpu(buf[2]);

	if (mapunit != BITS_PER_U64) {
		printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
		       "match my size %Zd (high bit was %d)\n",
		       mapunit, BITS_PER_U64, e->highbit);
		goto bad;
	}

	/* round up e->highbit */
	e->highbit += EBITMAP_SIZE - 1;
	e->highbit -= (e->highbit % EBITMAP_SIZE);

	if (!e->highbit) {
		e->node = NULL;
		goto ok;
	}
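
	/* Each entry is a 32-bit start bit followed by a 64-bit map chunk. */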
	for (i = 0; i < count; i++) {
		rc = next_entry(&startbit, fp, sizeof(u32));
		if (rc < 0) {
			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		startbit = le32_to_cpu(startbit);

		if (startbit & (mapunit - 1)) {
			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
			       "not a multiple of the map unit size (%u)\n",
			       startbit, mapunit);
			goto bad;
		}
		if (startbit > e->highbit - mapunit) {
			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
			       "beyond the end of the bitmap (%u)\n",
			       startbit, (e->highbit - mapunit));
			goto bad;
		}
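		/*
		 * A start bit past the range covered by the current node
		 * means a new node must be allocated for this chunk.
		 */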
		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
			struct ebitmap_node *tmp;
			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				printk(KERN_ERR
				       "SELinux: ebitmap: out of memory\n");
				rc = -ENOMEM;
				goto bad;
			}
			/* round down */
			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
			if (n)
				n->next = tmp;
			else
				e->node = tmp;
			n = tmp;
		} else if (startbit <= n->startbit) {
			printk(KERN_ERR "SELinux: ebitmap: start bit %d"
			       " comes after start bit %d\n",
			       startbit, n->startbit);
			goto bad;
		}

		rc = next_entry(&map, fp, sizeof(u64));
		if (rc < 0) {
			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		map = le64_to_cpu(map);

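		/*
		 * Split the 64-bit chunk into the node's maps[] array,
		 * EBITMAP_UNIT_SIZE bits at a time.
		 */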
		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
		while (map) {
			n->maps[index++] = map & (-1UL);
			map = EBITMAP_SHIFT_UNIT_SIZE(map);
		}
	}
ok:
	rc = 0;
out:
	return rc;
bad:
	if (!rc)
		rc = -EINVAL;
	ebitmap_destroy(e);
	goto out;
}
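
/*
 * Write the ebitmap to the policy file 'fp' in the layout ebitmap_read()
 * expects: a header of (map unit size, high bit, chunk count), followed by
 * one (start bit, 64-bit map) pair for each chunk that contains a set bit.
 */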
int ebitmap_write(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n;
	u32 count;
	__le32 buf[3];
	u64 map;
	int bit, last_bit, last_startbit, rc;

	buf[0] = cpu_to_le32(BITS_PER_U64);

	count = 0;
	last_bit = 0;
	last_startbit = -1;
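	/*
	 * First pass: count the 64-bit chunks to be written and find the bit
	 * just past the highest set bit, so the header can be emitted first.
	 */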
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			count++;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		last_bit = roundup(bit + 1, BITS_PER_U64);
	}
	buf[1] = cpu_to_le32(last_bit);
	buf[2] = cpu_to_le32(count);
	rc = put_entry(buf, sizeof(u32), 3, fp);
	if (rc)
		return rc;
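
	/*
	 * Second pass: accumulate bits into one 64-bit chunk at a time and
	 * flush each chunk as (start bit, map) once the walk moves past it.
	 */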
	map = 0;
	last_startbit = INT_MIN;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			__le64 buf64[1];

			/* this is the very first bit */
			if (!map) {
				last_startbit = rounddown(bit, BITS_PER_U64);
				map = (u64)1 << (bit - last_startbit);
				continue;
			}

			/* write the last node */
			buf[0] = cpu_to_le32(last_startbit);
			rc = put_entry(buf, sizeof(u32), 1, fp);
			if (rc)
				return rc;

			buf64[0] = cpu_to_le64(map);
			rc = put_entry(buf64, sizeof(u64), 1, fp);
			if (rc)
				return rc;

			/* set up for the next node */
			map = 0;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		map |= (u64)1 << (bit - last_startbit);
	}
	/* write the last node */
	if (map) {
		__le64 buf64[1];

		/* write the last node */
		buf[0] = cpu_to_le32(last_startbit);
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;

		buf64[0] = cpu_to_le64(map);
		rc = put_entry(buf64, sizeof(u64), 1, fp);
		if (rc)
			return rc;
	}
	return 0;
}