tracing: Create a sparse bitmask for pid filtering

When the trace_pid_list was created, the default pid_max was 32768.
A bitmask that holds one bit for each of those 32768 pids takes up
4096 bytes (one page). Having a one page bitmask was not much of a
problem, and that was what was used for mapping pids. But today,
systems are bigger and can run more tasks, and the default pid_max is
now usually set to 4194304, which requires 524288 bytes to hold a bit
for every pid. Worse yet, pid_max can be set to 2^30 (1073741824, or
1G), which would take 134217728 bytes (128M) of memory to store this
array.

Since the pid_list array is very sparsely populated, it is a huge
waste of memory to store a bit for every possible pid when most will
never be set. Instead, use a page table scheme to store the array,
and allow it to handle pids of up to 30 bits.

The pid_mask starts out with 256 entries for the first 8 MSBs. This
costs 1K on 32 bit architectures and 2K on 64 bit. Each of these
entries points to a 256 entry array that stores the next 8 bits of
the pid (another 1 or 2K), and each of those in turn points to a 2K
byte bitmask that covers the 14 LSBs, or 16384 pids.

When the trace_pid_list is allocated, the 1/2K array for the upper
bits is allocated with it, along with a cache of upper chunks and
lower chunks (default 6 of each). When a bit is "set", chunks are
pulled from this free list and added to the array as needed. If the
free list drops below a given level (default 2), an irq_work is
triggered to refill the cache.

On clearing a bit, if the clear causes a bitmask to become zero, that
chunk is placed back into the free cache for later use, keeping the
need to allocate more to a minimum.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

// SPDX-License-Identifier: GPL-2.0

/* Do not include this file directly. */

#ifndef _TRACE_INTERNAL_PID_LIST_H
#define _TRACE_INTERNAL_PID_LIST_H
/*
 * In order to keep track of what pids to trace, a tree is created much
 * like page tables are used. This creates a sparse bit map, where the
 * tree is filled in only when needed. A PID is at most 30 bits (see
 * linux/thread.h), and is broken up into 3 sections based on those
 * bits. The 8 MSBs make up the "upper1" section, the next 8 bits make
 * up the "upper2" section, and the 14 LSBs make up the "lower" section.
 *
 * A trace_pid_list structure holds the "upper1" section in an array of
 * 256 pointers (1 or 2K in size) to "upper_chunk" unions, where each
 * union has an array of 256 pointers (1 or 2K in size) to "lower_chunk"
 * unions, each of which holds a 2K byte bitmask representing the 14
 * LSBs of the PID (256 * 8 = 2048 bytes = 16384 bits).
 *
 * When a trace_pid_list is allocated, it includes the 256 pointer array
 * of the upper1 unions. Then a "cache" of upper and lower chunks is
 * allocated, from which these will be assigned as needed.
 *
 * When a bit is set in the pid_list bitmask, the 8 MSBs of the pid are
 * masked off and used to index the array in the pid_list to find the
 * upper_chunk to use. If that element is NULL, one is retrieved from
 * the upper_list cache. If none is available, -ENOMEM is returned.
 *
 * The next 8 bits are used to index into the "upper2" section (the
 * upper_chunk's array). If that element is NULL, a lower_chunk is
 * retrieved from the lower_list cache. Again, if none is available,
 * -ENOMEM is returned.
 *
 * Finally, the 14 LSBs of the PID are used to set the bit in the 16384
 * bit bitmask (made up of 2K bytes).
 *
 * When the second upper section or the lower section has its last bit
 * cleared, it is added back to the free list to be reused when needed.
 */
#define UPPER_BITS	8
#define UPPER_MAX	(1 << UPPER_BITS)
#define UPPER1_SIZE	(1 << UPPER_BITS)
#define UPPER2_SIZE	(1 << UPPER_BITS)

#define LOWER_BITS	14
#define LOWER_MAX	(1 << LOWER_BITS)
#define LOWER_SIZE	(LOWER_MAX / BITS_PER_LONG)

#define UPPER1_SHIFT	(LOWER_BITS + UPPER_BITS)
#define UPPER2_SHIFT	LOWER_BITS
#define LOWER_MASK	(LOWER_MAX - 1)

#define UPPER_MASK	(UPPER_MAX - 1)

/* According to linux/thread.h pids can not be bigger than or equal to 1 << 30 */
#define MAX_PID		(1 << 30)

/* Just keep 6 chunks of both upper and lower in the cache on alloc */
#define CHUNK_ALLOC	6

/* Have 2 chunks free, trigger a refill of the cache */
#define CHUNK_REALLOC	2
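
/*
 * Illustrative sketch (not part of the original header): how the macros
 * above can be combined to break a pid into the three indices described
 * in the comment at the top of this file. The function name and output
 * parameters are hypothetical; the real helpers live in the pid_list
 * implementation, so the sketch is left disabled.
 */
#if 0
static inline int pid_split(unsigned int pid, unsigned int *upper1,
			    unsigned int *upper2, unsigned int *lower)
{
	if (pid >= MAX_PID)
		return -1;

	*upper1 = pid >> UPPER1_SHIFT;			/* bits 22-29 */
	*upper2 = (pid >> UPPER2_SHIFT) & UPPER_MASK;	/* bits 14-21 */
	*lower  = pid & LOWER_MASK;			/* bits 0-13 */

	return 0;
}
#endif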
union lower_chunk {
	union lower_chunk	*next;
	unsigned long		data[LOWER_SIZE]; // 2K in size
};

union upper_chunk {
	union upper_chunk	*next;
	union lower_chunk	*data[UPPER2_SIZE]; // 1 or 2K in size
};
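
/*
 * Size check sketch (not in the original header): with LOWER_BITS = 14
 * the lower bitmask is 16384 bits = 2048 bytes on both 32 bit and 64 bit,
 * and an upper chunk is 256 pointers, i.e. 1K or 2K depending on the
 * pointer size. Assuming static_assert() from <linux/build_bug.h> may be
 * used at file scope, that could be verified at compile time:
 */
#if 0
static_assert(sizeof(union lower_chunk) == LOWER_MAX / 8);
static_assert(sizeof(union upper_chunk) == UPPER2_SIZE * sizeof(void *));
#endif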
struct trace_pid_list {
	raw_spinlock_t		lock;
	struct irq_work		refill_irqwork;
	union upper_chunk	*upper[UPPER1_SIZE]; // 1 or 2K in size
	union upper_chunk	*upper_list;
	union lower_chunk	*lower_list;
	int			free_upper_chunks;
	int			free_lower_chunks;
};
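
/*
 * Illustrative sketch (not part of the original header): the "set" walk
 * described in the comment at the top of this file. Locking and the
 * irq_work based cache refill are omitted, and get_upper_chunk()/
 * get_lower_chunk() stand in for whatever the implementation uses to
 * pull a chunk off the upper_list/lower_list caches. Clearing works in
 * reverse: the bit is cleared, and a chunk whose bitmask becomes all
 * zero is placed back on the free list for reuse.
 */
#if 0
static int pid_list_set(struct trace_pid_list *pid_list, unsigned int pid)
{
	union upper_chunk *upper_chunk;
	union lower_chunk *lower_chunk;
	unsigned int upper1, upper2, lower;

	if (pid >= MAX_PID)
		return -EINVAL;

	upper1 = pid >> UPPER1_SHIFT;
	upper2 = (pid >> UPPER2_SHIFT) & UPPER_MASK;
	lower = pid & LOWER_MASK;

	upper_chunk = pid_list->upper[upper1];
	if (!upper_chunk) {
		upper_chunk = get_upper_chunk(pid_list);
		if (!upper_chunk)
			return -ENOMEM;
		pid_list->upper[upper1] = upper_chunk;
	}

	lower_chunk = upper_chunk->data[upper2];
	if (!lower_chunk) {
		lower_chunk = get_lower_chunk(pid_list);
		if (!lower_chunk)
			return -ENOMEM;
		upper_chunk->data[upper2] = lower_chunk;
	}

	set_bit(lower, lower_chunk->data);

	return 0;
}
#endif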
#endif /* _TRACE_INTERNAL_PID_LIST_H */