// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
#include <inttypes.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"
static struct lock_contention_bpf *skel;
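
/*
 * Set up the lock_contention BPF skeleton: size the maps according to the
 * aggregation mode and filters, load the skeleton, populate the filter maps
 * and runtime knobs, then attach the programs.
 */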
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}
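
	/*
	 * Map sizes must be fixed before lock_contention_bpf__load();
	 * shrink unused maps to a single entry to save memory.
	 */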
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);
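
	/* count how many entries each filter map will need */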
	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;
	if (con->filters->nr_cgrps)
		ncgrps = con->filters->nr_cgrps;

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
	}

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
	bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}
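
	/*
	 * The filter maps can only be populated once the skeleton is loaded
	 * and the map fds are available.
	 */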
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}
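
	/* when only a workload is given (no explicit target), filter on its pid */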
	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	if (con->filters->nr_cgrps) {
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		for (i = 0; i < con->filters->nr_cgrps; i++)
			bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
	}

	/* these don't work well if in the rodata section */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;
	skel->bss->lock_owner = con->owner;

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		if (cgroup_is_v2("perf_event"))
			skel->bss->use_cgroup_v2 = 1;

		read_all_cgroups(&con->cgroups);
	}

	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	lock_contention_bpf__attach(skel);
	return 0;
}

/*
 * Run the BPF program directly using BPF_PROG_TEST_RUN to update the end
 * timestamp in ktime so that it can calculate the delta easily.
 */
static void mark_end_timestamp(void)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.flags = BPF_F_TEST_RUN_ON_CPU,
	);
	int prog_fd = bpf_program__fd(skel->progs.end_timestamp);

	bpf_prog_test_run_opts(prog_fd, &opts);
}
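
/*
 * Fold a still-pending contention entry (one that saw lock:contention_begin
 * but not lock:contention_end) into the lock_stat map, using end_ts as its
 * end time.
 */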
static void update_lock_stat(int map_fd, int pid, u64 end_ts,
			     enum lock_aggr_mode aggr_mode,
			     struct tstamp_data *ts_data)
{
	u64 delta;
	struct contention_key stat_key = {};
	struct contention_data stat_data;

	if (ts_data->timestamp >= end_ts)
		return;

	delta = end_ts - ts_data->timestamp;

	switch (aggr_mode) {
	case LOCK_AGGR_CALLER:
		stat_key.stack_id = ts_data->stack_id;
		break;
	case LOCK_AGGR_TASK:
		stat_key.pid = pid;
		break;
	case LOCK_AGGR_ADDR:
		stat_key.lock_addr_or_cgroup = ts_data->lock;
		break;
	case LOCK_AGGR_CGROUP:
		/* TODO */
		return;
	default:
		return;
	}

	if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0)
		return;

	stat_data.total_time += delta;
	stat_data.count++;

	if (delta > stat_data.max_time)
		stat_data.max_time = delta;
	if (delta < stat_data.min_time)
		stat_data.min_time = delta;

	bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST);
}

/*
 * Account entries in the tstamp map (which didn't see the corresponding
 * lock:contention_end tracepoint) using end_ts.
 */
static void account_end_timestamp(struct lock_contention *con)
{
	int ts_fd, stat_fd;
	int *prev_key, key;
	u64 end_ts = skel->bss->end_ts;
	int total_cpus;
	enum lock_aggr_mode aggr_mode = con->aggr_mode;
	struct tstamp_data ts_data, *cpu_data;

	/* Iterate per-task tstamp map (key = TID) */
	ts_fd = bpf_map__fd(skel->maps.tstamp);
	stat_fd = bpf_map__fd(skel->maps.lock_stat);

	prev_key = NULL;
	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(ts_fd, &key, &ts_data) == 0) {
			int pid = key;

			if (aggr_mode == LOCK_AGGR_TASK && con->owner)
				pid = ts_data.flags;

			update_lock_stat(stat_fd, pid, end_ts, aggr_mode,
					 &ts_data);
		}

		prev_key = &key;
	}

	/* Next, check the per-cpu tstamp map which doesn't have a TID. */
	if (aggr_mode == LOCK_AGGR_TASK || aggr_mode == LOCK_AGGR_CGROUP)
		return;

	total_cpus = cpu__max_cpu().cpu;
	ts_fd = bpf_map__fd(skel->maps.tstamp_cpu);

	cpu_data = calloc(total_cpus, sizeof(*cpu_data));
	if (cpu_data == NULL)
		return;

	prev_key = NULL;
	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(ts_fd, &key, cpu_data) < 0)
			goto next;

		for (int i = 0; i < total_cpus; i++) {
			update_lock_stat(stat_fd, -1, end_ts, aggr_mode,
					 &cpu_data[i]);
		}

next:
		prev_key = &key;
	}
	free(cpu_data);
}

int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	mark_end_timestamp();
	return 0;
}
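
/*
 * Resolve a contention_key to a display name: the task comm, the lock's
 * kernel symbol, the cgroup name or the caller symbol+offset, depending on
 * the aggregation mode.
 */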
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
		}

		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		u64 cgrp_id = key->lock_addr_or_cgroup;
		struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

		if (cgrp)
			return cgrp->name;

		snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64 "", cgrp_id);
		return name_buf;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}
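
/*
 * Drain the BPF lock_stat map into the tool's lock_stat entries, resolving
 * names and (optionally) callstacks along the way.
 */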
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	account_end_timestamp(con);

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = machine__findnew_thread(machine,
							      /*pid=*/0,
							      /*tid=*/0);

		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}
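
	/*
	 * For the address aggregation, run the collect_lock_syms program via
	 * BPF_PROG_TEST_RUN so that the lock_syms map (checked in
	 * lock_contention_get_name() for locks like rq_lock) gets populated.
	 */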
	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure it loads the kernel map */
	maps__load_first(machine->kmaps);

	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);

		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}
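
		/* map the BPF key to the lock_stat key for this aggregation mode */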
		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
		case LOCK_AGGR_CGROUP:
			ls_key = key.lock_addr_or_cgroup;
			break;
		default:
			goto next;
		}

		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);
	return err;
}
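
/*
 * Tear down the BPF skeleton and release the cgroup entries collected by
 * read_all_cgroups() in lock_contention_prepare().
 */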
int lock_contention_finish(struct lock_contention *con)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	while (!RB_EMPTY_ROOT(&con->cgroups)) {
		struct rb_node *node = rb_first(&con->cgroups);
		struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

		rb_erase(node, &con->cgroups);
		cgroup__put(cgrp);
	}

	return 0;
}