// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
*/
#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
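/*
 * Update the cached vDSO base address when userspace moves the mapping,
 * e.g. via mremap().
 */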
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}
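/*
 * Sanity-check the vDSO image built into the kernel and populate the page
 * array backing its code mapping.
 */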
static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
		vdso_info[abi].vdso_code_end -
		vdso_info[abi].vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details check_vma_flags() and __access_remote_vm()
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif
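/*
 * Fault handler for the [vvar] mapping: select the physical page backing the
 * faulting offset, taking the task's time namespace into account.
 */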
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}
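/*
 * Install the vvar data pages followed by the vDSO code pages into @mm.
 * Callers take the mmap write lock before calling this.
 */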
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}
#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;
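/*
 * Keep the cached sigpage address in sync when userspace moves the mapping.
 */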
static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}
static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
		.mremap	= aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};
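/*
 * Allocate the [vectors] page and copy the kuser helpers to its top, where
 * AArch32 userspace expects to find them.
 */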
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	return 0;
}
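/*
 * Allocate the compat sigpage: fill it with a poison word, then place the
 * sigreturn trampolines at its start.
 */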
#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}
static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}
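/*
 * Allocate all AArch32 compat pages up front; this runs once at boot via
 * arch_initcall().
 */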
static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);
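/*
 * Map the kuser helper page at the fixed AArch32 vectors address.
 */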
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}
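/*
 * Map the AArch32 sigreturn trampoline page ("[sigpage]") into @mm.
 */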
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
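/*
 * Set up the kuser helpers, the compat vDSO (when enabled) and the sigreturn
 * page for a new AArch32 process.
 */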
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */
enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};
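/*
 * Wire up the AArch64 special mappings and build the vDSO page array at boot.
 */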
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);
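/*
 * Called by the ELF loader at exec time to map the AArch64 vDSO into the new
 * process.
 */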
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}