// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64
#define CACHE_MIN_TTL 120 /* 2 minutes */

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))
struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;
	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};
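
/*
 * Note on the data model (summary of the structs above): each cache_entry is
 * keyed by its canonical DFS path and carries the list of targets
 * (cache_dfs_tgt) returned in the referral, with @tgthint pointing at the
 * currently preferred target inside @tlist.
 */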

/* List of referral server sessions per dfs mount */
struct mount_group {
	struct list_head list;
	uuid_t id;
	struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
	int num_sessions;
	spinlock_t lock;
	struct list_head refresh_list;
	struct kref refcount;
};

static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_cp;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(mount_group_list);
static DEFINE_MUTEX(mount_group_list_lock);

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
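
/*
 * Locking summary for the globals above (as used throughout this file):
 *  - htable_rw_lock protects cache_htable and the cache entries hashed in it
 *  - cache_ttl_lock protects cache_ttl and the scheduling of refresh_task
 *  - mount_group_list_lock protects mount_group_list
 *  - mg->lock protects the sessions array inside a mount_group
 */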

static void get_ipc_unc(const char *ref_path, char *ipc, size_t ipclen)
{
	const char *host;
	size_t len;

	extract_unc_hostname(ref_path, &host, &len);
	scnprintf(ipc, ipclen, "\\\\%.*s\\IPC$", (int)len, host);
}

static struct cifs_ses *find_ipc_from_server_path(struct cifs_ses **ses, const char *path)
{
	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};

	get_ipc_unc(path, unc, sizeof(unc));
	for (; *ses; ses++) {
		if (!strcasecmp(unc, (*ses)->tcon_ipc->treeName))
			return *ses;
	}
	return ERR_PTR(-ENOENT);
}

static void __mount_group_release(struct mount_group *mg)
{
	int i;

	for (i = 0; i < mg->num_sessions; i++)
		cifs_put_smb_ses(mg->sessions[i]);
	kfree(mg);
}

static void mount_group_release(struct kref *kref)
{
	struct mount_group *mg = container_of(kref, struct mount_group, refcount);

	mutex_lock(&mount_group_list_lock);
	list_del(&mg->list);
	mutex_unlock(&mount_group_list_lock);
	__mount_group_release(mg);
}

static struct mount_group *find_mount_group_locked(const uuid_t *id)
{
	struct mount_group *mg;

	list_for_each_entry(mg, &mount_group_list, list) {
		if (uuid_equal(&mg->id, id))
			return mg;
	}
	return ERR_PTR(-ENOENT);
}

static struct mount_group *__get_mount_group_locked(const uuid_t *id)
{
	struct mount_group *mg;

	mg = find_mount_group_locked(id);
	if (!IS_ERR(mg))
		return mg;

	mg = kmalloc(sizeof(*mg), GFP_KERNEL);
	if (!mg)
		return ERR_PTR(-ENOMEM);
	kref_init(&mg->refcount);
	uuid_copy(&mg->id, id);
	mg->num_sessions = 0;
	spin_lock_init(&mg->lock);
	list_add(&mg->list, &mount_group_list);
	return mg;
}

static struct mount_group *get_mount_group(const uuid_t *id)
{
	struct mount_group *mg;

	mutex_lock(&mount_group_list_lock);
	mg = __get_mount_group_locked(id);
	if (!IS_ERR(mg))
		kref_get(&mg->refcount);
	mutex_unlock(&mount_group_list_lock);

	return mg;
}

static void free_mount_group_list(void)
{
	struct mount_group *mg, *tmp_mg;

	list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
		list_del_init(&mg->list);
		__mount_group_release(mg);
	}
}
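
/*
 * Lifetime note: get_mount_group() returns the group with an extra reference
 * held; callers drop it with kref_put(&mg->refcount, mount_group_release),
 * which unlinks the group from mount_group_list and puts its sessions.
 */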

/**
 * dfs_cache_canonical_path - get a canonical DFS path
 *
 * @path: DFS path
 * @cp: codepage
 * @remap: mapping type
 *
 * Return canonical path if success, otherwise error.
 */
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
{
	char *tmp;
	int plen = 0;
	char *npath;

	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return ERR_PTR(-EINVAL);

	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
		if (!tmp) {
			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}

		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
		kfree(tmp);
		if (!npath) {
			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}
	} else {
		npath = kstrdup(path, GFP_KERNEL);
		if (!npath)
			return ERR_PTR(-ENOMEM);
	}
	convert_delimiter(npath, '\\');
	return npath;
}
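
/*
 * Illustrative example (hypothetical path): dfs_cache_canonical_path() turns a
 * mount path such as "//srv/dfsroot/link" into "\\srv\dfsroot\link", i.e. the
 * @cache_cp charset with backslash delimiters, which is the form every other
 * cache routine expects as a lookup key.
 */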

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}
static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};
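
/*
 * Usage note (sketch): the write handler only accepts the character '0', so
 * writing "0" to the procfs file backed by dfscache_proc_ops drops every
 * cached referral, e.g.:
 *
 *	echo 0 > /proc/fs/cifs/dfscache
 *
 * The exact procfs path depends on where cifs_debug.c registers the entry.
 */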
#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags: 0x%x\n"
			 "path_consumed: %d\n"
			 "server_type: 0x%x\n"
			 "ref_flag: 0x%x\n"
			 "path_name: %s\n"
			 "node_name: %s\n"
			 "ttl: %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

static int cache_entry_hash(const void *data, int size, unsigned int *hash)
{
	int i, clen;
	const unsigned char *s = data;
	wchar_t c;
	unsigned int h = 0;

	for (i = 0; i < size; i += clen) {
		clen = cache_cp->char2uni(&s[i], size - i, &c);
		if (unlikely(clen < 0)) {
			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
			return clen;
		}
		c = cifs_toupper(c);
		h = jhash(&c, sizeof(c), h);
	}
	*hash = h % CACHE_HTABLE_SIZE;
	return 0;
}
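
/*
 * Note: the hash above is computed over the uppercased wide characters of the
 * path (via @cache_cp->char2uni() and cifs_toupper()), so hash-table lookups
 * are case-insensitive and consistent with dfs_path_equal() further below.
 */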
/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}
/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrdup(name, GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}
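
/*
 * Note: when @tgthint is given, the matching target is inserted at the head of
 * @ce->tlist so that it remains the target hint (tgthint always ends up
 * pointing at the first entry); all other targets keep the order returned by
 * the server.
 */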
/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}
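
/*
 * Note: alloc_cache_entry() takes ownership of refs[0].path_name -- it is
 * moved into @ce->path and the referral's pointer is cleared -- so the caller
 * must not free that string once the entry has been allocated.
 */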

static void remove_oldest_entry_locked(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}
/* Add a new DFS cache entry */
static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		remove_oldest_entry_locked();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return rc;

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return 0;
}
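
/*
 * Note: cache_ttl tracks the smallest TTL seen among added entries and is used
 * as the period of @refresh_task, so the refresh worker runs at least as often
 * as the shortest-lived cache entry expires.
 */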

/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		if (unlikely(l1 < 0 && l2 < 0)) {
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}

static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
{
	struct cache_entry *ce;

	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
			dump_ce(ce);
			return ce;
		}
	}
	return ERR_PTR(-EEXIST);
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 *
 * Use whole path components in the match.  Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-EEXIST) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-EEXIST);
}
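
/*
 * Illustrative example (hypothetical path): for "\\dom\dfsroot\link\dir",
 * lookup_cache_entry() first tries the full path, then walks back one path
 * component at a time -- "\\dom\dfsroot\link", then "\\dom\dfsroot" -- and
 * returns the first (longest) cached prefix it finds.
 */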
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_cp);
	free_mount_group_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/* Update a cache entry with the new referral in @refs */
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
				     int numrefs)
{
	int rc;
	char *s, *th = NULL;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrdup(s, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
			    struct dfs_info3_param **refs, int *numrefs)
{
	int rc;
	int i;

	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	*refs = NULL;
	*numrefs = 0;

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!cache_cp))
		return -EINVAL;

	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
					     NO_MAP_UNI_RSVD);
	if (!rc) {
		struct dfs_info3_param *ref = *refs;

		for (i = 0; i < *numrefs; i++)
			convert_delimiter(ref[i].path_name, '\\');
	}
	return rc;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one.  Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */
static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
{
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_write(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	/*
	 * Either the entry was not found, or it is expired.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc)
		goto out_unlock;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry_locked(ce, refs, numrefs);
		goto out_unlock;
	}

	rc = add_cache_entry_locked(refs, numrefs);

out_unlock:
	up_write(&htable_rw_lock);
	free_dfs_info_array(refs, numrefs);
	return rc;
}

/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrdup(path, GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrdup(target, GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->ref_flags;
	ref->flags = ce->hdr_flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}
/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	rc = cache_refresh_path(xid, ses, npath);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
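
/*
 * Usage sketch (illustrative only -- error handling trimmed, and the local
 * variables below are hypothetical):
 *
 *	struct dfs_cache_tgt_list tl;
 *	struct dfs_info3_param ref;
 *	int rc;
 *
 *	rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
 *			    fullpath, &ref, &tl);
 *	if (!rc) {
 *		walk @tl with the target iterator helpers from dfs_cache.h,
 *		then release everything:
 *		free_dfs_info_param(&ref);
 *		dfs_cache_free_tgts(&tl);
 *	}
 */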
/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	struct cache_entry *ce;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @cp: codepage
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *cp, int remap, const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = cache_refresh_path(xid, ses, npath);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	kfree(npath);
	return rc;
}
/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	return rc;
}
/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
/**
 * dfs_cache_add_refsrv_session - add SMB session of referral server
 *
 * @mount_id: mount group uuid to lookup.
 * @ses: reference counted SMB session of referral server.
 */
void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
{
	struct mount_group *mg;

	if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
		return;

	mg = get_mount_group(mount_id);
	if (WARN_ON_ONCE(IS_ERR(mg)))
		return;

	spin_lock(&mg->lock);
	if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
		mg->sessions[mg->num_sessions++] = ses;
	spin_unlock(&mg->lock);
	kref_put(&mg->refcount, mount_group_release);
}
/**
 * dfs_cache_put_refsrv_sessions - put all referral server sessions
 *
 * Put all SMB sessions from the given mount group id.
 *
 * @mount_id: mount group uuid to lookup.
 */
void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
{
	struct mount_group *mg;

	if (!mount_id || uuid_is_null(mount_id))
		return;

	mutex_lock(&mount_group_list_lock);
	mg = find_mount_group_locked(mount_id);
	if (IS_ERR(mg)) {
		mutex_unlock(&mount_group_list_lock);
		return;
	}
	mutex_unlock(&mount_group_list_lock);
	kref_put(&mg->refcount, mount_group_release);
}

/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char *s, sep, *p;
	size_t len;
	size_t plen1, plen2;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	*share = NULL;
	*prefix = NULL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	s = strchr(it->it_name + 1, sep);
	if (!s)
		return -EINVAL;

	/* point to prefix in target node */
	s = strchrnul(s + 1, sep);

	/* extract target share */
	*share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
	if (!*share)
		return -ENOMEM;

	/* skip separator */
	if (*s)
		s++;
	/* point to prefix in DFS path */
	p = path + it->it_path_consumed;
	if (*p == sep)
		p++;

	/* merge prefix paths from DFS path and target node */
	plen1 = it->it_name + strlen(it->it_name) - s;
	plen2 = path + strlen(path) - p;
	if (plen1 || plen2) {
		len = plen1 + plen2 + 2;
		*prefix = kmalloc(len, GFP_KERNEL);
		if (!*prefix) {
			kfree(*share);
			*share = NULL;
			return -ENOMEM;
		}
		if (plen1)
			scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
		else
			strscpy(*prefix, p, len);
	}
	return 0;
}
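
/*
 * Illustrative example (hypothetical names): with @path "\\dom\dfsroot\dir1",
 * it_path_consumed covering "\\dom\dfsroot" and a target node of
 * "\\srv\share\subdir", this returns *share = "\\srv\share" and
 * *prefix = "subdir\dir1"; with a target of just "\\srv\share", *prefix is
 * simply "dir1".
 */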

/*
 * Refresh all active dfs mounts regardless of whether they are in cache or not.
 * (cache can be cleared)
 */
static void refresh_mounts(struct cifs_ses **sessions)
{
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon, *ntcon;
	struct list_head tcons;
	unsigned int xid;

	INIT_LIST_HEAD(&tcons);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
				if (tcon->dfs_path) {
					tcon->tc_count++;
					list_add_tail(&tcon->ulist, &tcons);
				}
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
		const char *path = tcon->dfs_path + 1;
		struct cache_entry *ce;
		struct dfs_info3_param *refs = NULL;
		int numrefs = 0;
		bool needs_refresh = false;
		int rc = 0;

		list_del_init(&tcon->ulist);

		ses = find_ipc_from_server_path(sessions, path);
		if (IS_ERR(ses))
			goto next_tcon;

		down_read(&htable_rw_lock);
		ce = lookup_cache_entry(path);
		needs_refresh = IS_ERR(ce) || cache_entry_expired(ce);
		up_read(&htable_rw_lock);

		if (!needs_refresh)
			goto next_tcon;

		xid = get_xid();
		rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
		free_xid(xid);

		/* Create or update a cache entry with the new referral */
		if (!rc) {
			down_write(&htable_rw_lock);
			ce = lookup_cache_entry(path);
			if (IS_ERR(ce))
				add_cache_entry_locked(refs, numrefs);
			else if (cache_entry_expired(ce))
				update_cache_entry_locked(ce, refs, numrefs);
			up_write(&htable_rw_lock);
		}

next_tcon:
		free_dfs_info_array(refs, numrefs);
		cifs_put_tcon(tcon);
	}
}

static void refresh_cache(struct cifs_ses **sessions)
{
	int i;
	struct cifs_ses *ses;
	unsigned int xid;
	char *ref_paths[CACHE_MAX_ENTRIES];
	int count = 0;
	struct cache_entry *ce;

	/*
	 * Refresh all cached entries.  Get all new referrals outside critical section to avoid
	 * starvation while performing SMB2 IOCTL on broken or slow connections.
	 * The cache entries may cover more paths than the active mounts
	 * (e.g. domain-based DFS referrals or multi tier DFS setups).
	 */
	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (count == ARRAY_SIZE(ref_paths))
				goto out_unlock;
			if (hlist_unhashed(&ce->hlist) || !cache_entry_expired(ce) ||
			    IS_ERR(find_ipc_from_server_path(sessions, ce->path)))
				continue;
			ref_paths[count++] = kstrdup(ce->path, GFP_ATOMIC);
		}
	}

out_unlock:
	up_read(&htable_rw_lock);

	for (i = 0; i < count; i++) {
		char *path = ref_paths[i];
		struct dfs_info3_param *refs = NULL;
		int numrefs = 0;
		int rc = 0;

		if (!path)
			continue;

		ses = find_ipc_from_server_path(sessions, path);
		if (IS_ERR(ses))
			goto next_referral;

		xid = get_xid();
		rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
		free_xid(xid);

		if (!rc) {
			down_write(&htable_rw_lock);
			ce = lookup_cache_entry(path);
			/*
			 * We need to re-check it because other tasks might have it deleted or
			 * updated.
			 */
			if (!IS_ERR(ce) && cache_entry_expired(ce))
				update_cache_entry_locked(ce, refs, numrefs);
			up_write(&htable_rw_lock);
		}

next_referral:
		kfree(path);
		free_dfs_info_array(refs, numrefs);
	}
}
/*
 * Worker that will refresh DFS cache and active mounts based on lowest TTL value from a DFS
 * referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct list_head mglist;
	struct mount_group *mg, *tmp_mg;
	struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
	int max_sessions = ARRAY_SIZE(sessions) - 1;
	int i = 0, count;

	INIT_LIST_HEAD(&mglist);

	/* Get references of mount groups */
	mutex_lock(&mount_group_list_lock);
	list_for_each_entry(mg, &mount_group_list, list) {
		kref_get(&mg->refcount);
		list_add(&mg->refresh_list, &mglist);
	}
	mutex_unlock(&mount_group_list_lock);

	/* Fill in local array with a NULL-terminated list of all referral server sessions */
	list_for_each_entry(mg, &mglist, refresh_list) {
		if (i >= max_sessions)
			break;

		spin_lock(&mg->lock);
		if (i + mg->num_sessions > max_sessions)
			count = max_sessions - i;
		else
			count = mg->num_sessions;
		memcpy(&sessions[i], mg->sessions, count * sizeof(mg->sessions[0]));
		spin_unlock(&mg->lock);
		i += count;
	}

	if (sessions[0]) {
		/* Refresh all active mounts and cached entries */
		refresh_mounts(sessions);
		refresh_cache(sessions);
	}

	list_for_each_entry_safe(mg, tmp_mg, &mglist, refresh_list) {
		list_del_init(&mg->refresh_list);
		kref_put(&mg->refcount, mount_group_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}