// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))

struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int ttl;
	int srvtype;
	int flags;
	struct timespec64 etime;
	int path_consumed;
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

struct vol_info {
	char *fullpath;
	spinlock_t smb_vol_lock;
	struct smb_vol smb_vol;
	char *mntdata;
	struct list_head list;
	struct list_head rlist;
	struct kref refcnt;
};

static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_nlsc;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);

static int get_normalized_path(const char *path, char **npath)
{
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return -EINVAL;

	if (*path == '\\') {
		*npath = (char *)path;
	} else {
		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
		if (!*npath)
			return -ENOMEM;
		convert_delimiter(*npath, '\\');
	}
	return 0;
}

static inline void free_normalized_path(const char *path, char *npath)
{
	if (path != npath)
		kfree(npath);
}
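
/*
 * For illustration: get_normalized_path("/srv/dfs/link", &npath) allocates
 * and returns "\srv\dfs\link", whereas a path that already starts with a
 * backslash is returned as-is with no allocation, which is why
 * free_normalized_path() only frees @npath when it differs from @path.
 */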

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
				   "interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path,
				   ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec,
				   IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
				   ce->path_consumed,
				   cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};
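
/*
 * Usage sketch, assuming these ops are registered by the module's debug code
 * as /proc/fs/cifs/dfscache:
 *
 *	cat /proc/fs/cifs/dfscache		(dump all cache entries and targets)
 *	echo 0 > /proc/fs/cifs/dfscache		(drop every cached referral)
 *
 * Writing anything other than '0' returns -EINVAL.
 */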

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags: 0x%x\n"
			 "path_consumed: %d\n"
			 "server_type: 0x%x\n"
			 "ref_flag: 0x%x\n"
			 "path_name: %s\n"
			 "node_name: %s\n"
			 "ttl: %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif

/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (CACHE_HTABLE_SIZE - 1);
}
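
/*
 * The mask above is only a valid bucket index because CACHE_HTABLE_SIZE is a
 * power of two: with CACHE_HTABLE_SIZE == 32, the low five bits of the jhash
 * value select one of the 32 hlist buckets.
 */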

/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}
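
/*
 * Worked example: a referral with ttl == 300 that is cached at wall-clock
 * time T gets etime == T + 300s, so cache_entry_expired() starts returning
 * true once the coarse real-time clock reaches that instant.
 */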

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}

/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(const char *path,
					     const struct dfs_info3_param *refs,
					     int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!ce->path) {
		kmem_cache_free(cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}

/* Add a new DFS cache entry */
static int add_cache_entry(const char *path, unsigned int hash,
			   struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	down_write(&htable_rw_lock);
	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);
	up_write(&htable_rw_lock);

	return 0;
}

static struct cache_entry *__lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	unsigned int h;
	bool found = false;

	h = cache_entry_hash(path, strlen(path));

	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
		if (!strcasecmp(path, ce->path)) {
			found = true;
			dump_ce(ce);
			break;
		}
	}

	if (!found)
		ce = ERR_PTR(-ENOENT);
	return ce;
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
{
	struct cache_entry *ce = ERR_PTR(-ENOENT);
	unsigned int h;
	int cnt = 0;
	char *npath;
	char *s, *e;
	char sep;

	npath = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	s = npath;
	sep = *npath;
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		h = cache_entry_hash(path, strlen(path));
		ce = __lookup_cache_entry(path);
		goto out;
	}
	/*
	 * Handle paths that have more than two path components and are a
	 * complete prefix of the DFS referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link
	 * Referral Request".
	 */
	h = cache_entry_hash(npath, strlen(npath));
	e = npath + strlen(npath) - 1;
	while (e > s) {
		char tmp;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			goto out;

		tmp = *(e+1);
		*(e+1) = 0;

		ce = __lookup_cache_entry(npath);
		if (!IS_ERR(ce)) {
			h = cache_entry_hash(npath, strlen(npath));
			break;
		}

		*(e+1) = tmp;
		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
out:
	if (hash)
		*hash = h;
	kfree(npath);
	return ce;
}
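
/*
 * Illustration of the prefix walk above (hypothetical path): a lookup of
 * "\domain\dfs\link\dir1\dir2" first tries the full path, then successively
 * shorter whole-component prefixes ("\domain\dfs\link\dir1", then
 * "\domain\dfs\link") and returns the longest cached one; paths with fewer
 * than three components are matched exactly instead.
 */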

static void __vol_release(struct vol_info *vi)
{
	kfree(vi->fullpath);
	kfree(vi->mntdata);
	cifs_cleanup_volume_info_contents(&vi->smb_vol);
	kfree(vi);
}

static void vol_release(struct kref *kref)
{
	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);

	spin_lock(&vol_list_lock);
	list_del(&vi->list);
	spin_unlock(&vol_list_lock);
	__vol_release(vi);
}

static inline void free_vol_list(void)
{
	struct vol_info *vi, *nvi;

	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
		list_del_init(&vi->list);
		__vol_release(vi);
	}
}

/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_nlsc);
	free_vol_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/* Must be called with htable_rw_lock held */
static int __update_cache_entry(const char *path,
				const struct dfs_info3_param *refs,
				int numrefs)
{
	int rc;
	struct cache_entry *ce;
	char *s, *th = NULL;

	ce = lookup_cache_entry(path, NULL);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrndup(s, strlen(s), GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, struct dfs_info3_param **refs,
			    int *numrefs)
{
	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!nls_codepage))
		return -EINVAL;

	*refs = NULL;
	*numrefs = 0;

	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
					       nls_codepage, remap);
}

/* Update an expired cache entry by getting a new DFS referral from server */
static int update_cache_entry(const char *path,
			      const struct dfs_info3_param *refs,
			      int numrefs)
{
	int rc;

	down_write(&htable_rw_lock);
	rc = __update_cache_entry(path, refs, numrefs);
	up_write(&htable_rw_lock);

	return rc;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, bool noreq)
{
	int rc;
	unsigned int hash;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path, &hash);

	/*
	 * If @noreq is set, no requests will be sent to the server. Just return
	 * the cache entry.
	 */
	if (noreq) {
		up_read(&htable_rw_lock);
		return PTR_ERR_OR_ZERO(ce);
	}

	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_read(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	up_read(&htable_rw_lock);

	/*
	 * No entry was found.
	 *
	 * Request a new DFS referral in order to create a new cache entry, or
	 * update an existing one.
	 */
	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
			      &refs, &numrefs);
	if (rc)
		return rc;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry(path, refs, numrefs);
		goto out_free_refs;
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
			 __func__, CACHE_MAX_ENTRIES);
		down_write(&htable_rw_lock);
		remove_oldest_entry();
		up_write(&htable_rw_lock);
	}

	rc = add_cache_entry(path, hash, refs, numrefs);
	if (!rc)
		atomic_inc(&cache_count);

out_free_refs:
	free_dfs_info_array(refs, numrefs);
	return rc;
}

/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}

/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @nls_codepage: charset conversion
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	free_normalized_path(path, npath);

	return rc;
}
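
/*
 * Caller sketch (variable names are illustrative, not from this file; the
 * iterator helpers are the ones declared in dfs_cache.h): resolve a DFS path,
 * walk the returned targets in order (target hint first), then release the
 * list.
 *
 *	struct dfs_cache_tgt_list tl;
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	if (!dfs_cache_find(xid, ses, nls, remap, path, NULL, &tl)) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			cifs_dbg(FYI, "target: %s\n", it->it_name);
 *		dfs_cache_free_tgts(&tl);
 *	}
 */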

/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @nls_codepage: charset conversion
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *nls_codepage, int remap,
			     const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_noreq_update_tgthint(const char *path,
				   const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path,
			       const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
{
	memcpy(new, vol, sizeof(*new));

	if (vol->username) {
		new->username = kstrndup(vol->username, strlen(vol->username),
					 GFP_KERNEL);
		if (!new->username)
			return -ENOMEM;
	}
	if (vol->password) {
		new->password = kstrndup(vol->password, strlen(vol->password),
					 GFP_KERNEL);
		if (!new->password)
			goto err_free_username;
	}
	if (vol->UNC) {
		cifs_dbg(FYI, "%s: vol->UNC: %s\n", __func__, vol->UNC);
		new->UNC = kstrndup(vol->UNC, strlen(vol->UNC), GFP_KERNEL);
		if (!new->UNC)
			goto err_free_password;
	}
	if (vol->domainname) {
		new->domainname = kstrndup(vol->domainname,
					   strlen(vol->domainname), GFP_KERNEL);
		if (!new->domainname)
			goto err_free_unc;
	}
	if (vol->iocharset) {
		new->iocharset = kstrndup(vol->iocharset,
					  strlen(vol->iocharset), GFP_KERNEL);
		if (!new->iocharset)
			goto err_free_domainname;
	}
	if (vol->prepath) {
		cifs_dbg(FYI, "%s: vol->prepath: %s\n", __func__, vol->prepath);
		new->prepath = kstrndup(vol->prepath, strlen(vol->prepath),
					GFP_KERNEL);
		if (!new->prepath)
			goto err_free_iocharset;
	}

	return 0;

err_free_iocharset:
	kfree(new->iocharset);
err_free_domainname:
	kfree(new->domainname);
err_free_unc:
	kfree(new->UNC);
err_free_password:
	kfree_sensitive(new->password);
err_free_username:
	kfree(new->username);
	kfree(new);
	return -ENOMEM;
}

/**
 * dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
 * DFS cache refresh worker.
 *
 * @mntdata: mount data.
 * @vol: cifs volume.
 * @fullpath: origin full path.
 *
 * Return zero if volume was set up correctly, otherwise non-zero.
 */
int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
{
	int rc;
	struct vol_info *vi;

	if (!vol || !fullpath || !mntdata)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi)
		return -ENOMEM;

	vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
	if (!vi->fullpath) {
		rc = -ENOMEM;
		goto err_free_vi;
	}

	rc = dup_vol(vol, &vi->smb_vol);
	if (rc)
		goto err_free_fullpath;

	vi->mntdata = mntdata;
	spin_lock_init(&vi->smb_vol_lock);
	kref_init(&vi->refcnt);

	spin_lock(&vol_list_lock);
	list_add_tail(&vi->list, &vol_list);
	spin_unlock(&vol_list_lock);

	return 0;

err_free_fullpath:
	kfree(vi->fullpath);
err_free_vi:
	kfree(vi);
	return rc;
}
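
/*
 * Volume lifecycle, as implied by the API above and the functions below
 * (sketch):
 *
 *	mount()		dfs_cache_add_vol(mntdata, vol, fullpath);
 *	failover	dfs_cache_update_vol(fullpath, server);
 *	umount()	dfs_cache_del_vol(fullpath);
 *
 * The refresh worker takes its own reference (vi->refcnt) while using an
 * entry, so vol_release() only frees it once all users are done.
 */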

/* Must be called with vol_list_lock held */
static struct vol_info *find_vol(const char *fullpath)
{
	struct vol_info *vi;

	list_for_each_entry(vi, &vol_list, list) {
		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
		if (!strcasecmp(vi->fullpath, fullpath))
			return vi;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * dfs_cache_update_vol - update vol info in DFS cache after failover
 *
 * @fullpath: fullpath to look up in volume list.
 * @server: TCP ses pointer.
 *
 * Return zero if volume was updated, otherwise non-zero.
 */
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
	struct vol_info *vi;

	if (!fullpath || !server)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	if (IS_ERR(vi)) {
		spin_unlock(&vol_list_lock);
		return PTR_ERR(vi);
	}
	kref_get(&vi->refcnt);
	spin_unlock(&vol_list_lock);

	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
	spin_lock(&vi->smb_vol_lock);
	memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
	       sizeof(vi->smb_vol.dstaddr));
	spin_unlock(&vi->smb_vol_lock);

	kref_put(&vi->refcnt, vol_release);

	return 0;
}

/**
 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
 *
 * @fullpath: fullpath to look up in volume list.
 */
void dfs_cache_del_vol(const char *fullpath)
{
	struct vol_info *vi;

	if (!fullpath || !*fullpath)
		return;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	spin_unlock(&vol_list_lock);

	/* find_vol() returns ERR_PTR(-ENOENT) when not found; don't drop a
	 * reference on an error pointer.
	 */
	if (!IS_ERR(vi))
		kref_put(&vi->refcnt, vol_release);
}

/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
			    char **share, char **prefix)
{
	char *s, sep, *p;
	size_t len;
	size_t plen1, plen2;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	*share = NULL;
	*prefix = NULL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	s = strchr(it->it_name + 1, sep);
	if (!s)
		return -EINVAL;

	/* point to prefix in target node */
	s = strchrnul(s + 1, sep);

	/* extract target share */
	*share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
	if (!*share)
		return -ENOMEM;

	/* skip separator */
	if (*s)
		s++;
	/* point to prefix in DFS path */
	p = path + it->it_path_consumed;
	if (*p == sep)
		p++;

	/* merge prefix paths from DFS path and target node */
	plen1 = it->it_name + strlen(it->it_name) - s;
	plen2 = path + strlen(path) - p;
	if (plen1 || plen2) {
		len = plen1 + plen2 + 2;
		*prefix = kmalloc(len, GFP_KERNEL);
		if (!*prefix) {
			kfree(*share);
			*share = NULL;
			return -ENOMEM;
		}
		if (plen1)
			scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s,
				  sep, (int)plen2, p);
		else
			strscpy(*prefix, p, len);
	}
	return 0;
}
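
/*
 * Worked example (hypothetical values): given path "\dom\dfs\dir", a target
 * node "\srv\share\foo", and it_path_consumed covering "\dom\dfs", the
 * function sets *share to "\srv\share" and merges the target's own prefix
 * with the unconsumed tail of @path, yielding *prefix == "foo\dir".
 */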
2018-11-14 21:01:21 +03:00
/* Get all tcons that are within a DFS namespace and can be refreshed */
static void get_tcons ( struct TCP_Server_Info * server , struct list_head * head )
{
struct cifs_ses * ses ;
struct cifs_tcon * tcon ;
INIT_LIST_HEAD ( head ) ;
spin_lock ( & cifs_tcp_ses_lock ) ;
list_for_each_entry ( ses , & server - > smb_ses_list , smb_ses_list ) {
list_for_each_entry ( tcon , & ses - > tcon_list , tcon_list ) {
if ( ! tcon - > need_reconnect & & ! tcon - > need_reopen_files & &
tcon - > dfs_path ) {
tcon - > tc_count + + ;
list_add_tail ( & tcon - > ulist , head ) ;
}
}
if ( ses - > tcon_ipc & & ! ses - > tcon_ipc - > need_reconnect & &
ses - > tcon_ipc - > dfs_path ) {
list_add_tail ( & ses - > tcon_ipc - > ulist , head ) ;
}
}
spin_unlock ( & cifs_tcp_ses_lock ) ;
}

static bool is_dfs_link(const char *path)
{
	char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}

static char *get_dfs_root(const char *path)
{
	char *s, *npath;

	s = strchr(path + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	s = strchr(s + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	npath = kstrndup(path, s - path, GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	return npath;
}

static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}

static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
{
	struct TCP_Server_Info *server;

	server = cifs_find_tcp_session(vol);
	if (IS_ERR_OR_NULL(server))
		return NULL;

	spin_lock(&GlobalMid_Lock);
	if (server->tcpStatus != CifsGood) {
		spin_unlock(&GlobalMid_Lock);
		put_tcp_server(server);
		return NULL;
	}
	spin_unlock(&GlobalMid_Lock);

	return server;
}

/* Find root SMB session out of a DFS link path */
static struct cifs_ses *find_root_ses(struct vol_info *vi,
				      struct cifs_tcon *tcon,
				      const char *path)
{
	char *rpath;
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param ref = {0};
	char *mdata = NULL, *devname = NULL;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct smb_vol vol = {NULL};

	rpath = get_dfs_root(path);
	if (IS_ERR(rpath))
		return ERR_CAST(rpath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(rpath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		ses = ERR_CAST(ce);
		goto out;
	}

	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
	if (rc) {
		up_read(&htable_rw_lock);
		ses = ERR_PTR(rc);
		goto out;
	}

	up_read(&htable_rw_lock);

	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
					   &devname);
	free_dfs_info_param(&ref);

	if (IS_ERR(mdata)) {
		ses = ERR_CAST(mdata);
		mdata = NULL;
		goto out;
	}

	rc = cifs_setup_volume_info(&vol, mdata, devname, false);
	kfree(devname);

	if (rc) {
		ses = ERR_PTR(rc);
		goto out;
	}

	server = get_tcp_server(&vol);
	if (!server) {
		ses = ERR_PTR(-EHOSTDOWN);
		goto out;
	}

	ses = cifs_get_smb_ses(server, &vol);

out:
	cifs_cleanup_volume_info_contents(&vol);
	kfree(mdata);
	kfree(rpath);

	return ses;
}
2018-11-14 21:01:21 +03:00
/* Refresh DFS cache entry from a given tcon */
2019-12-04 23:38:03 +03:00
static int refresh_tcon ( struct vol_info * vi , struct cifs_tcon * tcon )
2018-11-14 21:01:21 +03:00
{
int rc = 0 ;
unsigned int xid ;
char * path , * npath ;
2019-12-04 23:37:58 +03:00
struct cache_entry * ce ;
2019-12-04 23:38:03 +03:00
struct cifs_ses * root_ses = NULL , * ses ;
2018-11-14 21:01:21 +03:00
struct dfs_info3_param * refs = NULL ;
int numrefs = 0 ;
xid = get_xid ( ) ;
path = tcon - > dfs_path + 1 ;
rc = get_normalized_path ( path , & npath ) ;
if ( rc )
2019-12-04 23:38:03 +03:00
goto out_free_xid ;
2018-11-14 21:01:21 +03:00
2019-12-04 23:38:03 +03:00
down_read ( & htable_rw_lock ) ;
2018-11-14 21:01:21 +03:00
2019-12-04 23:38:03 +03:00
ce = lookup_cache_entry ( npath , NULL ) ;
2018-11-14 21:01:21 +03:00
if ( IS_ERR ( ce ) ) {
rc = PTR_ERR ( ce ) ;
2019-12-04 23:38:03 +03:00
up_read ( & htable_rw_lock ) ;
goto out_free_path ;
2018-11-14 21:01:21 +03:00
}
2019-12-04 23:38:03 +03:00
if ( ! cache_entry_expired ( ce ) ) {
up_read ( & htable_rw_lock ) ;
goto out_free_path ;
}
up_read ( & htable_rw_lock ) ;
2018-11-14 21:01:21 +03:00
2019-03-19 22:54:29 +03:00
/* If it's a DFS Link, then use root SMB session for refreshing it */
if ( is_dfs_link ( npath ) ) {
ses = root_ses = find_root_ses ( vi , tcon , npath ) ;
if ( IS_ERR ( ses ) ) {
rc = PTR_ERR ( ses ) ;
root_ses = NULL ;
2019-12-04 23:38:03 +03:00
goto out_free_path ;
2019-03-19 22:54:29 +03:00
}
} else {
ses = tcon - > ses ;
}
2019-12-04 23:38:03 +03:00
rc = get_dfs_referral ( xid , ses , cache_nlsc , tcon - > remap , npath , & refs ,
& numrefs ) ;
if ( ! rc ) {
dump_refs ( refs , numrefs ) ;
rc = update_cache_entry ( npath , refs , numrefs ) ;
free_dfs_info_array ( refs , numrefs ) ;
2018-11-14 21:01:21 +03:00
}
2019-03-19 22:54:29 +03:00
if ( root_ses )
cifs_put_smb_ses ( root_ses ) ;
2019-12-04 23:38:03 +03:00
out_free_path :
2018-11-14 21:01:21 +03:00
free_normalized_path ( path , npath ) ;
2019-12-04 23:38:03 +03:00
out_free_xid :
free_xid ( xid ) ;
return rc ;
2018-11-14 21:01:21 +03:00
}

/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct vol_info *vi, *nvi;
	struct TCP_Server_Info *server;
	LIST_HEAD(vols);
	LIST_HEAD(tcons);
	struct cifs_tcon *tcon, *ntcon;
	int rc;

	/*
	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
	 * for refreshing.
	 */
	spin_lock(&vol_list_lock);
	list_for_each_entry(vi, &vol_list, list) {
		server = get_tcp_server(&vi->smb_vol);
		if (!server)
			continue;

		kref_get(&vi->refcnt);
		list_add_tail(&vi->rlist, &vols);
		put_tcp_server(server);
	}
	spin_unlock(&vol_list_lock);

	/* Walk through all TCONs and refresh any expired cache entry */
	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
		spin_lock(&vi->smb_vol_lock);
		server = get_tcp_server(&vi->smb_vol);
		spin_unlock(&vi->smb_vol_lock);

		if (!server)
			goto next_vol;

		get_tcons(server, &tcons);
		rc = 0;

		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
			/*
			 * Skip tcp server if any of its tcons failed to refresh
			 * (possibly due to reconnects).
			 */
			if (!rc)
				rc = refresh_tcon(vi, tcon);

			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}

		put_tcp_server(server);

next_vol:
		list_del_init(&vi->rlist);
		kref_put(&vi->refcnt, vol_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}