2018-11-14 16:01:21 -02:00
// SPDX-License-Identifier: GPL-2.0
/*
* DFS referral cache routines
*
2019-03-19 16:54:29 -03:00
* Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
2018-11-14 16:01:21 -02:00
*/
# include <linux/jhash.h>
# include <linux/ktime.h>
# include <linux/slab.h>
2020-02-03 17:37:17 -08:00
# include <linux/proc_fs.h>
2018-11-14 16:01:21 -02:00
# include <linux/nls.h>
# include <linux/workqueue.h>
2021-06-04 19:25:29 -03:00
# include <linux/uuid.h>
2018-11-14 16:01:21 -02:00
# include "cifsglob.h"
# include "smb2pdu.h"
# include "smb2proto.h"
# include "cifsproto.h"
# include "cifs_debug.h"
# include "cifs_unicode.h"
# include "smb2glob.h"
2021-07-16 03:26:41 -03:00
# include "dns_resolve.h"
2018-11-14 16:01:21 -02:00
# include "dfs_cache.h"
2019-12-04 17:37:58 -03:00
# define CACHE_HTABLE_SIZE 32
# define CACHE_MAX_ENTRIES 64
2021-06-04 19:25:32 -03:00
# define CACHE_MIN_TTL 120 /* 2 minutes */
2018-11-14 16:01:21 -02:00
2021-06-14 12:58:20 -03:00
# define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
2018-11-14 16:01:21 -02:00
2019-12-04 17:37:58 -03:00
/* One cached DFS referral target; linked into a cache_entry's target list. */
struct cache_dfs_tgt {
	char *name;		/* target name, kstrdup()'d in alloc_target() */
	int path_consumed;	/* per-target RESP_GET_DFS_REFERRAL.PathConsumed */
	struct list_head list;	/* linkage into cache_entry::tlist */
};
2019-12-04 17:37:58 -03:00
/* A DFS referral cache entry, keyed by canonical DFS path. */
struct cache_entry {
	struct hlist_node hlist;	/* linkage into a cache_htable bucket */
	const char *path;		/* canonical DFS path (lookup key) */
	int hdr_flags;		/* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl;		/* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype;		/* DFS_REREFERRAL_V3.ServerType */
	int ref_flags;		/* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;	/* absolute expiry time computed from ttl */
	int path_consumed;	/* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;		/* number of targets in tlist */
	struct list_head tlist;	/* list of cache_dfs_tgt */
	struct cache_dfs_tgt *tgthint;	/* current target hint; accessed with READ_ONCE/WRITE_ONCE */
};
2019-12-04 17:37:58 -03:00
static struct kmem_cache *cache_slab __read_mostly;	/* slab for struct cache_entry */
static struct workqueue_struct *dfscache_wq __read_mostly;	/* runs refresh_task */

static int cache_ttl;			/* smallest TTL seen so far; refresh period */
static DEFINE_SPINLOCK(cache_ttl_lock);	/* protects cache_ttl */

static struct nls_table *cache_cp;	/* codepage all cached paths are stored in */

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);	/* protects cache_htable and its entries */

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
2021-06-04 19:25:30 -03:00
/**
* dfs_cache_canonical_path - get a canonical DFS path
*
* @ path : DFS path
* @ cp : codepage
* @ remap : mapping type
*
* Return canonical path if success , otherwise error .
*/
char * dfs_cache_canonical_path ( const char * path , const struct nls_table * cp , int remap )
2018-11-14 16:01:21 -02:00
{
2021-06-04 19:25:30 -03:00
char * tmp ;
int plen = 0 ;
char * npath ;
2019-12-04 17:38:01 -03:00
if ( ! path | | strlen ( path ) < 3 | | ( * path ! = ' \\ ' & & * path ! = ' / ' ) )
2021-06-04 19:25:30 -03:00
return ERR_PTR ( - EINVAL ) ;
if ( unlikely ( strcmp ( cp - > charset , cache_cp - > charset ) ) ) {
tmp = ( char * ) cifs_strndup_to_utf16 ( path , strlen ( path ) , & plen , cp , remap ) ;
if ( ! tmp ) {
cifs_dbg ( VFS , " %s: failed to convert path to utf16 \n " , __func__ ) ;
return ERR_PTR ( - EINVAL ) ;
}
2018-11-14 16:01:21 -02:00
2021-06-04 19:25:30 -03:00
npath = cifs_strndup_from_utf16 ( tmp , plen , true , cache_cp ) ;
kfree ( tmp ) ;
if ( ! npath ) {
cifs_dbg ( VFS , " %s: failed to convert path from utf16 \n " , __func__ ) ;
return ERR_PTR ( - EINVAL ) ;
}
2018-11-14 16:01:21 -02:00
} else {
2021-06-04 19:25:30 -03:00
npath = kstrdup ( path , GFP_KERNEL ) ;
if ( ! npath )
return ERR_PTR ( - ENOMEM ) ;
2018-11-14 16:01:21 -02:00
}
2021-06-04 19:25:30 -03:00
convert_delimiter ( npath , ' \\ ' ) ;
return npath ;
2018-11-14 16:01:21 -02:00
}
2019-12-04 17:37:58 -03:00
static inline bool cache_entry_expired ( const struct cache_entry * ce )
2018-11-14 16:01:21 -02:00
{
struct timespec64 ts ;
2018-12-17 20:11:46 +11:00
ktime_get_coarse_real_ts64 ( & ts ) ;
2019-12-04 17:37:58 -03:00
return timespec64_compare ( & ts , & ce - > etime ) > = 0 ;
2018-11-14 16:01:21 -02:00
}
2019-12-04 17:37:58 -03:00
static inline void free_tgts ( struct cache_entry * ce )
2018-11-14 16:01:21 -02:00
{
2019-12-04 17:37:58 -03:00
struct cache_dfs_tgt * t , * n ;
2018-11-14 16:01:21 -02:00
2019-12-04 17:37:58 -03:00
list_for_each_entry_safe ( t , n , & ce - > tlist , list ) {
list_del ( & t - > list ) ;
kfree ( t - > name ) ;
2018-11-14 16:01:21 -02:00
kfree ( t ) ;
}
}
2019-12-04 17:37:58 -03:00
/*
 * Remove @ce from the hash table and free it: path, targets, then the entry
 * itself.  Decrements cache_count.  Caller must hold htable_rw_lock for
 * writing.
 */
static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}
static void flush_cache_ents ( void )
{
int i ;
2019-12-04 17:37:58 -03:00
for ( i = 0 ; i < CACHE_HTABLE_SIZE ; i + + ) {
struct hlist_head * l = & cache_htable [ i ] ;
2019-12-04 17:38:03 -03:00
struct hlist_node * n ;
2019-12-04 17:37:58 -03:00
struct cache_entry * ce ;
2018-11-14 16:01:21 -02:00
2019-12-04 17:38:03 -03:00
hlist_for_each_entry_safe ( ce , n , l , hlist ) {
if ( ! hlist_unhashed ( & ce - > hlist ) )
flush_cache_ent ( ce ) ;
}
2018-11-14 16:01:21 -02:00
}
}
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	/* Shared lock: printing only reads the table. */
	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			/* NOTE(review): "etime" prints tv_nsec, not tv_sec -- confirm intended. */
			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}
/* Writing '0' to the /proc file drops the whole cache; anything else is -EINVAL. */
static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char cmd;
	int rc;

	rc = get_user(cmd, buffer);
	if (rc)
		return rc;
	if (cmd != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}
/* Open the /proc file; output comes entirely from dfscache_proc_show(). */
static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}
2020-02-03 17:37:17 -08:00
/* Operations for the dfscache /proc file. */
const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};
# ifdef CONFIG_CIFS_DEBUG2
2019-12-04 17:37:58 -03:00
static inline void dump_tgts ( const struct cache_entry * ce )
2018-11-14 16:01:21 -02:00
{
2019-12-04 17:37:58 -03:00
struct cache_dfs_tgt * t ;
2018-11-14 16:01:21 -02:00
cifs_dbg ( FYI , " target list: \n " ) ;
2019-12-04 17:37:58 -03:00
list_for_each_entry ( t , & ce - > tlist , list ) {
cifs_dbg ( FYI , " %s%s \n " , t - > name ,
2023-01-17 19:00:39 -03:00
READ_ONCE ( ce - > tgthint ) = = t ? " (target hint) " : " " ) ;
2018-11-14 16:01:21 -02:00
}
}
2019-12-04 17:37:58 -03:00
static inline void dump_ce ( const struct cache_entry * ce )
2018-11-14 16:01:21 -02:00
{
2021-02-24 20:59:23 -03:00
cifs_dbg ( FYI , " cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s \n " ,
2020-04-14 22:42:53 -07:00
ce - > path ,
2019-12-04 17:37:58 -03:00
ce - > srvtype = = DFS_TYPE_ROOT ? " root " : " link " , ce - > ttl ,
ce - > etime . tv_nsec ,
2021-02-24 20:59:23 -03:00
ce - > hdr_flags , ce - > ref_flags ,
2021-06-14 12:58:20 -03:00
IS_DFS_INTERLINK ( ce - > hdr_flags ) ? " yes " : " no " ,
2019-12-04 17:37:58 -03:00
ce - > path_consumed ,
2018-11-14 16:01:21 -02:00
cache_entry_expired ( ce ) ? " yes " : " no " ) ;
dump_tgts ( ce ) ;
}
static inline void dump_refs ( const struct dfs_info3_param * refs , int numrefs )
{
int i ;
cifs_dbg ( FYI , " DFS referrals returned by the server: \n " ) ;
for ( i = 0 ; i < numrefs ; i + + ) {
const struct dfs_info3_param * ref = & refs [ i ] ;
cifs_dbg ( FYI ,
" \n "
" flags: 0x%x \n "
" path_consumed: %d \n "
" server_type: 0x%x \n "
" ref_flag: 0x%x \n "
" path_name: %s \n "
" node_name: %s \n "
" ttl: %d (%dm) \n " ,
ref - > flags , ref - > path_consumed , ref - > server_type ,
ref - > ref_flag , ref - > path_name , ref - > node_name ,
ref - > ttl , ref - > ttl / 60 ) ;
}
}
# else
# define dump_tgts(e)
# define dump_ce(e)
# define dump_refs(r, n)
# endif
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	/* Single-threaded, freezable, unbound queue for the refresh worker. */
	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	/* Prefer utf8 for cached paths; fall back to the default codepage. */
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}
2021-06-04 19:25:31 -03:00
static int cache_entry_hash ( const void * data , int size , unsigned int * hash )
2018-11-14 16:01:21 -02:00
{
2021-06-04 19:25:31 -03:00
int i , clen ;
const unsigned char * s = data ;
wchar_t c ;
unsigned int h = 0 ;
for ( i = 0 ; i < size ; i + = clen ) {
clen = cache_cp - > char2uni ( & s [ i ] , size - i , & c ) ;
if ( unlikely ( clen < 0 ) ) {
cifs_dbg ( VFS , " %s: can't convert char \n " , __func__ ) ;
return clen ;
}
c = cifs_toupper ( c ) ;
h = jhash ( & c , sizeof ( c ) , h ) ;
}
* hash = h % CACHE_HTABLE_SIZE ;
return 0 ;
2018-11-14 16:01:21 -02:00
}
/* Return target hint of a DFS cache entry */
2019-12-04 17:37:58 -03:00
static inline char * get_tgt_name ( const struct cache_entry * ce )
2018-11-14 16:01:21 -02:00
{
2023-01-17 19:00:39 -03:00
struct cache_dfs_tgt * t = READ_ONCE ( ce - > tgthint ) ;
2018-11-14 16:01:21 -02:00
2019-12-04 17:37:58 -03:00
return t ? t - > name : ERR_PTR ( - ENOENT ) ;
2018-11-14 16:01:21 -02:00
}
/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time ( int ttl )
{
struct timespec64 ts = {
. tv_sec = ttl ,
. tv_nsec = 0 ,
} ;
2018-12-17 20:11:46 +11:00
struct timespec64 now ;
2018-11-14 16:01:21 -02:00
2018-12-17 20:11:46 +11:00
ktime_get_coarse_real_ts64 ( & now ) ;
return timespec64_add ( now , ts ) ;
2018-11-14 16:01:21 -02:00
}
/* Allocate a new DFS target */
2020-07-21 09:36:42 -03:00
static struct cache_dfs_tgt * alloc_target ( const char * name , int path_consumed )
2018-11-14 16:01:21 -02:00
{
2019-12-04 17:37:58 -03:00
struct cache_dfs_tgt * t ;
2018-11-14 16:01:21 -02:00
2019-12-04 17:38:03 -03:00
t = kmalloc ( sizeof ( * t ) , GFP_ATOMIC ) ;
2018-11-14 16:01:21 -02:00
if ( ! t )
return ERR_PTR ( - ENOMEM ) ;
2021-03-05 15:02:34 -05:00
t - > name = kstrdup ( name , GFP_ATOMIC ) ;
2019-12-04 17:37:58 -03:00
if ( ! t - > name ) {
2018-11-14 16:01:21 -02:00
kfree ( t ) ;
return ERR_PTR ( - ENOMEM ) ;
}
2020-07-21 09:36:42 -03:00
t - > path_consumed = path_consumed ;
2019-12-04 17:37:58 -03:00
INIT_LIST_HEAD ( & t - > list ) ;
2018-11-14 16:01:21 -02:00
return t ;
}
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	struct cache_dfs_tgt *target;
	int i;

	/* Header fields come from the first referral; TTL is clamped upward. */
	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			/* Drop any targets added so far. */
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			/* Put the previous hint at the head; match only once. */
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	/* New hint is the head of the list (the old hint if it matched above). */
	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
					  list);
	WRITE_ONCE(ce->tgthint, target);

	return 0;
}
/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	/* Steal path_name from the first referral; ownership moves to @ce. */
	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}
2021-06-04 19:25:29 -03:00
static void remove_oldest_entry_locked ( void )
2018-11-14 16:01:21 -02:00
{
2019-12-04 17:38:03 -03:00
int i ;
2019-12-04 17:37:58 -03:00
struct cache_entry * ce ;
struct cache_entry * to_del = NULL ;
2018-11-14 16:01:21 -02:00
2021-06-08 13:50:06 -03:00
WARN_ON ( ! rwsem_is_locked ( & htable_rw_lock ) ) ;
2019-12-04 17:38:03 -03:00
for ( i = 0 ; i < CACHE_HTABLE_SIZE ; i + + ) {
struct hlist_head * l = & cache_htable [ i ] ;
hlist_for_each_entry ( ce , l , hlist ) {
if ( hlist_unhashed ( & ce - > hlist ) )
continue ;
if ( ! to_del | | timespec64_compare ( & ce - > etime ,
& to_del - > etime ) < 0 )
to_del = ce ;
}
2018-11-14 16:01:21 -02:00
}
2019-12-04 17:38:03 -03:00
2018-11-14 16:01:21 -02:00
if ( ! to_del ) {
2020-04-14 22:42:53 -07:00
cifs_dbg ( FYI , " %s: no entry to remove \n " , __func__ ) ;
2019-12-04 17:38:03 -03:00
return ;
2018-11-14 16:01:21 -02:00
}
2019-12-04 17:38:03 -03:00
2020-04-14 22:42:53 -07:00
cifs_dbg ( FYI , " %s: removing entry \n " , __func__ ) ;
2018-11-14 16:01:21 -02:00
dump_ce ( to_del ) ;
flush_cache_ent ( to_del ) ;
}
/* Add a new DFS cache entry */
static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
						  int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;

	/* Caller must hold htable_rw_lock for writing. */
	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		/* Evict the entry closest to expiry to make room. */
		remove_oldest_entry_locked();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return ERR_PTR(rc);

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return ce;

	/* Track the smallest TTL seen and (re)arm the refresh worker with it. */
	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return ce;
}
2021-06-04 19:25:31 -03:00
/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	/* Compare one (possibly multibyte) character at a time, case-insensitively. */
	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		if (unlikely(l1 < 0 && l2 < 0)) {
			/* Both undecodable: fall back to a raw byte compare. */
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		/* One decodable, one not (l1 < 0 xor l2 < 0) also fails here. */
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}
2018-11-14 16:01:21 -02:00
2021-06-04 19:25:31 -03:00
static struct cache_entry * __lookup_cache_entry ( const char * path , unsigned int hash , int len )
{
struct cache_entry * ce ;
2018-11-14 16:01:21 -02:00
2021-06-04 19:25:31 -03:00
hlist_for_each_entry ( ce , & cache_htable [ hash ] , hlist ) {
if ( dfs_path_equal ( ce - > path , strlen ( ce - > path ) , path , len ) ) {
2019-12-04 17:37:58 -03:00
dump_ce ( ce ) ;
2021-06-04 19:25:31 -03:00
return ce ;
2018-11-14 16:01:21 -02:00
}
}
2022-05-18 11:41:05 -03:00
return ERR_PTR ( - ENOENT ) ;
2020-07-21 09:36:39 -03:00
}
/*
 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 *
 * Use whole path components in the match.  Must be called with htable_rw_lock held.
 *
 * Return cached entry if successful.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 * Return error ptr otherwise.
 */
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	/* Count separators; paths with fewer than three components are looked up whole. */
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		/* Try the longest remaining whole-component prefix ending at @e. */
		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-ENOENT);
}
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	/* Stop the refresh worker before tearing the cache down. */
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_cp);
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
2021-06-04 19:25:29 -03:00
/* Update a cache entry with the new referral in @refs */
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
				     int numrefs)
{
	struct cache_dfs_tgt *target;
	char *th = NULL;
	int rc;

	/* Caller must hold htable_rw_lock for writing. */
	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	/* Save the current hint's name so copy_ref_data() can preserve it. */
	target = READ_ONCE(ce->tgthint);
	if (target) {
		th = kstrdup(target->name, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}
2021-06-04 19:25:30 -03:00
/*
 * Issue a DFS referral request for @path on @ses and return the referral
 * array in *@refs (count in *@numrefs), with path separators normalized to
 * backslashes.  Caller frees with free_dfs_info_array().
 */
static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
			    struct dfs_info3_param **refs, int *numrefs)
{
	int rc;
	int i;

	*refs = NULL;
	*numrefs = 0;

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!cache_cp))
		return -EINVAL;

	/* NOTE(review): assumes ses->tcon_ipc is non-NULL -- confirm callers guarantee it. */
	cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);

	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
					     NO_MAP_UNI_RSVD);
	if (!rc) {
		struct dfs_info3_param *ref = *refs;

		/* Cached paths always use backslash separators. */
		for (i = 0; i < *numrefs; i++)
			convert_delimiter(ref[i].path_name, '\\');
	}
	return rc;
}
2018-11-14 16:01:21 -02:00
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one.  Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 *
 * On success, return entry with acquired lock for reading, otherwise error ptr.
 */
static struct cache_entry *cache_refresh_path(const unsigned int xid,
					      struct cifs_ses *ses,
					      const char *path,
					      bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	int numrefs = 0;
	int rc;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		/* Fresh hit: return it with the read lock still held. */
		if (!force_refresh && !cache_entry_expired(ce))
			return ce;
	} else if (PTR_ERR(ce) != -ENOENT) {
		/* Hard lookup error (not just "missing"): bail out. */
		up_read(&htable_rw_lock);
		return ce;
	}

	/*
	 * Unlock shared access as we don't want to hold any locks while getting
	 * a new referral.  The @ses used for performing the I/O could be
	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
	 * in order to failover -- if necessary.
	 */
	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired, or it is a forced
	 * refresh.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc) {
		ce = ERR_PTR(rc);
		goto out;
	}

	dump_refs(refs, numrefs);

	down_write(&htable_rw_lock);
	/* Re-check as another task might have it added or refreshed already */
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce)) {
			rc = update_cache_entry_locked(ce, refs, numrefs);
			if (rc)
				ce = ERR_PTR(rc);
		}
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}

	if (IS_ERR(ce)) {
		up_write(&htable_rw_lock);
		goto out;
	}

	/* Hand the caller a read lock on the (new/updated) entry. */
	downgrade_write(&htable_rw_lock);
out:
	free_dfs_info_array(refs, numrefs);
	return ce;
}
2019-12-04 17:38:03 -03:00
/*
* Set up a DFS referral from a given cache entry .
*
* Must be called with htable_rw_lock held .
*/
static int setup_referral ( const char * path , struct cache_entry * ce ,
struct dfs_info3_param * ref , const char * target )
2018-11-14 16:01:21 -02:00
{
int rc ;
cifs_dbg ( FYI , " %s: set up new ref \n " , __func__ ) ;
memset ( ref , 0 , sizeof ( * ref ) ) ;
2021-03-05 15:02:34 -05:00
ref - > path_name = kstrdup ( path , GFP_ATOMIC ) ;
2018-11-14 16:01:21 -02:00
if ( ! ref - > path_name )
return - ENOMEM ;
2021-03-05 15:02:34 -05:00
ref - > node_name = kstrdup ( target , GFP_ATOMIC ) ;
2018-11-14 16:01:21 -02:00
if ( ! ref - > node_name ) {
rc = - ENOMEM ;
goto err_free_path ;
}
2019-12-04 17:38:03 -03:00
ref - > path_consumed = ce - > path_consumed ;
2019-12-04 17:37:58 -03:00
ref - > ttl = ce - > ttl ;
ref - > server_type = ce - > srvtype ;
2021-02-24 20:59:23 -03:00
ref - > ref_flag = ce - > ref_flags ;
ref - > flags = ce - > hdr_flags ;
2018-11-14 16:01:21 -02:00
return 0 ;
err_free_path :
kfree ( ref - > path_name ) ;
ref - > path_name = NULL ;
return rc ;
}
/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		/* Put the target hint first so callers try it first. */
		if (READ_ONCE(ce->tgthint) == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	/* Unwind every iterator allocated so far. */
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	/* On success, @ce is returned with htable_rw_lock held for reading. */
	ce = cache_refresh_path(xid, ses, npath, false);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
/**
* dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
* the currently connected server .
*
* NOTE : This function will neither update a cache entry in case it was
* expired , nor create a new cache entry if @ path hasn ' t been found . It heavily
* relies on an existing cache entry .
*
2021-06-04 19:25:30 -03:00
* @ path : canonical DFS path to lookup in the DFS referral cache .
2018-11-14 16:01:21 -02:00
* @ ref : when non - NULL , store single DFS referral result in it .
* @ tgt_list : when non - NULL , store complete DFS target list in it .
*
* Return 0 if successful .
* Return - ENOENT if the entry was not found .
* Return non - zero for other errors .
*/
int dfs_cache_noreq_find ( const char * path , struct dfs_info3_param * ref ,
struct dfs_cache_tgt_list * tgt_list )
{
int rc ;
2019-12-04 17:37:58 -03:00
struct cache_entry * ce ;
2018-11-14 16:01:21 -02:00
2021-06-04 19:25:30 -03:00
cifs_dbg ( FYI , " %s: path: %s \n " , __func__ , path ) ;
2019-12-04 17:38:03 -03:00
down_read ( & htable_rw_lock ) ;
2021-06-04 19:25:31 -03:00
ce = lookup_cache_entry ( path ) ;
2018-11-14 16:01:21 -02:00
if ( IS_ERR ( ce ) ) {
rc = PTR_ERR ( ce ) ;
2019-12-04 17:38:03 -03:00
goto out_unlock ;
2018-11-14 16:01:21 -02:00
}
if ( ref )
2019-12-04 17:38:03 -03:00
rc = setup_referral ( path , ce , ref , get_tgt_name ( ce ) ) ;
2018-11-14 16:01:21 -02:00
else
rc = 0 ;
if ( ! rc & & tgt_list )
2019-12-04 17:38:03 -03:00
rc = get_targets ( ce , tgt_list ) ;
out_unlock :
up_read ( & htable_rw_lock ) ;
2018-11-14 16:01:21 -02:00
return rc ;
}
/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 */
void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
{
	struct cache_dfs_tgt *t;
	struct cache_entry *ce;

	if (!path || !it)
		return;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	/*
	 * The hint is published with WRITE_ONCE() and consumed with
	 * READ_ONCE() elsewhere, so the read lock is sufficient here; the
	 * entry itself is not modified.
	 */
	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce))
		goto out_unlock;

	t = READ_ONCE(ce->tgthint);

	/* nothing to do if the hint already points at this target */
	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	/* find the matching cached target and make it the new hint */
	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			WRITE_ONCE(ce->tgthint, t);
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_read(&htable_rw_lock);
}
/**
* dfs_cache_get_tgt_referral - returns a DFS referral ( @ ref ) from a given
* target iterator ( @ it ) .
*
2021-06-04 19:25:30 -03:00
* @ path : canonical DFS path to lookup in DFS referral cache .
2018-11-14 16:01:21 -02:00
* @ it : DFS target iterator .
* @ ref : DFS referral pointer to set up the gathered information .
*
* Return zero if the DFS referral was set up correctly , otherwise non - zero .
*/
2021-06-04 19:25:30 -03:00
int dfs_cache_get_tgt_referral ( const char * path , const struct dfs_cache_tgt_iterator * it ,
2018-11-14 16:01:21 -02:00
struct dfs_info3_param * ref )
{
int rc ;
2019-12-04 17:37:58 -03:00
struct cache_entry * ce ;
2018-11-14 16:01:21 -02:00
if ( ! it | | ! ref )
return - EINVAL ;
2021-06-04 19:25:30 -03:00
cifs_dbg ( FYI , " %s: path: %s \n " , __func__ , path ) ;
2018-11-14 16:01:21 -02:00
2019-12-04 17:38:03 -03:00
down_read ( & htable_rw_lock ) ;
2018-11-14 16:01:21 -02:00
2021-06-04 19:25:31 -03:00
ce = lookup_cache_entry ( path ) ;
2018-11-14 16:01:21 -02:00
if ( IS_ERR ( ce ) ) {
rc = PTR_ERR ( ce ) ;
2019-12-04 17:38:03 -03:00
goto out_unlock ;
2018-11-14 16:01:21 -02:00
}
cifs_dbg ( FYI , " %s: target name: %s \n " , __func__ , it - > it_name ) ;
2019-12-04 17:38:03 -03:00
rc = setup_referral ( path , ce , ref , it - > it_name ) ;
2018-11-14 16:01:21 -02:00
2019-12-04 17:38:03 -03:00
out_unlock :
up_read ( & htable_rw_lock ) ;
2018-11-14 16:01:21 -02:00
return rc ;
}
cifs: skip trailing separators of prefix paths
During DFS failover, prefix paths may change, so make sure to not
leave trailing separators when parsing thew in
dfs_cache_get_tgt_share(). The separators of prefix paths are already
handled by build_path_from_dentry_optional_prefix().
Consider the following DFS link:
//dom/dfs/link: [\srv1\share\dir1, \srv2\share\dir1]
Before commit:
mount.cifs //dom/dfs/link
tree connect to \\srv1\share; prefix_path=dir1
disconnect srv1; failover to srv2
tree connect to \\srv2\share; prefix_path=dir1\
mv foo bar
...
SMB2 430 Create Request File: dir1\\foo;GetInfo Request FILE_INFO/SMB2_FILE_ALL_INFO;Close Request
SMB2 582 Create Response File: dir1\\foo;GetInfo Response;Close Response
SMB2 430 Create Request File: dir1\\bar;GetInfo Request FILE_INFO/SMB2_FILE_ALL_INFO;Close Request
SMB2 286 Create Response, Error: STATUS_OBJECT_NAME_NOT_FOUND;GetInfo Response, Error: STATUS_OBJECT_NAME_NOT_FOUND;Close Response, Error: STATUS_OBJECT_NAME_NOT_FOUND
SMB2 462 Create Request File: dir1\\foo;SetInfo Request FILE_INFO/SMB2_FILE_RENAME_INFO NewName:dir1\\bar;Close Request
SMB2 478 Create Response File: dir1\\foo;SetInfo Response, Error: STATUS_OBJECT_NAME_INVALID;Close Response
After commit:
mount.cifs //dom/dfs/link
tree connect to \\srv1\share; prefix_path=dir1
disconnect srv1; failover to srv2
tree connect to \\srv2\share; prefix_path=dir1
mv foo bar
...
SMB2 430 Create Request File: dir1\foo;GetInfo Request FILE_INFO/SMB2_FILE_ALL_INFO;Close Request
SMB2 582 Create Response File: dir1\foo;GetInfo Response;Close Response
SMB2 430 Create Request File: dir1\bar;GetInfo Request FILE_INFO/SMB2_FILE_ALL_INFO;Close Request
SMB2 286 Create Response, Error: STATUS_OBJECT_NAME_NOT_FOUND;GetInfo Response, Error: STATUS_OBJECT_NAME_NOT_FOUND;Close Response, Error: STATUS_OBJECT_NAME_NOT_FOUND
SMB2 462 Create Request File: dir1\foo;SetInfo Request FILE_INFO/SMB2_FILE_RENAME_INFO NewName:dir1\bar;Close Request
SMB2 478 Create Response File: dir1\foo;SetInfo Response;Close Response
Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
Signed-off-by: Steve French <stfrench@microsoft.com>
2022-06-03 16:13:02 -03:00
/* Extract share from DFS target and return a pointer to prefix path or NULL */
static const char *parse_target_share(const char *target, char **share)
{
	static const char seps[] = "/\\";
	const char *pos;
	size_t sharelen, n;

	/* separator that ends the server component */
	pos = strpbrk(target + 1, seps);
	if (!pos)
		return ERR_PTR(-EINVAL);

	/* length of the share component after that separator */
	n = strcspn(pos + 1, seps);
	if (!n)
		return ERR_PTR(-EINVAL);
	pos += n;

	/* duplicate "\\server\share" for the caller (caller frees) */
	sharelen = pos - target + 1;
	*share = kstrndup(target, sharelen, GFP_KERNEL);
	if (!*share)
		return ERR_PTR(-ENOMEM);

	/* skip separators and return start of prefix path (may be "") */
	pos = target + sharelen;
	return pos + strspn(pos, seps);
}
2020-02-20 19:49:34 -03:00
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char sep;
	char *target_share;
	char *ppath = NULL;
	const char *target_ppath, *dfsref_ppath;
	size_t target_pplen, dfsref_pplen;
	size_t len, c;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	/* target names start with a path separator; reuse it when merging below */
	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	/* split target into "\\server\share" and an optional prefix path */
	target_ppath = parse_target_share(it->it_name, &target_share);
	if (IS_ERR(target_ppath))
		return PTR_ERR(target_ppath);

	/* point to prefix in DFS referral path */
	dfsref_ppath = path + it->it_path_consumed;
	dfsref_ppath += strspn(dfsref_ppath, "/\\");

	target_pplen = strlen(target_ppath);
	dfsref_pplen = strlen(dfsref_ppath);

	/* merge prefix paths from DFS referral path and target node */
	if (target_pplen || dfsref_pplen) {
		/* +2: one separator between the parts plus NUL terminator */
		len = target_pplen + dfsref_pplen + 2;
		ppath = kzalloc(len, GFP_KERNEL);
		if (!ppath) {
			kfree(target_share);
			return -ENOMEM;
		}
		/*
		 * Only insert a separator when both parts are non-empty, so no
		 * trailing separator is left on the merged prefix path.
		 */
		c = strscpy(ppath, target_ppath, len);
		if (c && dfsref_pplen)
			ppath[c] = sep;
		strlcat(ppath, dfsref_ppath, len);
	}
	/* ownership of @target_share and @ppath transfers to the caller */
	*share = target_share;
	*prefix = ppath;
	return 0;
}
2021-07-16 03:26:41 -03:00
static bool target_share_equal ( struct TCP_Server_Info * server , const char * s1 , const char * s2 )
{
char unc [ sizeof ( " \\ \\ " ) + SERVER_NAME_LENGTH ] = { 0 } ;
const char * host ;
size_t hostlen ;
2022-10-04 18:41:36 -03:00
struct sockaddr_storage ss ;
2021-07-16 03:26:41 -03:00
bool match ;
int rc ;
if ( strcasecmp ( s1 , s2 ) )
return false ;
/*
* Resolve share ' s hostname and check if server address matches . Otherwise just ignore it
* as we could not have upcall to resolve hostname or failed to convert ip address .
*/
extract_unc_hostname ( s1 , & host , & hostlen ) ;
scnprintf ( unc , sizeof ( unc ) , " \\ \\ %.*s " , ( int ) hostlen , host ) ;
2022-10-04 18:41:36 -03:00
rc = dns_resolve_server_name_to_ip ( unc , ( struct sockaddr * ) & ss , NULL ) ;
2021-07-16 03:26:41 -03:00
if ( rc < 0 ) {
cifs_dbg ( FYI , " %s: could not resolve %.*s. assuming server address matches. \n " ,
__func__ , ( int ) hostlen , host ) ;
return true ;
}
2022-10-04 18:41:36 -03:00
cifs_server_lock ( server ) ;
match = cifs_match_ipaddr ( ( struct sockaddr * ) & server - > dstaddr , ( struct sockaddr * ) & ss ) ;
cifs_server_unlock ( server ) ;
2021-07-16 03:26:41 -03:00
return match ;
}
/*
* Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
* target shares in @ refs .
*/
2023-01-17 19:00:40 -03:00
static void mark_for_reconnect_if_needed ( struct TCP_Server_Info * server ,
struct dfs_cache_tgt_list * old_tl ,
struct dfs_cache_tgt_list * new_tl )
2021-07-16 03:26:41 -03:00
{
2023-01-17 19:00:40 -03:00
struct dfs_cache_tgt_iterator * oit , * nit ;
for ( oit = dfs_cache_get_tgt_iterator ( old_tl ) ; oit ;
oit = dfs_cache_get_next_tgt ( old_tl , oit ) ) {
for ( nit = dfs_cache_get_tgt_iterator ( new_tl ) ; nit ;
nit = dfs_cache_get_next_tgt ( new_tl , nit ) ) {
if ( target_share_equal ( server ,
dfs_cache_get_tgt_name ( oit ) ,
dfs_cache_get_tgt_name ( nit ) ) )
2021-07-16 03:26:41 -03:00
return ;
}
}
cifs_dbg ( FYI , " %s: no cached or matched targets. mark dfs share for reconnect. \n " , __func__ ) ;
2023-01-17 19:00:40 -03:00
cifs_signal_cifsd_for_reconnect ( server , true ) ;
2021-07-16 03:26:41 -03:00
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
{
	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
	struct cifs_tcon *ipc = ses->tcon_ipc;
	bool needs_refresh = false;
	struct cache_entry *ce;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	/* snapshot the currently cached targets before refreshing */
	down_read(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &old_tl);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
	}
	up_read(&htable_rw_lock);

	if (!needs_refresh) {
		rc = 0;
		goto out;
	}

	/* referrals are fetched over the IPC tcon; bail out if it is down */
	spin_lock(&ipc->tc_lock);
	if (ipc->status != TID_GOOD) {
		spin_unlock(&ipc->tc_lock);
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
		goto out;
	}
	spin_unlock(&ipc->tc_lock);

	/*
	 * NOTE: on success, cache_refresh_path() returns with htable_rw_lock
	 * held for reading, hence the up_read() after collecting the targets.
	 */
	ce = cache_refresh_path(xid, ses, path, true);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &new_tl);
		up_read(&htable_rw_lock);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
	}

out:
	free_xid(xid);
	dfs_cache_free_tgts(&old_tl);
	dfs_cache_free_tgts(&new_tl);
	return rc;
}
2022-12-13 01:23:16 -03:00
static int refresh_tcon ( struct cifs_tcon * tcon , bool force_refresh )
2021-11-03 13:53:29 -03:00
{
struct TCP_Server_Info * server = tcon - > ses - > server ;
mutex_lock ( & server - > refpath_lock ) ;
2022-12-13 01:23:16 -03:00
if ( server - > leaf_fullpath )
__refresh_tcon ( server - > leaf_fullpath + 1 , tcon , force_refresh ) ;
2021-11-03 13:53:29 -03:00
mutex_unlock ( & server - > refpath_lock ) ;
return 0 ;
}
2021-07-16 03:26:41 -03:00
/**
* dfs_cache_remount_fs - remount a DFS share
*
* Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
* match any of the new targets , mark it for reconnect .
*
* @ cifs_sb : cifs superblock .
*
* Return zero if remounted , otherwise non - zero .
*/
int dfs_cache_remount_fs ( struct cifs_sb_info * cifs_sb )
{
struct cifs_tcon * tcon ;
2021-11-03 13:53:29 -03:00
struct TCP_Server_Info * server ;
2021-07-16 03:26:41 -03:00
if ( ! cifs_sb | | ! cifs_sb - > master_tlink )
return - EINVAL ;
tcon = cifs_sb_master_tcon ( cifs_sb ) ;
2021-11-03 13:53:29 -03:00
server = tcon - > ses - > server ;
if ( ! server - > origin_fullpath ) {
cifs_dbg ( FYI , " %s: not a dfs mount \n " , __func__ ) ;
2021-07-16 03:26:41 -03:00
return 0 ;
}
/*
* After reconnecting to a different server , unique ids won ' t match anymore , so we disable
* serverino . This prevents dentry revalidation to think the dentry are stale ( ESTALE ) .
*/
cifs_autodisable_serverino ( cifs_sb ) ;
/*
* Force the use of prefix path to support failover on DFS paths that resolve to targets
* that have different prefix paths .
*/
cifs_sb - > mnt_cifs_flags | = CIFS_MOUNT_USE_PREFIX_PATH ;
2022-12-13 01:23:16 -03:00
return refresh_tcon ( tcon , true ) ;
2021-07-16 03:26:41 -03:00
}
2021-06-04 19:25:29 -03:00
/*
2022-12-13 01:23:16 -03:00
* Worker that will refresh DFS cache from all active mounts based on lowest TTL value
* from a DFS referral .
2021-06-04 19:25:29 -03:00
*/
2022-12-13 01:23:16 -03:00
static void refresh_cache_worker ( struct work_struct * work )
2018-11-14 16:01:21 -02:00
{
2021-06-04 19:25:29 -03:00
struct TCP_Server_Info * server ;
struct cifs_tcon * tcon , * ntcon ;
struct list_head tcons ;
2022-12-13 11:30:30 -03:00
struct cifs_ses * ses ;
2018-11-14 16:01:21 -02:00
2021-06-04 19:25:29 -03:00
INIT_LIST_HEAD ( & tcons ) ;
2018-11-14 16:01:21 -02:00
spin_lock ( & cifs_tcp_ses_lock ) ;
2021-06-04 19:25:29 -03:00
list_for_each_entry ( server , & cifs_tcp_ses_list , tcp_ses_list ) {
2022-11-17 13:23:49 -03:00
if ( ! server - > leaf_fullpath )
2021-11-03 13:53:29 -03:00
continue ;
2021-06-04 19:25:29 -03:00
list_for_each_entry ( ses , & server - > smb_ses_list , smb_ses_list ) {
2022-12-13 11:30:30 -03:00
if ( ses - > tcon_ipc ) {
ses - > ses_count + + ;
list_add_tail ( & ses - > tcon_ipc - > ulist , & tcons ) ;
2022-12-13 01:23:16 -03:00
}
2021-06-04 19:25:29 -03:00
list_for_each_entry ( tcon , & ses - > tcon_list , tcon_list ) {
2022-12-13 11:30:30 -03:00
if ( ! tcon - > ipc ) {
2021-06-04 19:25:29 -03:00
tcon - > tc_count + + ;
list_add_tail ( & tcon - > ulist , & tcons ) ;
}
2018-11-14 16:01:21 -02:00
}
}
}
spin_unlock ( & cifs_tcp_ses_lock ) ;
2019-03-19 16:54:29 -03:00
2021-06-04 19:25:29 -03:00
list_for_each_entry_safe ( tcon , ntcon , & tcons , ulist ) {
2021-11-03 13:53:29 -03:00
struct TCP_Server_Info * server = tcon - > ses - > server ;
2021-06-04 19:25:29 -03:00
list_del_init ( & tcon - > ulist ) ;
2021-11-03 13:53:29 -03:00
mutex_lock ( & server - > refpath_lock ) ;
2022-11-17 13:23:49 -03:00
if ( server - > leaf_fullpath )
2022-12-13 01:23:16 -03:00
__refresh_tcon ( server - > leaf_fullpath + 1 , tcon , false ) ;
2021-11-03 13:53:29 -03:00
mutex_unlock ( & server - > refpath_lock ) ;
2022-12-13 11:30:30 -03:00
if ( tcon - > ipc )
cifs_put_smb_ses ( tcon - > ses ) ;
else
cifs_put_tcon ( tcon ) ;
2019-12-04 17:38:00 -03:00
}
2019-12-04 17:38:02 -03:00
spin_lock ( & cache_ttl_lock ) ;
2019-12-04 17:37:58 -03:00
queue_delayed_work ( dfscache_wq , & refresh_task , cache_ttl * HZ ) ;
2019-12-04 17:38:02 -03:00
spin_unlock ( & cache_ttl_lock ) ;
2018-11-14 16:01:21 -02:00
}