# KCC topology utilities
#
# Copyright (C) Dave Craft 2011
# Copyright (C) Jelmer Vernooij 2011
# Copyright (C) Andrew Bartlett 2015
#
# Andrew Bartlett's alleged work performed by his underlings Douglas
# Bagnall and Garming Sam.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import ldb
import uuid
import time
import sys
import itertools

from samba import dsdb, unix2nttime
from samba.dcerpc import (
    drsblobs,
    drsuapi,
    misc,
    )
from samba.common import dsdb_Dn
from samba.ndr import (ndr_unpack, ndr_pack)

# colours for prettier logs
C_NORMAL = "\033[00m"
DARK_RED = "\033[00;31m"
RED = "\033[01;31m"
DARK_GREEN = "\033[00;32m"
GREEN = "\033[01;32m"
YELLOW = "\033[01;33m"
DARK_YELLOW = "\033[00;33m"
DARK_BLUE = "\033[00;34m"
BLUE = "\033[01;34m"
PURPLE = "\033[00;35m"
MAGENTA = "\033[01;35m"
DARK_CYAN = "\033[00;36m"
CYAN = "\033[01;36m"
GREY = "\033[00;37m"
WHITE = "\033[01;37m"
REV_RED = "\033[01;41m"
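
# Usage sketch (an assumption for illustration, not code from this module):
# wrap part of a log message in a colour and reset it afterwards, e.g.
#     warning = "%sfailed to converge%s" % (RED, C_NORMAL)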


class KCCError(Exception):
    pass


class NCType(object):
    (unknown, schema, domain, config, application) = range(0, 5)


class NamingContext(object):
    """Base class for a naming context.

    Holds the DN, GUID, SID (if available) and type of the DN.
    Subclasses may inherit from this and specialize.
    """

    def __init__(self, nc_dnstr):
        """Instantiate a NamingContext

        :param nc_dnstr: NC dn string
        """
        self.nc_dnstr = nc_dnstr
        self.nc_guid = None
        self.nc_sid = None
        self.nc_type = NCType.unknown

    def __str__(self):
        '''Debug dump string output of class'''
        text = "%s%s%s:" % (CYAN, self.__class__.__name__, C_NORMAL)
        text = text + "\n\tnc_dnstr=%s" % self.nc_dnstr
        text = text + "\n\tnc_guid=%s" % str(self.nc_guid)

        if self.nc_sid is None:
            text = text + "\n\tnc_sid=<absent>"
        else:
            text = text + "\n\tnc_sid=<present>"

        text = text + "\n\tnc_type=%s" % self.nc_type
        return text
2011-11-03 21:37:24 +04:00
2012-01-11 18:11:35 +04:00
def load_nc ( self , samdb ) :
attrs = [ " objectGUID " ,
" objectSid " ]
try :
res = samdb . search ( base = self . nc_dnstr ,
scope = ldb . SCOPE_BASE , attrs = attrs )
except ldb . LdbError , ( enum , estr ) :
2015-01-22 23:43:55 +03:00
raise Exception ( " Unable to find naming context ( %s ) - ( %s ) " %
2012-01-11 18:11:35 +04:00
( self . nc_dnstr , estr ) )
msg = res [ 0 ]
if " objectGUID " in msg :
self . nc_guid = misc . GUID ( samdb . schema_format_value ( " objectGUID " ,
msg [ " objectGUID " ] [ 0 ] ) )
if " objectSid " in msg :
self . nc_sid = msg [ " objectSid " ] [ 0 ]
assert self . nc_guid is not None
2011-11-03 21:37:24 +04:00

    def is_schema(self):
        '''Return True if NC is schema'''
        assert self.nc_type != NCType.unknown
        return self.nc_type == NCType.schema

    def is_domain(self):
        '''Return True if NC is domain'''
        assert self.nc_type != NCType.unknown
        return self.nc_type == NCType.domain

    def is_application(self):
        '''Return True if NC is application'''
        assert self.nc_type != NCType.unknown
        return self.nc_type == NCType.application

    def is_config(self):
        '''Return True if NC is config'''
        assert self.nc_type != NCType.unknown
        return self.nc_type == NCType.config

    def identify_by_basedn(self, samdb):
        """Given an NC object, identify what type it is thru
        the samdb basedn strings and NC sid value
        """
        # Invoke loader to initialize guid and more
        # importantly sid value (sid is used to identify
        # domain NCs)
        if self.nc_guid is None:
            self.load_nc(samdb)

        # We check against schema and config because they
        # will be the same for all nTDSDSAs in the forest.
        # That leaves the domain NCs which can be identified
        # by sid and application NCs as the last identified
        if self.nc_dnstr == str(samdb.get_schema_basedn()):
            self.nc_type = NCType.schema
        elif self.nc_dnstr == str(samdb.get_config_basedn()):
            self.nc_type = NCType.config
        elif self.nc_sid is not None:
            self.nc_type = NCType.domain
        else:
            self.nc_type = NCType.application

    def identify_by_dsa_attr(self, samdb, attr):
        """Given an NC which has been discovered thru the
        nTDSDSA database object, determine what type of NC
        it is (i.e. schema, config, domain, application) via
        the use of the schema attribute under which the NC
        was found.

        :param attr: attr of nTDSDSA object where NC DN appears
        """
        # If the NC is listed under msDS-HasDomainNCs then
        # this can only be a domain NC and it is our default
        # domain for this dsa
        if attr == "msDS-HasDomainNCs":
            self.nc_type = NCType.domain

        # If the NC is listed under hasPartialReplicaNCs
        # this is only a domain NC
        elif attr == "hasPartialReplicaNCs":
            self.nc_type = NCType.domain

        # NCs listed under hasMasterNCs are either
        # default domain, schema, or config.  We
        # utilize the identify_by_basedn() to
        # identify those
        elif attr == "hasMasterNCs":
            self.identify_by_basedn(samdb)

        # Still unknown (unlikely) but for completeness
        # and for finally identifying application NCs
        if self.nc_type == NCType.unknown:
            self.identify_by_basedn(samdb)
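
    # Informational summary of the attribute-to-type mapping applied above
    # (a restatement for readers; the method itself is authoritative):
    #   msDS-HasDomainNCs    -> domain (the DSA's default domain)
    #   hasPartialReplicaNCs -> domain (partial replica)
    #   hasMasterNCs         -> schema/config/domain via identify_by_basedn()
    #   still unknown        -> resolved by identify_by_basedn()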


class NCReplica(NamingContext):
    """Naming context replica that is relative to a specific DSA.

    This is a more specific form of NamingContext class (inheriting from that
    class) and it identifies unique attributes of the DSA's replica for a NC.
    """

    def __init__(self, dsa_dnstr, dsa_guid, nc_dnstr):
        """Instantiate a Naming Context Replica

        :param dsa_guid: GUID of DSA where replica appears
        :param nc_dnstr: NC dn string
        """
        self.rep_dsa_dnstr = dsa_dnstr
        self.rep_dsa_guid = dsa_guid
        self.rep_default = False  # replica for DSA's default domain
        self.rep_partial = False
        self.rep_ro = False
        self.rep_instantiated_flags = 0

        self.rep_fsmo_role_owner = None

        # RepsFromTo tuples
        self.rep_repsFrom = []

        # The (is present) test is a combination of being
        # enumerated in (hasMasterNCs or msDS-hasFullReplicaNCs or
        # hasPartialReplicaNCs) as well as its replica flags found
        # thru the msDS-HasInstantiatedNCs.  If the NC replica meets
        # the first enumeration test then this flag is set true
        self.rep_present_criteria_one = False

        # Call my super class we inherited from
        NamingContext.__init__(self, nc_dnstr)

    def __str__(self):
        '''Debug dump string output of class'''
        text = "%s:" % self.__class__.__name__
        text = text + "\n\tdsa_dnstr=%s" % self.rep_dsa_dnstr
        text = text + "\n\tdsa_guid=%s" % str(self.rep_dsa_guid)
        text = text + "\n\tdefault=%s" % self.rep_default
        text = text + "\n\tro=%s" % self.rep_ro
        text = text + "\n\tpartial=%s" % self.rep_partial
        text = text + "\n\tpresent=%s" % self.is_present()
        text = text + "\n\tfsmo_role_owner=%s" % self.rep_fsmo_role_owner

        for rep in self.rep_repsFrom:
            text = text + "\n%s" % rep

        return "%s\n%s" % (NamingContext.__str__(self), text)

    def set_instantiated_flags(self, flags=None):
        '''Set or clear NC replica instantiated flags'''
        if flags is None:
            self.rep_instantiated_flags = 0
        else:
            self.rep_instantiated_flags = flags

    def identify_by_dsa_attr(self, samdb, attr):
        """Given an NC which has been discovered thru the
        nTDSDSA database object, determine what type of NC
        replica it is (i.e. partial, read only, default)

        :param attr: attr of nTDSDSA object where NC DN appears
        """
        # If the NC was found under hasPartialReplicaNCs
        # then this is a partial replica at this dsa
        if attr == "hasPartialReplicaNCs":
            self.rep_partial = True
            self.rep_present_criteria_one = True

        # If the NC is listed under msDS-HasDomainNCs then
        # this can only be a domain NC and it is the DSA's
        # default domain NC
        elif attr == "msDS-HasDomainNCs":
            self.rep_default = True

        # NCs listed under hasMasterNCs are either
        # default domain, schema, or config.  We check
        # against schema and config because they will be
        # the same for all nTDSDSAs in the forest.  That
        # leaves the default domain NC remaining which
        # may be different for each nTDSDSA (and thus
        # we don't compare against this samdb's default
        # basedn)
        elif attr == "hasMasterNCs":
            self.rep_present_criteria_one = True

            if self.nc_dnstr != str(samdb.get_schema_basedn()) and \
               self.nc_dnstr != str(samdb.get_config_basedn()):
                self.rep_default = True

        # RODC only
        elif attr == "msDS-hasFullReplicaNCs":
            self.rep_present_criteria_one = True
            self.rep_ro = True

        # Not RODC
        elif attr == "msDS-hasMasterNCs":
            self.rep_present_criteria_one = True
            self.rep_ro = False

        # Now use this DSA attribute to identify the naming
        # context type by calling the super class method
        # of the same name
        NamingContext.identify_by_dsa_attr(self, samdb, attr)

    def is_default(self):
        """Whether this is a default domain for the dsa that this NC appears on
        """
        return self.rep_default

    def is_ro(self):
        '''Return True if NC replica is read only'''
        return self.rep_ro

    def is_partial(self):
        '''Return True if NC replica is partial'''
        return self.rep_partial

    def is_present(self):
        """Given an NC replica which has been discovered thru the
        nTDSDSA database object and populated with replica flags
        from the msDS-HasInstantiatedNCs; return whether the NC
        replica is present (true) or if the IT_NC_GOING flag is
        set then the NC replica is not present (false)
        """
        if self.rep_present_criteria_one and \
           self.rep_instantiated_flags & dsdb.INSTANCE_TYPE_NC_GOING == 0:
            return True
        return False
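
    # Informational note (assumption about the typical data flow, not new
    # logic): rep_instantiated_flags is filled from the binary portion of the
    # msDS-HasInstantiatedNCs values via set_instantiated_flags(), so a
    # replica carrying dsdb.INSTANCE_TYPE_NC_GOING is reported as not present
    # even though it is still enumerated on the nTDSDSA object.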

    def load_repsFrom(self, samdb):
        """Given an NC replica which has been discovered thru the nTDSDSA
        database object, load the repsFrom attribute for the local replica
        held by my dsa.  The repsFrom attribute is not replicated so this
        attribute is relative only to the local DSA that the samdb exists on
        """
        try:
            res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE,
                               attrs=["repsFrom"])
        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find NC for (%s) - (%s)" %
                            (self.nc_dnstr, estr))

        msg = res[0]

        # Possibly no repsFrom if this is a singleton DC
        if "repsFrom" in msg:
            for value in msg["repsFrom"]:
                rep = RepsFromTo(self.nc_dnstr,
                                 ndr_unpack(drsblobs.repsFromToBlob, value))
                self.rep_repsFrom.append(rep)

    def commit_repsFrom(self, samdb, ro=False):
        """Commit repsFrom to the database"""

        # XXX - This is not truly correct according to the MS-TECH
        #       docs.  To commit a repsFrom we should be using RPCs
        #       IDL_DRSReplicaAdd, IDL_DRSReplicaModify, and
        #       IDL_DRSReplicaDel to effect a repsFrom change.
        #
        #       Those RPCs are missing in samba, so I'll have to
        #       implement them to get this to more accurately
        #       reflect the reference docs.  As of right now this
        #       commit to the database will work as it's what the
        #       older KCC also did
        modify = False
        newreps = []
        delreps = []

        for repsFrom in self.rep_repsFrom:

            # Leave out any to be deleted from
            # replacement list.  Build a list
            # of to be deleted reps which we will
            # remove from rep_repsFrom list below
            if repsFrom.to_be_deleted:
                delreps.append(repsFrom)
                modify = True
                continue

            if repsFrom.is_modified():
                repsFrom.set_unmodified()
                modify = True

            # current (unmodified) elements also get
            # appended here but no changes will occur
            # unless something is "to be modified" or
            # "to be deleted"
            newreps.append(ndr_pack(repsFrom.ndr_blob))

        # Now delete these from our list of rep_repsFrom
        for repsFrom in delreps:
            self.rep_repsFrom.remove(repsFrom)
        delreps = []

        # Nothing to do if no reps have been modified or
        # need to be deleted or input option has informed
        # us to be "readonly" (ro).  Leave database
        # record "as is"
        if not modify or ro:
            return

        m = ldb.Message()
        m.dn = ldb.Dn(samdb, self.nc_dnstr)

        m["repsFrom"] = \
            ldb.MessageElement(newreps, ldb.FLAG_MOD_REPLACE, "repsFrom")

        try:
            samdb.modify(m)

        except ldb.LdbError, estr:
            raise Exception("Could not set repsFrom for (%s) - (%s)" %
                            (self.nc_dnstr, estr))
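
    # Usage sketch (an assumption, mirroring the ro flag handled above): a
    # dry run can reuse the same bookkeeping without touching the database,
    # e.g.
    #     nc_rep.commit_repsFrom(samdb, ro=True)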

    def load_replUpToDateVector(self, samdb):
        """Given an NC replica which has been discovered thru the nTDSDSA
        database object, load the replUpToDateVector attribute for the local
        replica held by my dsa.  The replUpToDateVector attribute is not
        replicated so this attribute is relative only to the local DSA that
        the samdb exists on
        """
        try:
            res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE,
                               attrs=["replUpToDateVector"])
        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find NC for (%s) - (%s)" %
                            (self.nc_dnstr, estr))

        msg = res[0]

        # Possibly no replUpToDateVector if this is a singleton DC
        if "replUpToDateVector" in msg:
            value = msg["replUpToDateVector"][0]
            replUpToDateVectorBlob = ndr_unpack(drsblobs.replUpToDateVectorBlob,
                                                value)
            if replUpToDateVectorBlob.version != 2:
                # Samba only generates version 2, and this runs locally
                raise AttributeError("Unexpected replUpToDateVector version %d"
                                     % replUpToDateVectorBlob.version)

            self.rep_replUpToDateVector_cursors = \
                replUpToDateVectorBlob.ctr.cursors
        else:
            self.rep_replUpToDateVector_cursors = []
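
    # Informational note (an assumption about the blob layout, not verified
    # here): each version-2 cursor describes how current this replica is with
    # respect to one originating DSA, roughly
    #     (source_dsa_invocation_id, highest_usn, last_sync_success)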

    def dumpstr_to_be_deleted(self):
        return '\n'.join(str(x) for x in self.rep_repsFrom if x.to_be_deleted)

    def dumpstr_to_be_modified(self):
        return '\n'.join(str(x) for x in self.rep_repsFrom if x.is_modified())

    def load_fsmo_roles(self, samdb):
        """Given an NC replica which has been discovered thru the nTDSDSA
        database object, load the fSMORoleOwner attribute.
        """
        try:
            res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE,
                               attrs=["fSMORoleOwner"])
        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find NC for (%s) - (%s)" %
                            (self.nc_dnstr, estr))

        msg = res[0]

        # Possibly no fSMORoleOwner
        if "fSMORoleOwner" in msg:
            self.rep_fsmo_role_owner = msg["fSMORoleOwner"]

    def is_fsmo_role_owner(self, dsa_dnstr):
        if self.rep_fsmo_role_owner is not None and \
           self.rep_fsmo_role_owner == dsa_dnstr:
            return True
        return False


class DirectoryServiceAgent(object):

    def __init__(self, dsa_dnstr):
        """Initialize DSA class.

        Class is subsequently fully populated by calling the load_dsa() method

        :param dsa_dnstr: DN of the nTDSDSA
        """
        self.dsa_dnstr = dsa_dnstr
        self.dsa_guid = None
        self.dsa_ivid = None
        self.dsa_is_ro = False
        self.dsa_is_istg = False
        self.dsa_options = 0
        self.dsa_behavior = 0
        self.default_dnstr = None  # default domain dn string for dsa

        # NCReplicas for this dsa that are "present"
        # Indexed by DN string of naming context
        self.current_rep_table = {}

        # NCReplicas for this dsa that "should be present"
        # Indexed by DN string of naming context
        self.needed_rep_table = {}

        # NTDSConnections for this dsa.  These are current
        # valid connections that are committed or pending a commit
        # in the database.  Indexed by DN string of connection
        self.connect_table = {}

    def __str__(self):
        '''Debug dump string output of class'''

        text = "%s:" % self.__class__.__name__
        if self.dsa_dnstr is not None:
            text = text + "\n\tdsa_dnstr=%s" % self.dsa_dnstr
        if self.dsa_guid is not None:
            text = text + "\n\tdsa_guid=%s" % str(self.dsa_guid)
        if self.dsa_ivid is not None:
            text = text + "\n\tdsa_ivid=%s" % str(self.dsa_ivid)

        text = text + "\n\tro=%s" % self.is_ro()
        text = text + "\n\tgc=%s" % self.is_gc()
        text = text + "\n\tistg=%s" % self.is_istg()

        text = text + "\ncurrent_replica_table:"
        text = text + "\n%s" % self.dumpstr_current_replica_table()
        text = text + "\nneeded_replica_table:"
        text = text + "\n%s" % self.dumpstr_needed_replica_table()
        text = text + "\nconnect_table:"
        text = text + "\n%s" % self.dumpstr_connect_table()

        return text

    def get_current_replica(self, nc_dnstr):
        return self.current_rep_table.get(nc_dnstr)

    def is_istg(self):
        '''Returns True if dsa is the intersite topology generator for its site'''
        # The KCC on an RODC always acts as an ISTG for itself
        return self.dsa_is_istg or self.dsa_is_ro

    def is_ro(self):
        '''Returns True if dsa is a read only domain controller'''
        return self.dsa_is_ro

    def is_gc(self):
        '''Returns True if dsa hosts a global catalog'''
        if (self.options & dsdb.DS_NTDSDSA_OPT_IS_GC) != 0:
            return True
        return False

    def is_minimum_behavior(self, version):
        """Is dsa at minimum windows level greater than or equal to (version)

        :param version: Windows version to test against
            (e.g. DS_DOMAIN_FUNCTION_2008)
        """
        if self.dsa_behavior >= version:
            return True
        return False

    def is_translate_ntdsconn_disabled(self):
        """Whether this allows NTDSConnection translation in its options."""
        if (self.options & dsdb.DS_NTDSDSA_OPT_DISABLE_NTDSCONN_XLATE) != 0:
            return True
        return False

    def get_rep_tables(self):
        """Return DSA current and needed replica tables
        """
        return self.current_rep_table, self.needed_rep_table

    def get_parent_dnstr(self):
        """Get the parent DN string of this object."""
        head, sep, tail = self.dsa_dnstr.partition(',')
        return tail
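
    # Worked example (hypothetical DN, for illustration only):
    #   "CN=NTDS Settings,CN=DC1,CN=Servers,CN=Site-A,CN=Sites,..."
    # partition(',') splits at the first comma, so get_parent_dnstr()
    # returns everything from "CN=DC1,..." onwards.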

    def load_dsa(self, samdb):
        """Load a DSA from the samdb.

        Prior initialization has given us the DN of the DSA that we are to
        load.  This method initializes all other attributes, including loading
        the NC replica table for this DSA.
        """
        attrs = ["objectGUID",
                 "invocationID",
                 "options",
                 "msDS-isRODC",
                 "msDS-Behavior-Version"]
        try:
            res = samdb.search(base=self.dsa_dnstr, scope=ldb.SCOPE_BASE,
                               attrs=attrs)

        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find nTDSDSA for (%s) - (%s)" %
                            (self.dsa_dnstr, estr))

        msg = res[0]
        self.dsa_guid = misc.GUID(samdb.schema_format_value("objectGUID",
                                  msg["objectGUID"][0]))

        # RODCs don't originate changes and thus have no invocationId,
        # therefore we must check for existence first
        if "invocationId" in msg:
            self.dsa_ivid = misc.GUID(samdb.schema_format_value("objectGUID",
                                      msg["invocationId"][0]))

        if "options" in msg:
            self.options = int(msg["options"][0])

        if "msDS-isRODC" in msg and msg["msDS-isRODC"][0] == "TRUE":
            self.dsa_is_ro = True
        else:
            self.dsa_is_ro = False

        if "msDS-Behavior-Version" in msg:
            self.dsa_behavior = int(msg['msDS-Behavior-Version'][0])

        # Load the NC replicas that are enumerated on this dsa
        self.load_current_replica_table(samdb)

        # Load the nTDSConnection that are enumerated on this dsa
        self.load_connection_table(samdb)

    def load_current_replica_table(self, samdb):
        """Method to load the NC replicas listed for DSA object.

        This method queries the samdb for (hasMasterNCs, msDS-hasMasterNCs,
        hasPartialReplicaNCs, msDS-HasDomainNCs, msDS-hasFullReplicaNCs, and
        msDS-HasInstantiatedNCs) to determine complete list of NC replicas that
        are enumerated for the DSA.  Once an NC replica is loaded it is
        identified (schema, config, etc) and the other replica attributes
        (partial, ro, etc) are determined.

        :param samdb: database to query for DSA replica list
        """
        ncattrs = [  # not RODC - default, config, schema (old style)
                   "hasMasterNCs",
                   # not RODC - default, config, schema, app NCs
                   "msDS-hasMasterNCs",
                   # domain NC partial replicas
                   "hasPartialReplicaNCs",
                   # default domain NC
                   "msDS-HasDomainNCs",
                   # RODC only - default, config, schema, app NCs
                   "msDS-hasFullReplicaNCs",
                   # Identifies if replica is coming, going, or stable
                   "msDS-HasInstantiatedNCs"]
        try:
            res = samdb.search(base=self.dsa_dnstr, scope=ldb.SCOPE_BASE,
                               attrs=ncattrs)

        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find nTDSDSA NCs for (%s) - (%s)" %
                            (self.dsa_dnstr, estr))

        # The table of NCs for the dsa we are searching
        tmp_table = {}

        # We should get one response to our query here for
        # the ntds that we requested
        if len(res[0]) > 0:

            # Our response will contain a number of elements including
            # the dn of the dsa as well as elements for each
            # attribute (e.g. hasMasterNCs).  Each of these elements
            # is a dictionary list which we retrieve the keys for and
            # then iterate over them
            for k in res[0].keys():
                if k == "dn":
                    continue

                # For each attribute type there will be one or more DNs
                # listed.  For instance DCs normally have 3 hasMasterNCs
                # listed.
                for value in res[0][k]:
                    # Turn dn into a dsdb_Dn so we can use
                    # its methods to parse a binary DN
                    dsdn = dsdb_Dn(samdb, value)
                    flags = dsdn.get_binary_integer()
                    dnstr = str(dsdn.dn)

                    if dnstr not in tmp_table:
                        rep = NCReplica(self.dsa_dnstr, self.dsa_guid, dnstr)
                        tmp_table[dnstr] = rep
                    else:
                        rep = tmp_table[dnstr]

                    if k == "msDS-HasInstantiatedNCs":
                        rep.set_instantiated_flags(flags)
                        continue

                    rep.identify_by_dsa_attr(samdb, k)

                    # if we've identified the default domain NC
                    # then save its DN string
                    if rep.is_default():
                        self.default_dnstr = dnstr
        else:
            raise Exception("No nTDSDSA NCs for (%s)" % self.dsa_dnstr)

        # Assign our newly built NC replica table to this dsa
        self.current_rep_table = tmp_table

    def add_needed_replica(self, rep):
        """Method to add an NC replica that "should be present" to the
        needed_rep_table.
        """
        self.needed_rep_table[rep.nc_dnstr] = rep

    def load_connection_table(self, samdb):
        """Method to load the nTDSConnections listed for DSA object.

        :param samdb: database to query for DSA connection list
        """
        try:
            res = samdb.search(base=self.dsa_dnstr,
                               scope=ldb.SCOPE_SUBTREE,
                               expression="(objectClass=nTDSConnection)")

        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find nTDSConnection for (%s) - (%s)" %
                            (self.dsa_dnstr, estr))

        for msg in res:
            dnstr = str(msg.dn)

            # already loaded
            if dnstr in self.connect_table.keys():
                continue

            connect = NTDSConnection(dnstr)
            connect.load_connection(samdb)
            self.connect_table[dnstr] = connect

    def commit_connections(self, samdb, ro=False):
        """Method to commit any uncommitted nTDSConnection
        modifications that are in our table.  These would be
        identified connections that are marked to be added or
        deleted

        :param samdb: database to commit DSA connection list to
        :param ro: if (true) then perform internal operations but
            do not write to the database (readonly)
        """
        delconn = []

        for dnstr, connect in self.connect_table.items():
            if connect.to_be_added:
                connect.commit_added(samdb, ro)

            if connect.to_be_modified:
                connect.commit_modified(samdb, ro)

            if connect.to_be_deleted:
                connect.commit_deleted(samdb, ro)
                delconn.append(dnstr)

        # Now delete the connection from the table
        for dnstr in delconn:
            del self.connect_table[dnstr]

    def add_connection(self, dnstr, connect):
        assert dnstr not in self.connect_table.keys()
        self.connect_table[dnstr] = connect

    def get_connection_by_from_dnstr(self, from_dnstr):
        """Scan DSA nTDSConnection table and return connection
        with a "fromServer" dn string equivalent to method
        input parameter.

        :param from_dnstr: search for this from server entry
        """
        for dnstr, connect in self.connect_table.items():
            if connect.get_from_dnstr() == from_dnstr:
                return connect
        return None

    def dumpstr_current_replica_table(self):
        '''Debug dump string output of current replica table'''
        return '\n'.join(str(x) for x in self.current_rep_table)

    def dumpstr_needed_replica_table(self):
        '''Debug dump string output of needed replica table'''
        return '\n'.join(str(x) for x in self.needed_rep_table)

    def dumpstr_connect_table(self):
        '''Debug dump string output of connect table'''
        return '\n'.join(str(x) for x in self.connect_table)

    def new_connection(self, options, flags, transport, from_dnstr, sched):
        """Set up a new connection for the DSA based on input
        parameters.  Connection will be added to the DSA
        connect_table and will be marked as "to be added" pending
        a call to commit_connections()
        """
        dnstr = "CN=%s," % str(uuid.uuid4()) + self.dsa_dnstr

        connect = NTDSConnection(dnstr)
        connect.to_be_added = True
        connect.enabled = True
        connect.from_dnstr = from_dnstr
        connect.options = options
        connect.flags = flags

        if transport is not None:
            connect.transport_dnstr = transport.dnstr
            connect.transport_guid = transport.guid

        if sched is not None:
            connect.schedule = sched
        else:
            # Create schedule.  Attribute values set according to MS-TECH
            # intrasite connection creation document
            connect.schedule = drsblobs.schedule()

            connect.schedule.size = 188
            connect.schedule.bandwidth = 0
            connect.schedule.numberOfSchedules = 1

            header = drsblobs.scheduleHeader()
            header.type = 0
            header.offset = 20

            connect.schedule.headerArray = [header]

            # 168 byte instances of the 0x01 value.  The low order 4 bits
            # of the byte equate to 15 minute intervals within a single hour.
            # There are 168 bytes because there are 168 hours in a full week
            # Effectively we are saying to perform replication at the end of
            # each hour of the week
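            # Worked example (informational restatement of the comment
            # above): a slot byte of 0x01 enables only the final 15-minute
            # quarter of its hour, so 168 bytes of 0x01 mean one replication
            # opportunity per hour for every hour of the week; 0x0F would
            # enable all four quarters of that hour.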
            data = drsblobs.scheduleSlots()
            data.slots = [0x01] * 168

            connect.schedule.dataArray = [data]

        self.add_connection(dnstr, connect)
        return connect


class NTDSConnection(object):
    """Class defines a nTDSConnection found under a DSA
    """
    def __init__(self, dnstr):
        self.dnstr = dnstr
        self.guid = None
        self.enabled = False
        self.whenCreated = 0
        self.to_be_added = False  # new connection needs to be added
        self.to_be_deleted = False  # old connection needs to be deleted
        self.to_be_modified = False
        self.options = 0
        self.system_flags = 0
        self.transport_dnstr = None
        self.transport_guid = None
        self.from_dnstr = None
        self.schedule = None

    def __str__(self):
        '''Debug dump string output of NTDSConnection object'''

        text = "%s:\n\tdn=%s" % (self.__class__.__name__, self.dnstr)
        text = text + "\n\tenabled=%s" % self.enabled
        text = text + "\n\tto_be_added=%s" % self.to_be_added
        text = text + "\n\tto_be_deleted=%s" % self.to_be_deleted
        text = text + "\n\tto_be_modified=%s" % self.to_be_modified
        text = text + "\n\toptions=0x%08X" % self.options
        text = text + "\n\tsystem_flags=0x%08X" % self.system_flags
        text = text + "\n\twhenCreated=%d" % self.whenCreated
        text = text + "\n\ttransport_dn=%s" % self.transport_dnstr

        if self.guid is not None:
            text = text + "\n\tguid=%s" % str(self.guid)

        if self.transport_guid is not None:
            text = text + "\n\ttransport_guid=%s" % str(self.transport_guid)

        text = text + "\n\tfrom_dn=%s" % self.from_dnstr

        if self.schedule is not None:
            text = text + "\n\tschedule.size=%s" % self.schedule.size
            text = text + "\n\tschedule.bandwidth=%s" % self.schedule.bandwidth
            text = text + "\n\tschedule.numberOfSchedules=%s" % \
                self.schedule.numberOfSchedules

            for i, header in enumerate(self.schedule.headerArray):
                text = text + "\n\tschedule.headerArray[%d].type=%d" % \
                    (i, header.type)
                text = text + "\n\tschedule.headerArray[%d].offset=%d" % \
                    (i, header.offset)
                text = text + "\n\tschedule.dataArray[%d].slots[ " % i
                for slot in self.schedule.dataArray[i].slots:
                    text = text + "0x%X " % slot
                text = text + "]"

        return text

    def load_connection(self, samdb):
        """Given an NTDSConnection object with a prior initialization
        for the object's DN, search for the DN and load attributes
        from the samdb.
        """
        attrs = ["options",
                 "enabledConnection",
                 "schedule",
                 "whenCreated",
                 "objectGUID",
                 "transportType",
                 "fromServer",
                 "systemFlags"]
        try:
            res = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE,
                               attrs=attrs)

        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find nTDSConnection for (%s) - (%s)" %
                            (self.dnstr, estr))

        msg = res[0]

        if "options" in msg:
            self.options = int(msg["options"][0])

        if "enabledConnection" in msg:
            if msg["enabledConnection"][0].upper().lstrip().rstrip() == "TRUE":
                self.enabled = True

        if "systemFlags" in msg:
            self.system_flags = int(msg["systemFlags"][0])

        if "objectGUID" in msg:
            self.guid = \
                misc.GUID(samdb.schema_format_value("objectGUID",
                          msg["objectGUID"][0]))

        if "transportType" in msg:
            dsdn = dsdb_Dn(samdb, msg["transportType"][0])
            self.load_connection_transport(samdb, str(dsdn.dn))

        if "schedule" in msg:
            self.schedule = ndr_unpack(drsblobs.schedule, msg["schedule"][0])

        if "whenCreated" in msg:
            self.whenCreated = ldb.string_to_time(msg["whenCreated"][0])

        if "fromServer" in msg:
            dsdn = dsdb_Dn(samdb, msg["fromServer"][0])
            self.from_dnstr = str(dsdn.dn)
            assert self.from_dnstr is not None

    def load_connection_transport(self, samdb, tdnstr):
        """Given an NTDSConnection object which enumerates a transport
        DN, load the transport information for the connection object

        :param tdnstr: transport DN to load
        """
        attrs = ["objectGUID"]
        try:
            res = samdb.search(base=tdnstr,
                               scope=ldb.SCOPE_BASE, attrs=attrs)

        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find transport (%s) - (%s)" %
                            (tdnstr, estr))

        if "objectGUID" in res[0]:
            msg = res[0]
            self.transport_dnstr = tdnstr
            self.transport_guid = \
                misc.GUID(samdb.schema_format_value("objectGUID",
                          msg["objectGUID"][0]))
        assert self.transport_dnstr is not None
        assert self.transport_guid is not None

    def commit_deleted(self, samdb, ro=False):
        """Local helper routine for commit_connections() which
        handles committed connections that are to be deleted from
        the database
        """
        assert self.to_be_deleted
        self.to_be_deleted = False

        # No database modification requested
        if ro:
            return

        try:
            samdb.delete(self.dnstr)
        except ldb.LdbError, (enum, estr):
            raise Exception("Could not delete nTDSConnection for (%s) - (%s)" %
                            (self.dnstr, estr))

    def commit_added(self, samdb, ro=False):
        """Local helper routine for commit_connections() which
        handles committed connections that are to be added to the
        database
        """
        assert self.to_be_added
        self.to_be_added = False

        # No database modification requested
        if ro:
            return

        # First verify we don't have this entry to ensure nothing
        # is programmatically amiss
        found = False
        try:
            msg = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE)
            if len(msg) != 0:
                found = True

        except ldb.LdbError, (enum, estr):
            if enum != ldb.ERR_NO_SUCH_OBJECT:
                raise Exception("Unable to search for (%s) - (%s)" %
                                (self.dnstr, estr))
        if found:
            raise Exception("nTDSConnection for (%s) already exists!" %
                            self.dnstr)

        if self.enabled:
            enablestr = "TRUE"
        else:
            enablestr = "FALSE"

        # Prepare a message for adding to the samdb
        m = ldb.Message()
        m.dn = ldb.Dn(samdb, self.dnstr)

        m["objectClass"] = \
            ldb.MessageElement("nTDSConnection", ldb.FLAG_MOD_ADD,
                               "objectClass")
        m["showInAdvancedViewOnly"] = \
            ldb.MessageElement("TRUE", ldb.FLAG_MOD_ADD,
                               "showInAdvancedViewOnly")
        m["enabledConnection"] = \
            ldb.MessageElement(enablestr, ldb.FLAG_MOD_ADD, "enabledConnection")
        m["fromServer"] = \
            ldb.MessageElement(self.from_dnstr, ldb.FLAG_MOD_ADD, "fromServer")
        m["options"] = \
            ldb.MessageElement(str(self.options), ldb.FLAG_MOD_ADD, "options")
        m["systemFlags"] = \
            ldb.MessageElement(str(self.system_flags), ldb.FLAG_MOD_ADD,
                               "systemFlags")

        if self.transport_dnstr is not None:
            m["transportType"] = \
                ldb.MessageElement(str(self.transport_dnstr), ldb.FLAG_MOD_ADD,
                                   "transportType")

        if self.schedule is not None:
            m["schedule"] = \
                ldb.MessageElement(ndr_pack(self.schedule),
                                   ldb.FLAG_MOD_ADD, "schedule")
        try:
            samdb.add(m)
        except ldb.LdbError, (enum, estr):
            raise Exception("Could not add nTDSConnection for (%s) - (%s)" %
                            (self.dnstr, estr))

    def commit_modified(self, samdb, ro=False):
        """Local helper routine for commit_connections() which
        handles committed connections that are to be modified in the
        database
        """
        assert self.to_be_modified
        self.to_be_modified = False

        # No database modification requested
        if ro:
            return

        # First verify we have this entry to ensure nothing
        # is programmatically amiss
        try:
            msg = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE)
            found = True

        except ldb.LdbError, (enum, estr):
            if enum == ldb.ERR_NO_SUCH_OBJECT:
                found = False
            else:
                raise Exception("Unable to search for (%s) - (%s)" %
                                (self.dnstr, estr))
        if not found:
            raise Exception("nTDSConnection for (%s) doesn't exist!" %
                            self.dnstr)

        if self.enabled:
            enablestr = "TRUE"
        else:
            enablestr = "FALSE"

        # Prepare a message for modifying the samdb
        m = ldb.Message()
        m.dn = ldb.Dn(samdb, self.dnstr)

        m["enabledConnection"] = \
            ldb.MessageElement(enablestr, ldb.FLAG_MOD_REPLACE,
                               "enabledConnection")
        m["fromServer"] = \
            ldb.MessageElement(self.from_dnstr, ldb.FLAG_MOD_REPLACE,
                               "fromServer")
        m["options"] = \
            ldb.MessageElement(str(self.options), ldb.FLAG_MOD_REPLACE,
                               "options")
        m["systemFlags"] = \
            ldb.MessageElement(str(self.system_flags), ldb.FLAG_MOD_REPLACE,
                               "systemFlags")

        if self.transport_dnstr is not None:
            m["transportType"] = \
                ldb.MessageElement(str(self.transport_dnstr),
                                   ldb.FLAG_MOD_REPLACE, "transportType")
        else:
            m["transportType"] = \
                ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "transportType")

        if self.schedule is not None:
            m["schedule"] = \
                ldb.MessageElement(ndr_pack(self.schedule),
                                   ldb.FLAG_MOD_REPLACE, "schedule")
        else:
            m["schedule"] = \
                ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "schedule")
        try:
            samdb.modify(m)
        except ldb.LdbError, (enum, estr):
            raise Exception("Could not modify nTDSConnection for (%s) - (%s)" %
                            (self.dnstr, estr))

    def set_modified(self, truefalse):
        self.to_be_modified = truefalse

    def set_added(self, truefalse):
        self.to_be_added = truefalse

    def set_deleted(self, truefalse):
        self.to_be_deleted = truefalse

    def is_schedule_minimum_once_per_week(self):
        """Returns True if our schedule includes at least one
        replication interval within the week.  False otherwise
        """
        if self.schedule is None or self.schedule.dataArray[0] is None:
            return False

        for slot in self.schedule.dataArray[0].slots:
            if (slot & 0x0F) != 0x0:
                return True
        return False

    def is_equivalent_schedule(self, sched):
        """Returns True if our schedule is equivalent to the input
        comparison schedule.

        :param sched: schedule to compare to
        """
        if self.schedule is None:
            return sched is None
        elif sched is None:
            return False

        if (self.schedule.size != sched.size or
            self.schedule.bandwidth != sched.bandwidth or
            self.schedule.numberOfSchedules != sched.numberOfSchedules):
            return False

        for i, header in enumerate(self.schedule.headerArray):

            if self.schedule.headerArray[i].type != sched.headerArray[i].type:
                return False

            if self.schedule.headerArray[i].offset != \
               sched.headerArray[i].offset:
                return False

            for a, b in zip(self.schedule.dataArray[i].slots,
                            sched.dataArray[i].slots):
                if a != b:
                    return False
        return True

    def convert_schedule_to_repltimes(self):
        """Convert NTDS Connection schedule to replTime schedule.

        NTDS Connection schedule slots are double the size of
        the replTime slots but the top portion of the NTDS
        Connection schedule slot (4 most significant bits in
        uchar) are unused.  The 4 least significant bits have
        the same (15 minute interval) bit positions as replTimes.
        We thus pack two elements of the NTDS Connection schedule
        slots into one element of the replTimes slot.

        If no schedule appears in NTDS Connection then a default
        of 0x11 is set in each replTimes slot as per behaviour
        noted in a Windows DC.  That default would cause replication
        within the last 15 minutes of each hour.
        """
        times = [0x11] * 84

        if self.schedule is None or self.schedule.dataArray[0] is None:
            return times

        for i in range(84):
            times[i] = ((self.schedule.dataArray[0].slots[i * 2] & 0xF) << 4 |
                        (self.schedule.dataArray[0].slots[i * 2 + 1] & 0xF))
        return times
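
    # Worked example (informational, matching the docstring above): two
    # adjacent NTDS Connection slots of 0x01 and 0x01 (one hour) pack into
    # the single replTimes byte 0x11, which is also the per-slot default
    # used when no schedule is present.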

    def is_rodc_topology(self):
        """Returns True if NTDS Connection specifies RODC
        topology only
        """
        if self.options & dsdb.NTDSCONN_OPT_RODC_TOPOLOGY == 0:
            return False
        return True

    def is_generated(self):
        """Returns True if NTDS Connection was generated by the
        KCC topology algorithm as opposed to set by the administrator
        """
        if self.options & dsdb.NTDSCONN_OPT_IS_GENERATED == 0:
            return False
        return True

    def is_override_notify_default(self):
        """Returns True if NTDS Connection should override notify default
        """
        if self.options & dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT == 0:
            return False
        return True

    def is_use_notify(self):
        """Returns True if NTDS Connection should use notify
        """
        if self.options & dsdb.NTDSCONN_OPT_USE_NOTIFY == 0:
            return False
        return True

    def is_twoway_sync(self):
        """Returns True if NTDS Connection should use twoway sync
        """
        if self.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC == 0:
            return False
        return True

    def is_intersite_compression_disabled(self):
        """Returns True if NTDS Connection intersite compression
        is disabled
        """
        if self.options & dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION == 0:
            return False
        return True

    def is_user_owned_schedule(self):
        """Returns True if NTDS Connection has a user owned schedule
        """
        if self.options & dsdb.NTDSCONN_OPT_USER_OWNED_SCHEDULE == 0:
            return False
        return True

    def is_enabled(self):
        """Returns True if NTDS Connection is enabled
        """
        return self.enabled

    def get_from_dnstr(self):
        '''Return fromServer dn string attribute'''
        return self.from_dnstr


class Partition(NamingContext):
    """A naming context discovered thru Partitions DN of the config schema.

    This is a more specific form of NamingContext class (inheriting from that
    class) and it identifies unique attributes enumerated in the Partitions
    such as which nTDSDSAs are cross referenced for replicas
    """
    def __init__(self, partstr):
        self.partstr = partstr
        self.enabled = True
        self.system_flags = 0
        self.rw_location_list = []
        self.ro_location_list = []

        # We don't have enough info to properly
        # fill in the naming context yet.  We'll get that
        # fully set up with load_partition().
        NamingContext.__init__(self, None)

    def load_partition(self, samdb):
        """Given a Partition class object that has been initialized with its
        partition dn string, load the partition from the sam database, identify
        the type of the partition (schema, domain, etc) and record the list of
        nTDSDSAs that appear in the cross reference attributes
        msDS-NC-Replica-Locations and msDS-NC-RO-Replica-Locations.

        :param samdb: sam database to load partition from
        """
        attrs = ["nCName",
                 "Enabled",
                 "systemFlags",
                 "msDS-NC-Replica-Locations",
                 "msDS-NC-RO-Replica-Locations"]
        try:
            res = samdb.search(base=self.partstr, scope=ldb.SCOPE_BASE,
                               attrs=attrs)

        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find partition for (%s) - (%s)" % (
                            self.partstr, estr))

        msg = res[0]
        for k in msg.keys():
            if k == "dn":
                continue

            if k == "Enabled":
                if msg[k][0].upper().lstrip().rstrip() == "TRUE":
                    self.enabled = True
                else:
                    self.enabled = False
                continue

            if k == "systemFlags":
                self.system_flags = int(msg[k][0])
                continue

            for value in msg[k]:
                dsdn = dsdb_Dn(samdb, value)
                dnstr = str(dsdn.dn)

                if k == "nCName":
                    self.nc_dnstr = dnstr
                    continue

                if k == "msDS-NC-Replica-Locations":
                    self.rw_location_list.append(dnstr)
                    continue

                if k == "msDS-NC-RO-Replica-Locations":
                    self.ro_location_list.append(dnstr)
                    continue

        # Now identify what type of NC this partition
        # enumerated
        self.identify_by_basedn(samdb)

    def is_enabled(self):
        """Returns True if partition is enabled
        """
        return self.enabled

    def is_foreign(self):
        """Returns True if this is not an Active Directory NC in our
        forest but is instead something else (e.g. a foreign NC)
        """
        if (self.system_flags & dsdb.SYSTEM_FLAG_CR_NTDS_NC) == 0:
            return True
        else:
            return False

    def should_be_present(self, target_dsa):
        """Tests whether this partition should have an NC replica
        on the target dsa.  This method returns a tuple of
        needed=True/False, ro=True/False, partial=True/False

        :param target_dsa: should NC be present on target dsa
        """
        needed = False
        ro = False
        partial = False

        # If this is the config, schema, or default
        # domain NC for the target dsa then it should
        # be present
        if self.nc_type == NCType.config or \
           self.nc_type == NCType.schema or \
           (self.nc_type == NCType.domain and
            self.nc_dnstr == target_dsa.default_dnstr):
            needed = True

        # A writable replica of an application NC should be present
        # if a cross reference to the target DSA exists.  Depending
        # on whether the DSA is ro we examine which type of cross
        # reference to look for (msDS-NC-Replica-Locations or
        # msDS-NC-RO-Replica-Locations)
        if self.nc_type == NCType.application:
            if target_dsa.is_ro():
                if target_dsa.dsa_dnstr in self.ro_location_list:
                    needed = True
            else:
                if target_dsa.dsa_dnstr in self.rw_location_list:
                    needed = True

        # If the target dsa is a gc then a partial replica of a
        # domain NC (other than the DSA's default domain) should exist
        # if there is also a cross reference for the DSA
        if target_dsa.is_gc() and \
           self.nc_type == NCType.domain and \
           self.nc_dnstr != target_dsa.default_dnstr and \
           (target_dsa.dsa_dnstr in self.ro_location_list or
            target_dsa.dsa_dnstr in self.rw_location_list):
            needed = True
            partial = True

        # partial NCs are always readonly
        if needed and (target_dsa.is_ro() or partial):
            ro = True

        return needed, ro, partial
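
    # Usage sketch (an assumption, following the tuple documented above):
    #     needed, ro, partial = part.should_be_present(dsa)
    #     if needed:
    #         ...ensure an NC replica exists on dsa, read-only and/or
    #         partial as indicated...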

    def __str__(self):
        '''Debug dump string output of class'''
        text = "%s" % NamingContext.__str__(self)
        text = text + "\n\tpartdn=%s" % self.partstr
        for k in self.rw_location_list:
            text = text + "\n\tmsDS-NC-Replica-Locations=%s" % k
        for k in self.ro_location_list:
            text = text + "\n\tmsDS-NC-RO-Replica-Locations=%s" % k
        return text


class Site(object):
    """An individual site object discovered thru the configuration
    naming context.  Contains all DSAs that exist within the site
    """
    def __init__(self, site_dnstr, unix_now):
        self.site_dnstr = site_dnstr
        self.site_guid = None
        self.site_options = 0
        self.site_topo_generator = None
        self.site_topo_failover = 0  # appears to be in minutes
        self.dsa_table = {}
        self.unix_now = unix_now

    def load_site(self, samdb):
        """Loads the NTDS Site Settings options attribute for the site
        as well as querying and loading all DSAs that appear within
        the site.
        """
        ssdn = "CN=NTDS Site Settings,%s" % self.site_dnstr
        attrs = ["options",
                 "interSiteTopologyFailover",
                 "interSiteTopologyGenerator"]
        try:
            res = samdb.search(base=ssdn, scope=ldb.SCOPE_BASE,
                               attrs=attrs)
            self_res = samdb.search(base=self.site_dnstr, scope=ldb.SCOPE_BASE,
                                    attrs=['objectGUID'])
        except ldb.LdbError, (enum, estr):
            raise Exception("Unable to find site settings for (%s) - (%s)" %
                            (ssdn, estr))

        msg = res[0]
        if "options" in msg:
            self.site_options = int(msg["options"][0])

        if "interSiteTopologyGenerator" in msg:
            self.site_topo_generator = \
                str(msg["interSiteTopologyGenerator"][0])

        if "interSiteTopologyFailover" in msg:
            self.site_topo_failover = int(msg["interSiteTopologyFailover"][0])

        msg = self_res[0]
        if "objectGUID" in msg:
            self.site_guid = misc.GUID(samdb.schema_format_value("objectGUID",
                                       msg["objectGUID"][0]))

        self.load_all_dsa(samdb)
def load_all_dsa ( self , samdb ) :
""" Discover all nTDSDSA thru the sites entry and
2012-01-11 18:11:35 +04:00
instantiate and load the DSAs . Each dsa is inserted
into the dsa_table by dn string .
2011-12-04 21:08:56 +04:00
"""
try :
res = samdb . search ( self . site_dnstr ,
scope = ldb . SCOPE_SUBTREE ,
expression = " (objectClass=nTDSDSA) " )
except ldb . LdbError , ( enum , estr ) :
raise Exception ( " Unable to find nTDSDSAs - ( %s ) " % estr )
for msg in res :
dnstr = str ( msg . dn )
# already loaded
2015-03-06 05:32:27 +03:00
if dnstr in self . dsa_table :
2011-12-04 21:08:56 +04:00
continue
dsa = DirectoryServiceAgent ( dnstr )
dsa . load_dsa ( samdb )
# Assign this dsa to my dsa table
# and index by dsa dn
self . dsa_table [ dnstr ] = dsa
def get_dsa_by_guidstr ( self , guidstr ) :
for dsa in self . dsa_table . values ( ) :
if str ( dsa . dsa_guid ) == guidstr :
return dsa
return None
def get_dsa ( self , dnstr ) :
""" Return a previously loaded DSA object by consulting
2012-01-11 18:11:35 +04:00
the site's dsa_table for the provided DSA dn string
2011-12-08 23:20:03 +04:00
: return : None if DSA doesn ' t exist
2011-12-04 21:08:56 +04:00
"""
if dnstr in self . dsa_table . keys ( ) :
return self . dsa_table [ dnstr ]
return None
2011-11-03 21:37:24 +04:00
2012-01-11 18:11:35 +04:00
def select_istg ( self , samdb , mydsa , ro ) :
""" Determine if my DC should be an intersite topology
generator . If my DC is the ISTG and is both a writable
DC and the database is opened in write mode then we perform
an originating update to set the interSiteTopologyGenerator
attribute in the NTDS Site Settings object . An RODC always
acts as an ISTG for itself .
"""
# The KCC on an RODC always acts as an ISTG for itself
if mydsa . dsa_is_ro :
mydsa . dsa_is_istg = True
return True
2015-03-06 05:17:24 +03:00
c_rep = get_dsa_config_rep ( mydsa )
2012-01-11 18:11:35 +04:00
2015-02-18 02:13:38 +03:00
# Load repsFrom and replUpToDateVector if not already loaded so we can get the current
2012-01-11 18:11:35 +04:00
# state of the config replica and whether we are getting updates
# from the istg
c_rep . load_repsFrom ( samdb )
2015-02-18 02:13:38 +03:00
c_rep . load_replUpToDateVector ( samdb )
# From MS-ADTS 6.2.2.3.1 ISTG selection:
2012-01-11 18:11:35 +04:00
# First, the KCC on a writable DC determines whether it acts
# as an ISTG for its site
#
# Let s be the object such that s!lDAPDisplayName = nTDSDSA
# and classSchema in s!objectClass.
#
# Let D be the sequence of objects o in the site of the local
# DC such that o!objectCategory = s. D is sorted in ascending
# order by objectGUID.
#
# Which is a fancy way of saying "sort all the nTDSDSA objects
# in the site by guid in ascending order". Place sorted list
# in D_sort[]
2015-03-06 07:59:14 +03:00
D_sort = sorted ( self . dsa_table . values ( ) , cmp = sort_dsa_by_guid )
2012-01-11 18:11:35 +04:00
2015-03-05 01:40:55 +03:00
ntnow = unix2nttime ( self . unix_now ) # 64-bit number of 100 nanosecond
# intervals since the year 1601
2012-01-11 18:11:35 +04:00
# Let f be the duration o!interSiteTopologyFailover seconds, or 2 hours
# if o!interSiteTopologyFailover is 0 or has no value.
#
# Note: lastSuccess and ntnow are in 100 nanosecond intervals
# so it appears we have to turn f into the same interval
#
# interSiteTopologyFailover (if set) appears to be in minutes
# so we'll need to convert to seconds and then 100 nanosecond
# intervals
2015-04-23 03:16:36 +03:00
# XXX [MS-ADTS] 6.2.2.3.1 says it is seconds, not minutes.
2012-01-11 18:11:35 +04:00
#
# 10,000,000 is number of 100 nanosecond intervals in a second
if self . site_topo_failover == 0 :
f = 2 * 60 * 60 * 10000000
else :
f = self . site_topo_failover * 60 * 10000000
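# Illustrative arithmetic (values assumed, not read from the directory):
# interSiteTopologyFailover = 30 (minutes) gives
#     f = 30 * 60 * 10000000 = 18,000,000,000
# i.e. 30 minutes expressed as 100 nanosecond intervals, matching the
# units of ntnow and lastSuccess.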
2015-03-06 07:59:14 +03:00
# Let o be the site settings object for the site of the local
# DC, or NULL if no such o exists.
d_dsa = self . dsa_table . get ( self . site_topo_generator )
2015-02-18 02:13:38 +03:00
# From MS-ADTS 6.2.2.3.1 ISTG selection:
2012-01-11 18:11:35 +04:00
# If o != NULL and o!interSiteTopologyGenerator is not the
# nTDSDSA object for the local DC and
# o!interSiteTopologyGenerator is an element dj of sequence D:
#
if d_dsa is not None and d_dsa is not mydsa :
2015-02-18 02:13:38 +03:00
# From MS-ADTS 6.2.2.3.1 ISTG Selection:
2012-01-11 18:11:35 +04:00
# Let c be the cursor in the replUpToDateVector variable
# associated with the NC replica of the config NC such
# that c.uuidDsa = dj!invocationId. If no such c exists
# (No evidence of replication from current ISTG):
# Let i = j.
# Let t = 0.
#
# Else if the current time < c.timeLastSyncSuccess - f
# (Evidence of time sync problem on current ISTG):
# Let i = 0.
# Let t = 0.
#
# Else (Evidence of replication from current ISTG):
# Let i = j.
# Let t = c.timeLastSyncSuccess.
#
# last_success appears to be a 64-bit value containing the
# number of 100 nanosecond intervals since the year 1601
2015-03-06 07:59:14 +03:00
j_idx = D_sort . index ( d_dsa )
2015-02-18 02:13:38 +03:00
found = False
for cursor in c_rep . rep_replUpToDateVector_cursors :
if d_dsa . dsa_ivid == cursor . source_dsa_invocation_id :
found = True
break
if not found :
i_idx = j_idx
t_time = 0
2012-01-11 18:11:35 +04:00
2015-04-23 03:16:36 +03:00
#XXX doc says current time < c.timeLastSyncSuccess - f
# which is true only if f is negative or clocks are wrong.
# f is not negative in the default case (2 hours).
2015-03-04 03:05:37 +03:00
elif ntnow - cursor . last_sync_success > f :
2012-03-04 04:05:23 +04:00
i_idx = 0
2012-01-11 18:11:35 +04:00
t_time = 0
else :
2012-03-04 04:05:23 +04:00
i_idx = j_idx
2015-02-18 02:13:38 +03:00
t_time = cursor . last_sync_success
2012-01-11 18:11:35 +04:00
# Otherwise (Nominate local DC as ISTG):
# Let i be the integer such that di is the nTDSDSA
# object for the local DC.
# Let t = the current time.
else :
2012-03-04 04:05:23 +04:00
i_idx = D_sort . index ( mydsa )
2012-01-11 18:11:35 +04:00
t_time = ntnow
# Compute a function that maintains the current ISTG if
# it is alive, cycles through other candidates if not.
#
# Let k be the integer (i + ((current time - t) /
# o!interSiteTopologyFailover)) MOD |D|.
#
# Note: We don't want to divide by zero here so they must
# have meant "f" instead of "o!interSiteTopologyFailover"
k_idx = ( i_idx + ( ( ntnow - t_time ) / f ) ) % len ( D_sort )
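# Illustrative example (numbers assumed): with len(D_sort) = 4,
# i_idx = 1 and (ntnow - t_time) = 3 * f, we get
#     k_idx = (1 + 3) % 4 = 0
# so the nominated ISTG advances one position in D_sort for every
# failover period f that passes without evidence from the current ISTG.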
# The local writable DC acts as an ISTG for its site if and
# only if dk is the nTDSDSA object for the local DC. If the
# local DC does not act as an ISTG, the KCC skips the
# remainder of this task.
d_dsa = D_sort [ k_idx ]
d_dsa . dsa_is_istg = True
# Update if we are the ISTG, otherwise return
if d_dsa is not mydsa :
return False
# Nothing to do
if self . site_topo_generator == mydsa . dsa_dnstr :
return True
self . site_topo_generator = mydsa . dsa_dnstr
# If readonly database then do not perform a
# persistent update
2012-09-27 20:30:47 +04:00
if ro :
2012-01-11 18:11:35 +04:00
return True
# Perform update to the samdb
ssdn = " CN=NTDS Site Settings, %s " % self . site_dnstr
m = ldb . Message ( )
m . dn = ldb . Dn ( samdb , ssdn )
m [ " interSiteTopologyGenerator " ] = \
2012-03-04 04:05:23 +04:00
ldb . MessageElement ( mydsa . dsa_dnstr , ldb . FLAG_MOD_REPLACE ,
2012-01-11 18:11:35 +04:00
" interSiteTopologyGenerator " )
try :
samdb . modify ( m )
except ldb . LdbError , estr :
2012-03-04 04:05:23 +04:00
raise Exception (
" Could not set interSiteTopologyGenerator for ( %s ) - ( %s ) " %
( ssdn , estr ) )
2012-01-11 18:11:35 +04:00
return True
2011-11-03 21:37:24 +04:00
def is_intrasite_topology_disabled ( self ) :
2012-01-11 18:11:35 +04:00
''' Returns True if intra-site topology is disabled for site '''
2011-12-08 23:20:03 +04:00
if ( self . site_options &
2011-11-03 21:37:24 +04:00
dsdb . DS_NTDSSETTINGS_OPT_IS_AUTO_TOPOLOGY_DISABLED ) != 0 :
return True
return False
2012-01-11 18:11:35 +04:00
def is_intersite_topology_disabled ( self ) :
''' Returns True if inter-site topology is disabled for site '''
if ( self . site_options &
dsdb . DS_NTDSSETTINGS_OPT_IS_INTER_SITE_AUTO_TOPOLOGY_DISABLED ) != 0 :
return True
return False
def is_random_bridgehead_disabled ( self ) :
''' Returns True if selection of random bridgehead is disabled '''
2011-12-08 23:20:03 +04:00
if ( self . site_options &
2012-01-11 18:11:35 +04:00
dsdb . DS_NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED ) != 0 :
2011-11-03 21:37:24 +04:00
return True
return False
2012-01-11 18:11:35 +04:00
def is_detect_stale_disabled ( self ) :
''' Returns True if detect stale is disabled for site '''
if ( self . site_options &
dsdb . DS_NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED ) != 0 :
return True
return False
def is_cleanup_ntdsconn_disabled ( self ) :
''' Returns True if NTDS Connection cleanup is disabled for site '''
if ( self . site_options &
dsdb . DS_NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED ) != 0 :
return True
return False
def same_site ( self , dsa ) :
''' Return True if dsa is in this site '''
if self . get_dsa ( dsa . dsa_dnstr ) :
return True
return False
2011-12-04 21:08:56 +04:00
def __str__ ( self ) :
''' Debug dump string output of class '''
text = " %s : " % self . __class__ . __name__
2012-01-11 18:11:35 +04:00
text = text + " \n \t dn= %s " % self . site_dnstr
text = text + " \n \t options=0x %X " % self . site_options
text = text + " \n \t topo_generator= %s " % self . site_topo_generator
text = text + " \n \t topo_failover= %d " % self . site_topo_failover
2011-12-04 21:08:56 +04:00
for key , dsa in self . dsa_table . items ( ) :
text = text + " \n %s " % dsa
return text
2011-11-03 21:37:24 +04:00
2011-12-08 23:20:03 +04:00
class GraphNode ( object ) :
""" A graph node describing a set of edges that should be directed to it.
2012-01-11 18:11:35 +04:00
2011-12-08 23:20:03 +04:00
Each edge is a connection for a particular naming context replica directed
from another node in the forest to this node .
2011-11-03 21:37:24 +04:00
"""
2011-12-08 23:20:03 +04:00
2011-11-03 21:37:24 +04:00
def __init__ ( self , dsa_dnstr , max_node_edges ) :
""" Instantiate the graph node according to a DSA dn string
2011-12-08 23:20:03 +04:00
: param max_node_edges : maximum number of edges that should ever
be directed to the node
2011-11-03 21:37:24 +04:00
"""
self . max_edges = max_node_edges
self . dsa_dnstr = dsa_dnstr
self . edge_from = [ ]
def __str__ ( self ) :
2011-12-04 21:08:56 +04:00
text = " %s : " % self . __class__ . __name__
text = text + " \n \t dsa_dnstr= %s " % self . dsa_dnstr
text = text + " \n \t max_edges= %d " % self . max_edges
for i , edge in enumerate ( self . edge_from ) :
text = text + " \n \t edge_from[ %d ]= %s " % ( i , edge )
2011-11-03 21:37:24 +04:00
return text
def add_edge_from ( self , from_dsa_dnstr ) :
""" Add an edge from the dsa to our graph nodes edge from list
2012-01-11 18:11:35 +04:00
2011-12-08 23:20:03 +04:00
: param from_dsa_dnstr : the dsa that the edge emanates from
2011-11-03 21:37:24 +04:00
"""
2011-12-04 21:08:56 +04:00
assert from_dsa_dnstr is not None
2011-11-03 21:37:24 +04:00
# No edges from myself to myself
if from_dsa_dnstr == self . dsa_dnstr :
return False
# Only one edge from a particular node
if from_dsa_dnstr in self . edge_from :
return False
# Not too many edges
if len ( self . edge_from ) > = self . max_edges :
return False
self . edge_from . append ( from_dsa_dnstr )
return True
def add_edges_from_connections ( self , dsa ) :
""" For each nTDSConnection object associated with a particular
2012-01-11 18:11:35 +04:00
DSA , we test if it implies an edge to this graph node ( i . e .
the " fromServer " attribute ) . If it does then we add an
edge from the server unless we are over the max edges for this
graph node
2011-12-08 23:20:03 +04:00
: param dsa : dsa with a dnstr equivalent to this graph node
2011-11-03 21:37:24 +04:00
"""
for dnstr , connect in dsa . connect_table . items ( ) :
self . add_edge_from ( connect . from_dnstr )
2015-03-13 06:40:11 +03:00
2011-11-03 21:37:24 +04:00
def add_connections_from_edges ( self , dsa ) :
""" For each edge directed to this graph node, ensure there
is a corresponding nTDSConnection object in the dsa .
"""
for edge_dnstr in self . edge_from :
connect = dsa . get_connection_by_from_dnstr ( edge_dnstr )
# For each edge directed to the NC replica that
# "should be present" on the local DC, the KCC determines
# whether an object c exists such that:
#
# c is a child of the DC's nTDSDSA object.
# c.objectCategory = nTDSConnection
#
# Given the NC replica ri from which the edge is directed,
# c.fromServer is the dsname of the nTDSDSA object of
# the DC on which ri "is present".
#
# c.options does not contain NTDSCONN_OPT_RODC_TOPOLOGY
2012-09-27 20:30:47 +04:00
if connect and not connect . is_rodc_topology ( ) :
2015-03-13 03:53:51 +03:00
continue
2011-11-03 21:37:24 +04:00
# if no such object exists then the KCC adds an object
# c with the following attributes
# Generate a new dnstr for this nTDSConnection
2012-03-04 04:05:23 +04:00
opt = dsdb . NTDSCONN_OPT_IS_GENERATED
2012-01-11 18:11:35 +04:00
flags = dsdb . SYSTEM_FLAG_CONFIG_ALLOW_RENAME + \
dsdb . SYSTEM_FLAG_CONFIG_ALLOW_MOVE
2011-11-03 21:37:24 +04:00
2015-01-22 23:43:55 +03:00
dsa . new_connection ( opt , flags , None , edge_dnstr , None )
2011-11-03 21:37:24 +04:00
def has_sufficient_edges ( self ) :
''' Return True if we have met the maximum " from edges " criteria '''
if len ( self . edge_from ) > = self . max_edges :
return True
return False
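# Hedged usage sketch for GraphNode (the DSA DNs are invented for
# illustration; real callers use nTDSDSA DNs loaded from the samdb):
#
#     node = GraphNode("CN=NTDS Settings,CN=DC1,...", max_node_edges=1)
#     node.add_edge_from("CN=NTDS Settings,CN=DC2,...")  # accepted -> True
#     node.add_edge_from("CN=NTDS Settings,CN=DC3,...")  # refused, max reached
#     node.has_sufficient_edges()                        # True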
2011-12-04 21:08:56 +04:00
2011-12-08 23:20:03 +04:00
class Transport ( object ) :
2011-12-04 21:08:56 +04:00
""" Class defines a Inter-site transport found under Sites
"""
2011-12-08 23:20:03 +04:00
2011-12-04 21:08:56 +04:00
def __init__ ( self , dnstr ) :
2011-12-08 23:20:03 +04:00
self . dnstr = dnstr
self . options = 0
self . guid = None
2012-01-11 18:11:35 +04:00
self . name = None
2011-12-08 23:20:03 +04:00
self . address_attr = None
2012-01-11 18:11:35 +04:00
self . bridgehead_list = [ ]
2011-12-04 21:08:56 +04:00
def __str__ ( self ) :
''' Debug dump string output of Transport object '''
text = " %s : \n \t dn= %s " % ( self . __class__ . __name__ , self . dnstr )
text = text + " \n \t guid= %s " % str ( self . guid )
text = text + " \n \t options= %d " % self . options
text = text + " \n \t address_attr= %s " % self . address_attr
2012-01-11 18:11:35 +04:00
text = text + " \n \t name= %s " % self . name
for dnstr in self . bridgehead_list :
text = text + " \n \t bridgehead_list= %s " % dnstr
2011-12-04 21:08:56 +04:00
return text
def load_transport ( self , samdb ) :
""" Given a Transport object with an prior initialization
2012-01-11 18:11:35 +04:00
for the object ' s DN, search for the DN and load attributes
from the samdb .
2011-12-04 21:08:56 +04:00
"""
attrs = [ " objectGUID " ,
" options " ,
2012-01-11 18:11:35 +04:00
" name " ,
" bridgeheadServerListBL " ,
2011-12-04 21:08:56 +04:00
" transportAddressAttribute " ]
try :
res = samdb . search ( base = self . dnstr , scope = ldb . SCOPE_BASE ,
attrs = attrs )
except ldb . LdbError , ( enum , estr ) :
2011-12-08 23:20:03 +04:00
raise Exception ( " Unable to find Transport for ( %s ) - ( %s ) " %
2011-12-04 21:08:56 +04:00
( self . dnstr , estr ) )
msg = res [ 0 ]
self . guid = misc . GUID ( samdb . schema_format_value ( " objectGUID " ,
msg [ " objectGUID " ] [ 0 ] ) )
if " options " in msg :
self . options = int ( msg [ " options " ] [ 0 ] )
2012-01-11 18:11:35 +04:00
2011-12-04 21:08:56 +04:00
if " transportAddressAttribute " in msg :
self . address_attr = str ( msg [ " transportAddressAttribute " ] [ 0 ] )
2012-01-11 18:11:35 +04:00
if " name " in msg :
self . name = str ( msg [ " name " ] [ 0 ] )
if " bridgeheadServerListBL " in msg :
for value in msg [ " bridgeheadServerListBL " ] :
2012-03-04 04:05:23 +04:00
dsdn = dsdb_Dn ( samdb , value )
2012-01-11 18:11:35 +04:00
dnstr = str ( dsdn . dn )
if dnstr not in self . bridgehead_list :
self . bridgehead_list . append ( dnstr )
2012-03-04 04:05:23 +04:00
2011-12-04 21:08:56 +04:00
2011-12-08 23:20:03 +04:00
class RepsFromTo ( object ) :
2011-12-04 21:08:56 +04:00
""" Class encapsulation of the NDR repsFromToBlob.
2011-12-08 23:20:03 +04:00
Removes the need for external code to understand
other_info or to manipulate update flags .
2011-12-04 21:08:56 +04:00
"""
def __init__ ( self , nc_dnstr = None , ndr_blob = None ) :
self . __dict__ [ ' to_be_deleted ' ] = False
2011-12-08 23:20:03 +04:00
self . __dict__ [ ' nc_dnstr ' ] = nc_dnstr
self . __dict__ [ ' update_flags ' ] = 0x0
2011-12-04 21:08:56 +04:00
# WARNING:
#
# There is a very subtle bug here with python
# and our NDR code. If you assign directly to
# a NDR produced struct (e.g. t_repsFrom.ctr.other_info)
# then a proper python GC reference count is not
# maintained.
#
# To work around this we maintain an internal
# reference to "dns_name(x)" and "other_info" elements
# of repsFromToBlob. This internal reference
# is hidden within this class but it is why you
# see statements like this below:
#
# self.__dict__['ndr_blob'].ctr.other_info = \
# self.__dict__['other_info'] = drsblobs.repsFromTo1OtherInfo()
#
# That would appear to be a redundant assignment but
# it is necessary to hold a proper python GC reference
# count.
if ndr_blob is None :
2011-12-08 23:20:03 +04:00
self . __dict__ [ ' ndr_blob ' ] = drsblobs . repsFromToBlob ( )
2011-12-04 21:08:56 +04:00
self . __dict__ [ ' ndr_blob ' ] . version = 0x1
2011-12-08 23:20:03 +04:00
self . __dict__ [ ' dns_name1 ' ] = None
self . __dict__ [ ' dns_name2 ' ] = None
2011-12-04 21:08:56 +04:00
self . __dict__ [ ' ndr_blob ' ] . ctr . other_info = \
self . __dict__ [ ' other_info ' ] = drsblobs . repsFromTo1OtherInfo ( )
else :
2011-12-08 23:20:03 +04:00
self . __dict__ [ ' ndr_blob ' ] = ndr_blob
2011-12-04 21:08:56 +04:00
self . __dict__ [ ' other_info ' ] = ndr_blob . ctr . other_info
if ndr_blob . version == 0x1 :
2011-12-08 23:20:03 +04:00
self . __dict__ [ ' dns_name1 ' ] = ndr_blob . ctr . other_info . dns_name
self . __dict__ [ ' dns_name2 ' ] = None
2011-12-04 21:08:56 +04:00
else :
2011-12-08 23:20:03 +04:00
self . __dict__ [ ' dns_name1 ' ] = ndr_blob . ctr . other_info . dns_name1
self . __dict__ [ ' dns_name2 ' ] = ndr_blob . ctr . other_info . dns_name2
2011-12-04 21:08:56 +04:00
def __str__ ( self ) :
''' Debug dump string output of class '''
text = " %s : " % self . __class__ . __name__
text = text + " \n \t dnstr= %s " % self . nc_dnstr
text = text + " \n \t update_flags=0x %X " % self . update_flags
text = text + " \n \t version= %d " % self . version
text = text + " \n \t source_dsa_obj_guid= %s " % \
str ( self . source_dsa_obj_guid )
text = text + " \n \t source_dsa_invocation_id= %s " % \
str ( self . source_dsa_invocation_id )
text = text + " \n \t transport_guid= %s " % \
str ( self . transport_guid )
text = text + " \n \t replica_flags=0x %X " % \
self . replica_flags
text = text + " \n \t consecutive_sync_failures= %d " % \
self . consecutive_sync_failures
text = text + " \n \t last_success= %s " % \
self . last_success
text = text + " \n \t last_attempt= %s " % \
self . last_attempt
text = text + " \n \t dns_name1= %s " % \
str ( self . dns_name1 )
text = text + " \n \t dns_name2= %s " % \
str ( self . dns_name2 )
text = text + " \n \t schedule[ "
for slot in self . schedule :
text = text + " 0x %X " % slot
text = text + " ] "
return text
def __setattr__ ( self , item , value ) :
2011-12-08 23:20:03 +04:00
if item in [ ' schedule ' , ' replica_flags ' , ' transport_guid ' ,
' source_dsa_obj_guid ' , ' source_dsa_invocation_id ' ,
' consecutive_sync_failures ' , ' last_success ' ,
2011-12-04 21:08:56 +04:00
' last_attempt ' ] :
2012-01-11 18:11:35 +04:00
if item in [ ' replica_flags ' ] :
self . __dict__ [ ' update_flags ' ] | = drsuapi . DRSUAPI_DRS_UPDATE_FLAGS
elif item in [ ' schedule ' ] :
self . __dict__ [ ' update_flags ' ] | = drsuapi . DRSUAPI_DRS_UPDATE_SCHEDULE
2011-12-04 21:08:56 +04:00
setattr ( self . __dict__ [ ' ndr_blob ' ] . ctr , item , value )
elif item in [ ' dns_name1 ' ] :
self . __dict__ [ ' dns_name1 ' ] = value
if self . __dict__ [ ' ndr_blob ' ] . version == 0x1 :
self . __dict__ [ ' ndr_blob ' ] . ctr . other_info . dns_name = \
self . __dict__ [ ' dns_name1 ' ]
else :
self . __dict__ [ ' ndr_blob ' ] . ctr . other_info . dns_name1 = \
self . __dict__ [ ' dns_name1 ' ]
elif item in [ ' dns_name2 ' ] :
self . __dict__ [ ' dns_name2 ' ] = value
if self . __dict__ [ ' ndr_blob ' ] . version == 0x1 :
raise AttributeError ( item )
else :
self . __dict__ [ ' ndr_blob ' ] . ctr . other_info . dns_name2 = \
self . __dict__ [ ' dns_name2 ' ]
2012-01-11 18:11:35 +04:00
elif item in [ ' nc_dnstr ' ] :
self . __dict__ [ ' nc_dnstr ' ] = value
elif item in [ ' to_be_deleted ' ] :
self . __dict__ [ ' to_be_deleted ' ] = value
2011-12-04 21:08:56 +04:00
elif item in [ ' version ' ] :
2014-06-02 04:37:11 +04:00
raise AttributeError ( " Attempt to set readonly attribute %s " % item )
2011-12-04 21:08:56 +04:00
else :
2014-06-02 04:37:11 +04:00
raise AttributeError ( " Unknown attribute %s " % item )
2011-12-04 21:08:56 +04:00
2012-01-11 18:11:35 +04:00
self . __dict__ [ ' update_flags ' ] | = drsuapi . DRSUAPI_DRS_UPDATE_ADDRESS
2011-12-04 21:08:56 +04:00
def __getattr__ ( self , item ) :
2011-12-08 23:20:03 +04:00
""" Overload of RepsFromTo attribute retrieval.
2012-01-11 18:11:35 +04:00
2011-12-08 23:20:03 +04:00
Allows external code to ignore substructures within the blob
2011-12-04 21:08:56 +04:00
"""
2011-12-08 23:20:03 +04:00
if item in [ ' schedule ' , ' replica_flags ' , ' transport_guid ' ,
' source_dsa_obj_guid ' , ' source_dsa_invocation_id ' ,
' consecutive_sync_failures ' , ' last_success ' ,
2011-12-04 21:08:56 +04:00
' last_attempt ' ] :
return getattr ( self . __dict__ [ ' ndr_blob ' ] . ctr , item )
elif item in [ ' version ' ] :
return self . __dict__ [ ' ndr_blob ' ] . version
elif item in [ ' dns_name1 ' ] :
if self . __dict__ [ ' ndr_blob ' ] . version == 0x1 :
return self . __dict__ [ ' ndr_blob ' ] . ctr . other_info . dns_name
else :
return self . __dict__ [ ' ndr_blob ' ] . ctr . other_info . dns_name1
elif item in [ ' dns_name2 ' ] :
if self . __dict__ [ ' ndr_blob ' ] . version == 0x1 :
raise AttributeError ( item )
else :
return self . __dict__ [ ' ndr_blob ' ] . ctr . other_info . dns_name2
2012-01-11 18:11:35 +04:00
elif item in [ ' to_be_deleted ' ] :
return self . __dict__ [ ' to_be_deleted ' ]
elif item in [ ' nc_dnstr ' ] :
return self . __dict__ [ ' nc_dnstr ' ]
elif item in [ ' update_flags ' ] :
return self . __dict__ [ ' update_flags ' ]
2015-04-10 07:17:50 +03:00
raise AttributeError ( " Unknown attribute %s " % item )
2011-12-04 21:08:56 +04:00
def is_modified ( self ) :
return ( self . update_flags != 0x0 )
2012-01-11 18:11:35 +04:00
def set_unmodified ( self ) :
self . __dict__ [ ' update_flags ' ] = 0x0
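# Hedged usage sketch for RepsFromTo (the NC DN and DNS name are
# invented; the attribute names are the fields proxied by __setattr__):
#
#     r = RepsFromTo(nc_dnstr="DC=example,DC=com")
#     r.dns_name1 = "dc1.example.com"   # also flags DRSUAPI_DRS_UPDATE_ADDRESS
#     r.replica_flags = 0               # flags DRSUAPI_DRS_UPDATE_FLAGS
#     r.is_modified()                   # True until set_unmodified() is called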
2012-03-04 04:05:23 +04:00
2012-01-11 18:11:35 +04:00
class SiteLink ( object ) :
""" Class defines a site link found under sites
"""
def __init__ ( self , dnstr ) :
2012-03-04 04:05:23 +04:00
self . dnstr = dnstr
self . options = 0
2012-01-11 18:11:35 +04:00
self . system_flags = 0
2012-03-04 04:05:23 +04:00
self . cost = 0
self . schedule = None
self . interval = None
self . site_list = [ ]
2012-01-11 18:11:35 +04:00
def __str__ ( self ) :
''' Debug dump string output of SiteLink object '''
text = " %s : \n \t dn= %s " % ( self . __class__ . __name__ , self . dnstr )
text = text + " \n \t options= %d " % self . options
text = text + " \n \t system_flags= %d " % self . system_flags
text = text + " \n \t cost= %d " % self . cost
text = text + " \n \t interval= %s " % self . interval
if self . schedule is not None :
text = text + " \n \t schedule.size= %s " % self . schedule . size
text = text + " \n \t schedule.bandwidth= %s " % self . schedule . bandwidth
text = text + " \n \t schedule.numberOfSchedules= %s " % \
self . schedule . numberOfSchedules
for i , header in enumerate ( self . schedule . headerArray ) :
text = text + " \n \t schedule.headerArray[ %d ].type= %d " % \
( i , header . type )
text = text + " \n \t schedule.headerArray[ %d ].offset= %d " % \
( i , header . offset )
text = text + " \n \t schedule.dataArray[ %d ].slots[ " % i
for slot in self . schedule . dataArray [ i ] . slots :
text = text + " 0x %X " % slot
text = text + " ] "
for dnstr in self . site_list :
text = text + " \n \t site_list= %s " % dnstr
return text
def load_sitelink ( self , samdb ) :
""" Given a siteLink object with an prior initialization
for the object ' s DN, search for the DN and load attributes
from the samdb .
"""
attrs = [ " options " ,
" systemFlags " ,
" cost " ,
" schedule " ,
" replInterval " ,
" siteList " ]
try :
res = samdb . search ( base = self . dnstr , scope = ldb . SCOPE_BASE ,
2015-02-24 04:13:52 +03:00
attrs = attrs , controls = [ ' extended_dn:0 ' ] )
2012-01-11 18:11:35 +04:00
except ldb . LdbError , ( enum , estr ) :
raise Exception ( " Unable to find SiteLink for ( %s ) - ( %s ) " %
( self . dnstr , estr ) )
msg = res [ 0 ]
if " options " in msg :
self . options = int ( msg [ " options " ] [ 0 ] )
if " systemFlags " in msg :
self . system_flags = int ( msg [ " systemFlags " ] [ 0 ] )
if " cost " in msg :
self . cost = int ( msg [ " cost " ] [ 0 ] )
if " replInterval " in msg :
self . interval = int ( msg [ " replInterval " ] [ 0 ] )
if " siteList " in msg :
for value in msg [ " siteList " ] :
2012-03-04 04:05:23 +04:00
dsdn = dsdb_Dn ( samdb , value )
2015-02-24 04:13:52 +03:00
guid = misc . GUID ( dsdn . dn . get_extended_component ( ' GUID ' ) )
if guid not in self . site_list :
self . site_list . append ( guid )
2012-01-11 18:11:35 +04:00
2015-03-13 04:36:05 +03:00
class KCCFailedObject ( object ) :
def __init__ ( self , uuid , failure_count , time_first_failure , last_result , dns_name ) :
self . uuid = uuid
self . failure_count = failure_count
self . time_first_failure = time_first_failure
self . last_result = last_result
self . dns_name = dns_name
2012-03-04 04:05:23 +04:00
class VertexColor ( object ) :
2015-02-18 08:21:19 +03:00
( red , black , white , unknown ) = range ( 0 , 4 )
2012-01-11 18:11:35 +04:00
2012-03-04 04:05:23 +04:00
2012-01-11 18:11:35 +04:00
class Vertex ( object ) :
""" Class encapsulation of a Site Vertex in the
intersite topology replication algorithm
"""
def __init__ ( self , site , part ) :
2012-03-04 04:05:23 +04:00
self . site = site
self . part = part
2012-01-11 18:11:35 +04:00
self . color = VertexColor . unknown
2015-02-18 08:21:19 +03:00
self . edges = [ ]
self . accept_red_red = [ ]
self . accept_black = [ ]
2015-03-13 04:36:05 +03:00
self . repl_info = ReplInfo ( )
self . root = self
2015-02-18 08:21:19 +03:00
self . guid = None
2015-03-13 04:36:05 +03:00
self . component_id = self
self . demoted = False
self . options = 0
self . interval = 0
2012-01-11 18:11:35 +04:00
def color_vertex ( self ) :
""" Color each vertex to indicate which kind of NC
replica it contains
"""
# IF s contains one or more DCs with full replicas of the
# NC cr!nCName
# SET v.Color to COLOR.RED
# ELSEIF s contains one or more partial replicas of the NC
# SET v.Color to COLOR.BLACK
#ELSE
# SET v.Color to COLOR.WHITE
# set to minimum (no replica)
self . color = VertexColor . white
for dnstr , dsa in self . site . dsa_table . items ( ) :
rep = dsa . get_current_replica ( self . part . nc_dnstr )
if rep is None :
continue
# We have a full replica which is the largest
# value so exit
2012-09-27 20:30:47 +04:00
if not rep . is_partial ( ) :
2012-01-11 18:11:35 +04:00
self . color = VertexColor . red
break
else :
self . color = VertexColor . black
2015-03-13 04:36:05 +03:00
2012-01-11 18:11:35 +04:00
def is_red ( self ) :
assert ( self . color != VertexColor . unknown )
return ( self . color == VertexColor . red )
def is_black ( self ) :
assert ( self . color != VertexColor . unknown )
return ( self . color == VertexColor . black )
def is_white ( self ) :
assert ( self . color != VertexColor . unknown )
return ( self . color == VertexColor . white )
2015-02-18 08:21:19 +03:00
class IntersiteGraph ( object ) :
""" Graph for representing the intersite """
def __init__ ( self ) :
2015-03-13 04:36:05 +03:00
self . vertices = set ( )
self . edges = set ( )
self . edge_set = set ( )
# All vertices that are endpoints of edges
self . connected_vertices = None
2015-02-18 08:21:19 +03:00
class MultiEdgeSet ( object ) :
""" Defines a multi edge set """
def __init__ ( self ) :
self . guid = 0 # objectGuid siteLinkBridge
self . edges = [ ]
class MultiEdge ( object ) :
def __init__ ( self ) :
2015-03-13 04:36:05 +03:00
self . site_link = None # object siteLink
2015-02-18 08:21:19 +03:00
self . vertices = [ ]
self . con_type = None # interSiteTransport GUID
2015-03-13 04:36:05 +03:00
self . repl_info = ReplInfo ( )
self . directed = True
2015-02-18 08:21:19 +03:00
class ReplInfo ( object ) :
def __init__ ( self ) :
self . cost = 0
self . interval = 0
self . options = 0
2015-03-13 04:36:05 +03:00
self . schedule = None
2015-02-18 08:21:19 +03:00
class InternalEdge ( object ) :
def __init__ ( self , v1 , v2 , redred , repl , eType ) :
self . v1 = v1
self . v2 = v2
self . red_red = redred
self . repl_info = repl
self . e_type = eType
def __eq__ ( self , other ) :
return not self < other and not other < self
def __ne__ ( self , other ) :
return self < other or other < self
def __gt__ ( self , other ) :
return other < self
def __ge__ ( self , other ) :
return not self < other
def __le__ ( self , other ) :
return not other < self
2015-03-13 04:36:05 +03:00
# TODO compare options and interval
2015-02-18 08:21:19 +03:00
def __lt__ ( self , other ) :
if self . red_red != other . red_red :
return self . red_red
if self . repl_info . cost != other . repl_info . cost :
return self . repl_info . cost < other . repl_info . cost
self_time = total_schedule ( self . repl_info . schedule )
other_time = total_schedule ( other . repl_info . schedule )
if self_time != other_time :
return self_time > other_time
2015-03-17 01:50:02 +03:00
#XXX guid comparison using ndr_pack
2015-02-18 08:21:19 +03:00
if self . v1 . guid != other . v1 . guid :
2015-03-17 01:50:02 +03:00
return self . v1 . ndrpacked_guid < other . v1 . ndrpacked_guid
2015-02-18 08:21:19 +03:00
if self . v2 . guid != other . v2 . guid :
2015-03-17 01:50:02 +03:00
return self . v2 . ndrpacked_guid < other . v2 . ndrpacked_guid
2015-02-18 08:21:19 +03:00
2015-03-13 04:36:05 +03:00
return self . e_type < other . e_type
2015-02-18 08:21:19 +03:00
2012-01-11 18:11:35 +04:00
##################################################
2015-02-24 01:44:11 +03:00
# Global Functions and Variables
2012-01-11 18:11:35 +04:00
##################################################
2015-02-24 01:44:11 +03:00
MAX_DWORD = 2 * * 32 - 1
2015-03-06 05:17:24 +03:00
def get_dsa_config_rep ( dsa ) :
# Find configuration NC replica for the DSA
for c_rep in dsa . current_rep_table . values ( ) :
if c_rep . is_config ( ) :
return c_rep
raise KCCError ( " Unable to find config NC replica for ( %s ) " %
dsa . dsa_dnstr )
2012-01-11 18:11:35 +04:00
def sort_dsa_by_guid ( dsa1 , dsa2 ) :
2015-03-17 01:57:22 +03:00
" use ndr_pack for GUID comparison, as appears correct in some places " " "
return cmp ( ndr_pack ( dsa1 . dsa_guid ) , ndr_pack ( dsa2 . dsa_guid ) )
2015-03-13 04:36:05 +03:00
def total_schedule ( schedule ) :
if schedule is None :
return 84 * 8 # 84 bytes = 84 * 8 bits
total = 0
for byte in schedule :
while byte != 0 :
total + = byte & 1
byte >> = 1
return total
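# A minimal sketch (not part of the KCC algorithm) showing what
# total_schedule counts: the number of set bits across the 84-byte
# replication schedule, with None treated as a full schedule of
# 84 * 8 = 672 bits.
def _example_total_schedule():
    """Illustrative only; the schedules below are invented."""
    always_on = None          # treated as every bit set
    sparse = [0] * 84
    sparse[0] = 0x0F          # four bits set in the first byte
    return total_schedule(always_on), total_schedule(sparse)   # (672, 4)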
# Returns True if the schedules intersect
def combine_repl_info ( info_a , info_b , info_c ) :
info_c . interval = max ( info_a . interval , info_b . interval )
info_c . options = info_a . options & info_b . options
if info_a . schedule is None :
info_a . schedule = [ 0xFF ] * 84
if info_b . schedule is None :
info_b . schedule = [ 0xFF ] * 84
new_info = [ 0 ] * 84
i = 0
count = 0
while i < 84 :
# Note that this operation is actually bitwise
new_info [ i ] = info_a . schedule [ i ] & info_b . schedule [ i ]
if new_info [ i ] != 0 :
count + = 1
i + = 1
if count == 0 :
return False
info_c . schedule = new_info
# Truncate to MAX_DWORD
info_c . cost = info_a . cost + info_b . cost
2015-02-24 01:44:11 +03:00
if info_c . cost > MAX_DWORD :
info_c . cost = MAX_DWORD
2015-03-13 04:36:05 +03:00
return True
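# A minimal sketch (illustrative values only) of combine_repl_info:
# costs add (capped at MAX_DWORD), the interval is the maximum of the
# two, and the schedules are intersected bitwise; False is returned
# when the two schedules never overlap.
def _example_combine_repl_info():
    """Illustrative only; the ReplInfo values are invented."""
    info_a, info_b, info_c = ReplInfo(), ReplInfo(), ReplInfo()
    info_a.cost, info_b.cost = 100, 250
    info_a.interval, info_b.interval = 15, 180
    info_a.schedule = [0x0F] * 84     # partially available
    info_b.schedule = [0xFF] * 84     # always available
    if combine_repl_info(info_a, info_b, info_c):
        return info_c.cost, info_c.interval    # (350, 180)
    return None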
2015-02-27 08:21:19 +03:00
2015-03-11 06:22:43 +03:00
def write_dot_file ( basename , edge_list , vertices = None , label = None , destdir = None ,
2015-03-12 01:56:59 +03:00
reformat_labels = True , directed = False , debug = None ) :
2015-02-27 08:21:19 +03:00
from tempfile import NamedTemporaryFile
if label :
basename + = ' _ ' + label . translate ( None , ' , ' ) #fix DN, guid labels
f = NamedTemporaryFile ( suffix = ' .dot ' , prefix = basename + ' _ ' , delete = False , dir = destdir )
2015-03-12 01:56:59 +03:00
if debug is not None :
debug ( f . name )
2015-02-27 08:21:19 +03:00
graphname = ' ' . join ( x for x in basename if x . isalnum ( ) )
2015-03-06 05:28:29 +03:00
print >> f , ' %s %s { ' % ( ' digraph ' if directed else ' graph ' , graphname )
print >> f , ' label= " %s " ; \n fontsize=20; ' % ( label or graphname )
2015-03-11 06:22:43 +03:00
if vertices :
for v in vertices :
if reformat_labels :
v = v . replace ( ' , ' , ' \\ n ' )
print >> f , ' " %s " ; ' % ( v , )
2015-02-27 08:21:19 +03:00
for a , b in edge_list :
2015-03-06 05:28:29 +03:00
if reformat_labels :
a = a . replace ( ' , ' , ' \\ n ' )
b = b . replace ( ' , ' , ' \\ n ' )
line = ' -> ' if directed else ' -- '
print >> f , ' " %s " %s " %s " ; ' % ( a , line , b )
2015-02-27 08:21:19 +03:00
print >> f , ' } '
f . close ( )
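# Hedged usage sketch for write_dot_file; the site names are invented
# and a real caller passes DSA or site DNs. The .dot file lands in a
# NamedTemporaryFile under destdir (or the system temp dir).
def _example_write_dot_file(destdir=None):
    """Illustrative only: dump a tiny undirected graph."""
    edges = [('site-A', 'site-B'), ('site-B', 'site-C')]
    write_dot_file('example_topology', edges,
                   vertices=['site-A', 'site-B', 'site-C'],
                   label='example', destdir=destdir,
                   reformat_labels=False, directed=False)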
2015-03-11 03:53:38 +03:00
class KCCGraphError ( Exception ) :
pass
2015-03-12 02:00:03 +03:00
def verify_graph_complete ( edges , vertices , edge_vertices ) :
2015-03-12 00:19:51 +03:00
""" The graph is complete, which is to say there is an edge between
every pair of nodes . """
2015-03-11 03:53:38 +03:00
for v in vertices :
remotes = set ( )
for a , b in edges :
if a == v :
remotes . add ( b )
elif b == v :
remotes . add ( a )
if len ( remotes ) + 1 != len ( vertices ) :
raise KCCGraphError ( " graph is not fully connected " )
def verify_graph_connected ( edges , vertices , edge_vertices ) :
2015-03-12 00:19:51 +03:00
""" There is a path between any two nodes. """
2015-03-11 03:53:38 +03:00
if not edges :
if len ( vertices ) < = 1 :
return
raise KCCGraphError ( " disconnected vertices were found: \n vertices: %s \n edges: %s " %
( sorted ( vertices ) , sorted ( edges ) ) )
remaining_edges = list ( edges )
reached = set ( remaining_edges . pop ( ) )
while True :
doomed = [ ]
for i , e in enumerate ( remaining_edges ) :
a , b = e
if a in reached :
reached . add ( b )
doomed . append ( i )
elif b in reached :
reached . add ( a )
doomed . append ( i )
if not doomed :
break
for i in reversed ( doomed ) :
del remaining_edges [ i ]
if remaining_edges or reached != vertices :
raise KCCGraphError ( " graph is not connected: \n vertices: %s \n edges: %s " %
( sorted ( vertices ) , sorted ( edges ) ) )
def verify_graph_forest ( edges , vertices , edge_vertices ) :
2015-03-12 00:19:51 +03:00
""" The graph contains no loops. A forest that is also connected is a
tree . """
2015-03-11 03:53:38 +03:00
trees = [ set ( e ) for e in edges ]
while True :
for a , b in itertools . combinations ( trees , 2 ) :
intersection = a & b
if intersection :
if len ( intersection ) == 1 :
a | = b
trees . remove ( b )
break
else :
raise KCCGraphError ( " there is a loop in the graph " )
else :
# no break in itertools.combinations loop means no
# further mergers, so we're done.
#
# XXX here we also know whether it is a tree or a
# forest by len(trees) but the connected test already
# tells us that.
return
def verify_graph_multi_edge_forest ( edges , vertices , edge_vertices ) :
""" This allows a forest with duplicate edges. That is if multiple
edges go between the same two vertices , they are treated as a
single edge by this test .
e . g . :
o
pass : o - o = o o = o ( | ) fail : o - o
` o o ` o '
"""
unique_edges = set ( edges )
trees = [ set ( e ) for e in unique_edges ]
while True :
for a , b in itertools . combinations ( trees , 2 ) :
intersection = a & b
if intersection :
if len ( intersection ) == 1 :
a | = b
trees . remove ( b )
break
else :
raise KCCGraphError ( " there is a loop in the graph " )
else :
return
def verify_graph_no_lonely_vertices ( edges , vertices , edge_vertices ) :
2015-03-12 00:19:51 +03:00
""" There are no vertices without edges. """
2015-03-11 03:53:38 +03:00
lonely = vertices - edge_vertices
if lonely :
raise KCCGraphError ( " some vertices are not connected: \n %s " % ' \n ' . join ( sorted ( lonely ) ) )
def verify_graph_no_unknown_vertices ( edges , vertices , edge_vertices ) :
2015-03-12 00:19:51 +03:00
""" The edge endpoints contain no vertices that are otherwise unknown. """
2015-03-11 03:53:38 +03:00
unknown = edge_vertices - vertices
if unknown :
raise KCCGraphError ( " some edge vertices are seemingly unknown: \n %s " % ' \n ' . join ( sorted ( unknown ) ) )
2015-03-12 00:21:42 +03:00
def verify_graph_directed_double_ring ( edges , vertices , edge_vertices ) :
""" Each node has two directed edges leaving it, and two arriving. The
edges work in pairs that have the same end points but point in
opposite directions . The pairs form a path that touches every
vertex and form a loop .
"""
#XXX possibly the 1 and 2 vertex cases are special cases.
if not edges :
return
if len ( edges ) != 2 * len ( vertices ) :
raise KCCGraphError ( " directed double ring requires edges == 2 * vertices " )
exits = { }
for start , end in edges :
s = exits . setdefault ( start , [ ] )
s . append ( end )
try :
#follow both paths at once -- they should be the same length
#XXX there is probably a simpler way.
forwards , backwards = exits [ start ]
fprev , bprev = ( start , start )
f_path = [ start ]
b_path = [ start ]
for i in range ( len ( vertices ) ) :
a , b = exits [ forwards ]
if a == fprev :
fnext = b
else :
fnext = a
f_path . append ( forwards )
fprev = forwards
forwards = fnext
a , b = exits [ backwards ]
if a == bprev :
bnext = b
else :
bnext = a
b_path . append ( backwards )
bprev = backwards
backwards = bnext
except ValueError , e :
raise KCCGraphError ( " wrong number of exits ' %s ' " % e )
f_set = set ( f_path )
b_set = set ( b_path )
if ( f_path != list ( reversed ( b_path ) ) or
len ( f_path ) != len ( f_set ) + 1 or
len ( f_set ) != len ( vertices ) ) :
raise KCCGraphError ( " doesn ' t seem like a double ring to me! " )
2015-03-12 04:44:54 +03:00
def verify_graph_directed_double_ring_or_small ( edges , vertices , edge_vertices ) :
if len ( vertices ) < 3 :
return
return verify_graph_directed_double_ring ( edges , vertices , edge_vertices )
2015-03-11 03:53:38 +03:00
def verify_graph ( title , edges , vertices = None , directed = False , properties = ( ) , fatal = False ,
debug = None ) :
errors = [ ]
if debug is None :
def debug ( * args ) : pass
debug ( " %s Starting verify_graph for %s %s %s " % ( PURPLE , MAGENTA , title , C_NORMAL ) )
properties = [ x . replace ( ' ' , ' _ ' ) for x in properties ]
edge_vertices = set ( )
for a , b in edges :
edge_vertices . add ( a )
edge_vertices . add ( b )
if vertices is None :
vertices = edge_vertices
else :
vertices = set ( vertices )
if vertices != edge_vertices :
debug ( " vertices in edges don ' t match given vertices: \n %s != %s " %
( sorted ( edge_vertices ) , sorted ( vertices ) ) )
for p in properties :
fn = ' verify_graph_ %s ' % p
try :
f = globals ( ) [ fn ]
except KeyError :
errors . append ( ( p , " There is no verification check for ' %s ' " % p ) )
continue
try :
f ( edges , vertices , edge_vertices )
debug ( " %s %18s : %s verified! " % ( DARK_GREEN , p , C_NORMAL ) )
except KCCGraphError , e :
errors . append ( ( p , e ) )
if errors :
if fatal :
raise KCCGraphError ( " The graph lacks the following properties: \n " + ' \n ' . join ( errors ) )
debug ( ( " %s %s %s FAILED: " % ( MAGENTA , title , RED ) ) )
for p , e in errors :
debug ( " %18s : %s %s %s " % ( p , DARK_YELLOW , e , RED ) )
debug ( C_NORMAL )
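# Hedged usage sketch for verify_graph; the property names map onto
# the verify_graph_* functions above and the triangle below is invented.
def _example_verify_graph(debug=None):
    """Illustrative only: check a triangle for two properties."""
    edges = [('a', 'b'), ('b', 'c'), ('c', 'a')]
    verify_graph('example triangle', edges,
                 vertices=['a', 'b', 'c'],
                 properties=('connected', 'complete'),
                 fatal=False, debug=debug)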
2015-03-11 06:22:43 +03:00
def verify_and_dot ( basename , edges , vertices = None , label = None , destdir = None ,
reformat_labels = True , directed = False , properties = ( ) , fatal = False ,
debug = None , verify = True , dot_files = False ) :
title = ' %s %s ' % ( basename , label or ' ' )
if verify :
verify_graph ( title , edges , vertices , properties = properties , fatal = fatal ,
debug = debug )
if dot_files :
write_dot_file ( basename , edges , vertices = vertices , label = label , destdir = destdir ,
2015-03-12 02:00:03 +03:00
reformat_labels = reformat_labels , directed = directed , debug = debug )
2015-03-12 00:19:51 +03:00
def list_verify_tests ( ) :
for k , v in sorted ( globals ( ) . items ( ) ) :
if k . startswith ( ' verify_graph_ ' ) :
print k . replace ( ' verify_graph_ ' , ' ' )
if v . __doc__ :
print ' %s %s %s ' % ( GREY , v . __doc__ , C_NORMAL )
else :
print