From cb9486308780c5736d2f7a3dde5f45a0e7f4a8ce Mon Sep 17 00:00:00 2001 From: Douglas Bagnall Date: Thu, 4 Jun 2015 11:19:56 +1200 Subject: [PATCH] KCC: switch samba_kcc over to samba.kcc module Signed-off-by: Douglas Bagnall Reviewed-by: Garming Sam Reviewed-by: Andrew Bartlett --- source4/scripting/bin/samba_kcc | 3140 +------------------------------ 1 file changed, 19 insertions(+), 3121 deletions(-) diff --git a/source4/scripting/bin/samba_kcc b/source4/scripting/bin/samba_kcc index 0d3de445c59..b85e4679450 100755 --- a/source4/scripting/bin/samba_kcc +++ b/source4/scripting/bin/samba_kcc @@ -24,7 +24,6 @@ import os import sys import random -import uuid # ensure we get messages out immediately, so they get in the samba logs, # and don't get swallowed by a timeout @@ -40,3105 +39,23 @@ os.environ["TZ"] = "GMT" sys.path.insert(0, "bin/python") import optparse -import logging -import itertools -import heapq import time -from functools import partial - -from samba import ( - getopt as options, - ldb, - dsdb, - drs_utils, - nttime2unix, - unix2nttime) -from samba.auth import system_session -from samba.samdb import SamDB -from samba.dcerpc import drsuapi -from samba.kcc import ldif_import_export -from samba.kcc.kcc_utils import * -from samba.kcc.graph import * -from samba.kcc.debug import * - -class KCC(object): - """The Knowledge Consistency Checker class. - - A container for objects and methods allowing a run of the KCC. Produces a - set of connections in the samdb for which the Distributed Replication - Service can then utilize to replicate naming contexts - - :param unix_now: The putative current time in seconds since 1970. - :param read_only: Don't write to the database. - :param verify: Check topological invariants for the generated graphs - :param debug: Write verbosely to stderr. - "param dot_files: write Graphviz files in /tmp showing topology - """ - def __init__(self): - """Initializes the partitions class which can hold - our local DCs partitions or all the partitions in - the forest - """ - self.part_table = {} # partition objects - self.site_table = {} - self.transport_table = {} - self.ip_transport = None - self.sitelink_table = {} - self.dsa_by_dnstr = {} - self.dsa_by_guid = {} - - self.get_dsa_by_guidstr = self.dsa_by_guid.get - self.get_dsa = self.dsa_by_dnstr.get - - # TODO: These should be backed by a 'permanent' store so that when - # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES, - # the failure information can be returned - self.kcc_failed_links = {} - self.kcc_failed_connections = set() - - # Used in inter-site topology computation. 
A list - # of connections (by NTDSConnection object) that are - # to be kept when pruning un-needed NTDS Connections - self.kept_connections = set() - - self.my_dsa_dnstr = None # My dsa DN - self.my_dsa = None # My dsa object - - self.my_site_dnstr = None - self.my_site = None - - self.samdb = None - - def load_all_transports(self): - """Loads the inter-site transport objects for Sites - - :return: None - :raise KCCError: if no IP transport is found - """ - try: - res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" % - self.samdb.get_config_basedn(), - scope=ldb.SCOPE_SUBTREE, - expression="(objectClass=interSiteTransport)") - except ldb.LdbError, (enum, estr): - raise KCCError("Unable to find inter-site transports - (%s)" % - estr) - - for msg in res: - dnstr = str(msg.dn) - - transport = Transport(dnstr) - - transport.load_transport(self.samdb) - self.transport_table.setdefault(str(transport.guid), - transport) - if transport.name == 'IP': - self.ip_transport = transport - - if self.ip_transport is None: - raise KCCError("there doesn't seem to be an IP transport") - - def load_all_sitelinks(self): - """Loads the inter-site siteLink objects - - :return: None - :raise KCCError: if site-links aren't found - """ - try: - res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" % - self.samdb.get_config_basedn(), - scope=ldb.SCOPE_SUBTREE, - expression="(objectClass=siteLink)") - except ldb.LdbError, (enum, estr): - raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr) - - for msg in res: - dnstr = str(msg.dn) - - # already loaded - if dnstr in self.sitelink_table: - continue - - sitelink = SiteLink(dnstr) - - sitelink.load_sitelink(self.samdb) - - # Assign this siteLink to table - # and index by dn - self.sitelink_table[dnstr] = sitelink - - def load_site(self, dn_str): - """Helper for load_my_site and load_all_sites. - - Put all the site's DSAs into the KCC indices. - - :param dn_str: a site dn_str - :return: the Site object pertaining to the dn_str - """ - site = Site(dn_str, unix_now) - site.load_site(self.samdb) - - # We avoid replacing the site with an identical copy in case - # somewhere else has a reference to the old one, which would - # lead to all manner of confusion and chaos. - guid = str(site.site_guid) - if guid not in self.site_table: - self.site_table[guid] = site - self.dsa_by_dnstr.update(site.dsa_table) - self.dsa_by_guid.update((str(x.dsa_guid), x) - for x in site.dsa_table.values()) - - return self.site_table[guid] - - def load_my_site(self): - """Load the Site object for the local DSA. - - :return: None - """ - self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % ( - self.samdb.server_site_name(), - self.samdb.get_config_basedn())) - - self.my_site = self.load_site(self.my_site_dnstr) - - def load_all_sites(self): - """Discover all sites and create Site objects. 
- - :return: None - :raise: KCCError if sites can't be found - """ - try: - res = self.samdb.search("CN=Sites,%s" % - self.samdb.get_config_basedn(), - scope=ldb.SCOPE_SUBTREE, - expression="(objectClass=site)") - except ldb.LdbError, (enum, estr): - raise KCCError("Unable to find sites - (%s)" % estr) - - for msg in res: - sitestr = str(msg.dn) - self.load_site(sitestr) - - def load_my_dsa(self): - """Discover my nTDSDSA dn thru the rootDSE entry - - :return: None - :raise: KCCError if DSA can't be found - """ - dn = ldb.Dn(self.samdb, "" % self.samdb.get_ntds_GUID()) - try: - res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, - attrs=["objectGUID"]) - except ldb.LdbError, (enum, estr): - logger.warning("Search for %s failed: %s. This typically happens" - " in --importldif mode due to lack of module" - " support.", dn, estr) - try: - # We work around the failure above by looking at the - # dsServiceName that was put in the fake rootdse by - # the --exportldif, rather than the - # samdb.get_ntds_GUID(). The disadvantage is that this - # mode requires we modify the @ROOTDSE dnq to support - # --forced-local-dsa - service_name_res = self.samdb.search(base="", - scope=ldb.SCOPE_BASE, - attrs=["dsServiceName"]) - dn = ldb.Dn(self.samdb, - service_name_res[0]["dsServiceName"][0]) - - res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, - attrs=["objectGUID"]) - except ldb.LdbError, (enum, estr): - raise KCCError("Unable to find my nTDSDSA - (%s)" % estr) - - if len(res) != 1: - raise KCCError("Unable to find my nTDSDSA at %s" % - dn.extended_str()) - - ntds_guid = misc.GUID(self.samdb.get_ntds_GUID()) - if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid: - raise KCCError("Did not find the GUID we expected," - " perhaps due to --importldif") - - self.my_dsa_dnstr = str(res[0].dn) - - self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr) - - if self.my_dsa_dnstr not in self.dsa_by_dnstr: - DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsas_by_dnstr:" - " it must be RODC.\n" - "Let's add it, because my_dsa is special!\n" - "(likewise for self.dsa_by_guid of course)" % - self.my_dsas_dnstr) - - self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa - self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa - - def load_all_partitions(self): - """Discover and load all partitions. 
- - Each NC is inserted into the part_table by partition - dn string (not the nCName dn string) - - :return: None - :raise: KCCError if partitions can't be found - """ - try: - res = self.samdb.search("CN=Partitions,%s" % - self.samdb.get_config_basedn(), - scope=ldb.SCOPE_SUBTREE, - expression="(objectClass=crossRef)") - except ldb.LdbError, (enum, estr): - raise KCCError("Unable to find partitions - (%s)" % estr) - - for msg in res: - partstr = str(msg.dn) - - # already loaded - if partstr in self.part_table: - continue - - part = Partition(partstr) - - part.load_partition(self.samdb) - self.part_table[partstr] = part - - def should_be_present_test(self): - """Enumerate all loaded partitions and DSAs in local - site and test if NC should be present as replica - """ - for partdn, part in self.part_table.items(): - for dsadn, dsa in self.my_site.dsa_table.items(): - needed, ro, partial = part.should_be_present(dsa) - logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" % - (dsadn, part.nc_dnstr, needed, ro, partial)) - - def refresh_failed_links_connections(self): - """Based on MS-ADTS 6.2.2.1""" - - # Instead of NULL link with failure_count = 0, the tuple is - # simply removed - - # LINKS: Refresh failed links - self.kcc_failed_links = {} - current, needed = self.my_dsa.get_rep_tables() - for replica in current.values(): - # For every possible connection to replicate - for reps_from in replica.rep_repsFrom: - failure_count = reps_from.consecutive_sync_failures - if failure_count <= 0: - continue - - dsa_guid = str(reps_from.source_dsa_obj_guid) - time_first_failure = reps_from.last_success - last_result = reps_from.last_attempt - dns_name = reps_from.dns_name1 - - f = self.kcc_failed_links.get(dsa_guid) - if not f: - f = KCCFailedObject(dsa_guid, failure_count, - time_first_failure, last_result, - dns_name) - self.kcc_failed_links[dsa_guid] = f - #elif f.failure_count == 0: - # f.failure_count = failure_count - # f.time_first_failure = time_first_failure - # f.last_result = last_result - else: - f.failure_count = max(f.failure_count, failure_count) - f.time_first_failure = min(f.time_first_failure, - time_first_failure) - f.last_result = last_result - - # CONNECTIONS: Refresh failed connections - restore_connections = set() - if opts.attempt_live_connections: - DEBUG("refresh_failed_links: checking if links are still down") - for connection in self.kcc_failed_connections: - try: - drs_utils.drsuapi_connect(connection.dns_name, lp, creds) - # Failed connection is no longer failing - restore_connections.add(connection) - except drs_utils.drsException: - # Failed connection still failing - connection.failure_count += 1 - else: - DEBUG("refresh_failed_links: not checking live links because we\n" - "weren't asked to --attempt-live-connections") - - # Remove the restored connections from the failed connections - self.kcc_failed_connections.difference_update(restore_connections) - - def is_stale_link_connection(self, target_dsa): - """Check whether a link to a remote DSA is stale - - Used in MS-ADTS 6.2.2.2 Intrasite Connection Creation - - Returns True if the remote seems to have been down for at - least two hours, otherwise False. 
- - :param target_dsa: the remote DSA object - :return: True if link is stale, otherwise False - """ - failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid)) - if failed_link: - # failure_count should be > 0, but check anyways - if failed_link.failure_count > 0: - unix_first_failure = \ - nttime2unix(failed_link.time_first_failure) - # TODO guard against future - if unix_first_failure > unix_now: - logger.error("The last success time attribute for \ - repsFrom is in the future!") - - # Perform calculation in seconds - if (unix_now - unix_first_failure) > 60 * 60 * 2: - return True - - # TODO connections - - return False - - # TODO: This should be backed by some form of local database - def remove_unneeded_failed_links_connections(self): - # Remove all tuples in kcc_failed_links where failure count = 0 - # In this implementation, this should never happen. - - # Remove all connections which were not used this run or connections - # that became active during this run. - pass - - def remove_unneeded_ntdsconn(self, all_connected): - """Remove unneeded NTDS Connections once topology is calculated - - Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections - - :param all_connected: indicates whether all sites are connected - :return: None - """ - mydsa = self.my_dsa - - # New connections won't have GUIDs which are needed for - # sorting. Add them. - for cn_conn in mydsa.connect_table.values(): - if cn_conn.guid is None: - if opts.readonly: - cn_conn.guid = misc.GUID(str(uuid.uuid4())) - cn_conn.whenCreated = nt_now - else: - cn_conn.load_connection(self.samdb) - - for cn_conn in mydsa.connect_table.values(): - - s_dnstr = cn_conn.get_from_dnstr() - if s_dnstr is None: - cn_conn.to_be_deleted = True - continue - - # Get the source DSA no matter what site - # XXX s_dsa is NEVER USED. It will be removed. - s_dsa = self.get_dsa(s_dnstr) - - #XXX should an RODC be regarded as same site - same_site = s_dnstr in self.my_site.dsa_table - - # Given an nTDSConnection object cn, if the DC with the - # nTDSDSA object dc that is the parent object of cn and - # the DC with the nTDSDA object referenced by cn!fromServer - # are in the same site, the KCC on dc deletes cn if all of - # the following are true: - # - # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options. - # - # No site settings object s exists for the local DC's site, or - # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in - # s!options. 
- # - # Another nTDSConnection object cn2 exists such that cn and - # cn2 have the same parent object, cn!fromServer = cn2!fromServer, - # and either - # - # cn!whenCreated < cn2!whenCreated - # - # cn!whenCreated = cn2!whenCreated and - # cn!objectGUID < cn2!objectGUID - # - # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options - if same_site: - if not cn_conn.is_generated(): - continue - - if self.my_site.is_cleanup_ntdsconn_disabled(): - continue - - # Loop thru connections looking for a duplicate that - # fulfills the previous criteria - lesser = False - packed_guid = ndr_pack(cn_conn.guid) - for cn2_conn in mydsa.connect_table.values(): - if cn2_conn is cn_conn: - continue - - s2_dnstr = cn2_conn.get_from_dnstr() - - # If the NTDS Connections has a different - # fromServer field then no match - if s2_dnstr != s_dnstr: - continue - - #XXX GUID comparison - lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or - (cn_conn.whenCreated == cn2_conn.whenCreated and - packed_guid < ndr_pack(cn2_conn.guid))) - - if lesser: - break - - if lesser and not cn_conn.is_rodc_topology(): - cn_conn.to_be_deleted = True - - # Given an nTDSConnection object cn, if the DC with the nTDSDSA - # object dc that is the parent object of cn and the DC with - # the nTDSDSA object referenced by cn!fromServer are in - # different sites, a KCC acting as an ISTG in dc's site - # deletes cn if all of the following are true: - # - # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options. - # - # cn!fromServer references an nTDSDSA object for a DC - # in a site other than the local DC's site. - # - # The keepConnections sequence returned by - # CreateIntersiteConnections() does not contain - # cn!objectGUID, or cn is "superseded by" (see below) - # another nTDSConnection cn2 and keepConnections - # contains cn2!objectGUID. - # - # The return value of CreateIntersiteConnections() - # was true. - # - # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in - # cn!options - # - else: # different site - - if not mydsa.is_istg(): - continue - - if not cn_conn.is_generated(): - continue - - # TODO - # We are directly using this connection in intersite or - # we are using a connection which can supersede this one. - # - # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not - # appear to be correct. - # - # 1. cn!fromServer and cn!parent appear inconsistent with - # no cn2 - # 2. The repsFrom do not imply each other - # - if cn_conn in self.kept_connections: # and not_superceded: - continue - - # This is the result of create_intersite_connections - if not all_connected: - continue - - if not cn_conn.is_rodc_topology(): - cn_conn.to_be_deleted = True - - if mydsa.is_ro() or opts.readonly: - for connect in mydsa.connect_table.values(): - if connect.to_be_deleted: - DEBUG_FN("TO BE DELETED:\n%s" % connect) - if connect.to_be_added: - DEBUG_FN("TO BE ADDED:\n%s" % connect) - - # Peform deletion from our tables but perform - # no database modification - mydsa.commit_connections(self.samdb, ro=True) - else: - # Commit any modified connections - mydsa.commit_connections(self.samdb) - - def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn): - """Update an repsFrom object if required. - - Part of MS-ADTS 6.2.2.5. - - Update t_repsFrom if necessary to satisfy requirements. Such - updates are typically required when the IDL_DRSGetNCChanges - server has moved from one site to another--for example, to - enable compression when the server is moved from the - client's site to another site. 
- - The repsFrom.update_flags bit field may be modified - auto-magically if any changes are made here. See - kcc_utils.RepsFromTo for gory details. - - - :param n_rep: NC replica we need - :param t_repsFrom: repsFrom tuple to modify - :param s_rep: NC replica at source DSA - :param s_dsa: source DSA - :param cn_conn: Local DSA NTDSConnection child - - :return: None - """ - s_dnstr = s_dsa.dsa_dnstr - update = 0x0 - - same_site = s_dnstr in self.my_site.dsa_table - - # if schedule doesn't match then update and modify - times = convert_schedule_to_repltimes(cn_conn.schedule) - if times != t_repsFrom.schedule: - t_repsFrom.schedule = times - update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE - - # Bit DRS_PER_SYNC is set in replicaFlags if and only - # if nTDSConnection schedule has a value v that specifies - # scheduled replication is to be performed at least once - # per week. - if cn_conn.is_schedule_minimum_once_per_week(): - - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0): - t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC - - # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only - # if the source DSA and the local DC's nTDSDSA object are - # in the same site or source dsa is the FSMO role owner - # of one or more FSMO roles in the NC replica. - if same_site or n_rep.is_fsmo_role_owner(s_dnstr): - - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0): - t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC - - # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in - # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags - # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in - # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in - # t.replicaFlags if and only if s and the local DC's - # nTDSDSA object are in different sites. - if ((cn_conn.options & - dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0): - - if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0: - # XXX WARNING - # - # it LOOKS as if this next test is a bit silly: it - # checks the flag then sets it if it not set; the same - # effect could be achieved by unconditionally setting - # it. But in fact the repsFrom object has special - # magic attached to it, and altering replica_flags has - # side-effects. That is bad in my opinion, but there - # you go. - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0): - t_repsFrom.replica_flags |= \ - drsuapi.DRSUAPI_DRS_NEVER_NOTIFY - - elif not same_site: - - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0): - t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY - - # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if - # and only if s and the local DC's nTDSDSA object are - # not in the same site and the - # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is - # clear in cn!options - if (not same_site and - (cn_conn.options & - dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0): - - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0): - t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION - - # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only - # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options. 
- if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0: - - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0): - t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC - - # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are - # set in t.replicaFlags if and only if cn!enabledConnection = false. - if not cn_conn.is_enabled(): - - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0): - t_repsFrom.replica_flags |= \ - drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC - - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0): - t_repsFrom.replica_flags |= \ - drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC - - # If s and the local DC's nTDSDSA object are in the same site, - # cn!transportType has no value, or the RDN of cn!transportType - # is CN=IP: - # - # Bit DRS_MAIL_REP in t.replicaFlags is clear. - # - # t.uuidTransport = NULL GUID. - # - # t.uuidDsa = The GUID-based DNS name of s. - # - # Otherwise: - # - # Bit DRS_MAIL_REP in t.replicaFlags is set. - # - # If x is the object with dsname cn!transportType, - # t.uuidTransport = x!objectGUID. - # - # Let a be the attribute identified by - # x!transportAddressAttribute. If a is - # the dNSHostName attribute, t.uuidDsa = the GUID-based - # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a. - # - # It appears that the first statement i.e. - # - # "If s and the local DC's nTDSDSA object are in the same - # site, cn!transportType has no value, or the RDN of - # cn!transportType is CN=IP:" - # - # could be a slightly tighter statement if it had an "or" - # between each condition. I believe this should - # be interpreted as: - # - # IF (same-site) OR (no-value) OR (type-ip) - # - # because IP should be the primary transport mechanism - # (even in inter-site) and the absense of the transportType - # attribute should always imply IP no matter if its multi-site - # - # NOTE MS-TECH INCORRECT: - # - # All indications point to these statements above being - # incorrectly stated: - # - # t.uuidDsa = The GUID-based DNS name of s. - # - # Let a be the attribute identified by - # x!transportAddressAttribute. If a is - # the dNSHostName attribute, t.uuidDsa = the GUID-based - # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a. - # - # because the uuidDSA is a GUID and not a GUID-base DNS - # name. Nor can uuidDsa hold (s!parent)!a if not - # dNSHostName. What should have been said is: - # - # t.naDsa = The GUID-based DNS name of s - # - # That would also be correct if transportAddressAttribute - # were "mailAddress" because (naDsa) can also correctly - # hold the SMTP ISM service address. 
- # - nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name()) - - # We're not currently supporting SMTP replication - # so is_smtp_replication_available() is currently - # always returning False - if ((same_site or - cn_conn.transport_dnstr is None or - cn_conn.transport_dnstr.find("CN=IP") == 0 or - not is_smtp_replication_available())): - - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0): - t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP - - t_repsFrom.transport_guid = misc.GUID() - - # See (NOTE MS-TECH INCORRECT) above - if t_repsFrom.version == 0x1: - if t_repsFrom.dns_name1 is None or \ - t_repsFrom.dns_name1 != nastr: - t_repsFrom.dns_name1 = nastr - else: - if t_repsFrom.dns_name1 is None or \ - t_repsFrom.dns_name2 is None or \ - t_repsFrom.dns_name1 != nastr or \ - t_repsFrom.dns_name2 != nastr: - t_repsFrom.dns_name1 = nastr - t_repsFrom.dns_name2 = nastr - - else: - # XXX This entire branch is NEVER used! Because we don't do SMTP! - # (see the if condition above). Just close your eyes here. - if ((t_repsFrom.replica_flags & - drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0): - t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP - - # We have a transport type but its not an - # object in the database - if cn_conn.transport_guid not in self.transport_table: - raise KCCError("Missing inter-site transport - (%s)" % - cn_conn.transport_dnstr) - - x_transport = self.transport_table[str(cn_conn.transport_guid)] - - if t_repsFrom.transport_guid != x_transport.guid: - t_repsFrom.transport_guid = x_transport.guid - - # See (NOTE MS-TECH INCORRECT) above - if x_transport.address_attr == "dNSHostName": - - if t_repsFrom.version == 0x1: - if t_repsFrom.dns_name1 is None or \ - t_repsFrom.dns_name1 != nastr: - t_repsFrom.dns_name1 = nastr - else: - if t_repsFrom.dns_name1 is None or \ - t_repsFrom.dns_name2 is None or \ - t_repsFrom.dns_name1 != nastr or \ - t_repsFrom.dns_name2 != nastr: - t_repsFrom.dns_name1 = nastr - t_repsFrom.dns_name2 = nastr - - else: - # MS tech specification says we retrieve the named - # attribute in "transportAddressAttribute" from the parent of - # the DSA object - try: - pdnstr = s_dsa.get_parent_dnstr() - attrs = [x_transport.address_attr] - - res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE, - attrs=attrs) - except ldb.LdbError, (enum, estr): - raise KCCError( - "Unable to find attr (%s) for (%s) - (%s)" % - (x_transport.address_attr, pdnstr, estr)) - - msg = res[0] - nastr = str(msg[x_transport.address_attr][0]) - - # See (NOTE MS-TECH INCORRECT) above - if t_repsFrom.version == 0x1: - if t_repsFrom.dns_name1 is None or \ - t_repsFrom.dns_name1 != nastr: - t_repsFrom.dns_name1 = nastr - else: - if t_repsFrom.dns_name1 is None or \ - t_repsFrom.dns_name2 is None or \ - t_repsFrom.dns_name1 != nastr or \ - t_repsFrom.dns_name2 != nastr: - - t_repsFrom.dns_name1 = nastr - t_repsFrom.dns_name2 = nastr - - if t_repsFrom.is_modified(): - logger.debug("modify_repsFrom(): %s" % t_repsFrom) - - def is_repsFrom_implied(self, n_rep, cn_conn): - """Given a NC replica and NTDS Connection, determine if the connection - implies a repsFrom tuple should be present from the source DSA listed - in the connection to the naming context - - :param n_rep: NC replica - :param conn: NTDS Connection - ::returns (True || False), source DSA: - """ - #XXX different conditions for "implies" than MS-ADTS 6.2.2 - - # NTDS Connection must satisfy all the following criteria - # to imply a repsFrom tuple is needed: - # - # 
cn!enabledConnection = true. - # cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY. - # cn!fromServer references an nTDSDSA object. - - s_dsa = None - - if cn_conn.is_enabled() and not cn_conn.is_rodc_topology(): - s_dnstr = cn_conn.get_from_dnstr() - if s_dnstr is not None: - s_dsa = self.get_dsa(s_dnstr) - - # No DSA matching this source DN string? - if s_dsa is None: - return False, None - - # To imply a repsFrom tuple is needed, each of these - # must be True: - # - # An NC replica of the NC "is present" on the DC to - # which the nTDSDSA object referenced by cn!fromServer - # corresponds. - # - # An NC replica of the NC "should be present" on - # the local DC - s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr) - - if s_rep is None or not s_rep.is_present(): - return False, None - - # To imply a repsFrom tuple is needed, each of these - # must be True: - # - # The NC replica on the DC referenced by cn!fromServer is - # a writable replica or the NC replica that "should be - # present" on the local DC is a partial replica. - # - # The NC is not a domain NC, the NC replica that - # "should be present" on the local DC is a partial - # replica, cn!transportType has no value, or - # cn!transportType has an RDN of CN=IP. - # - implied = (not s_rep.is_ro() or n_rep.is_partial()) and \ - (not n_rep.is_domain() or - n_rep.is_partial() or - cn_conn.transport_dnstr is None or - cn_conn.transport_dnstr.find("CN=IP") == 0) - - if implied: - return True, s_dsa - else: - return False, None - - def translate_ntdsconn(self, current_dsa=None): - """Adjust repsFrom to match NTDSConnections - - This function adjusts values of repsFrom abstract attributes of NC - replicas on the local DC to match those implied by - nTDSConnection objects. - - Based on [MS-ADTS] 6.2.2.5 - - :param current_dsa: optional DSA on whose behalf we are acting. - :return: None - """ - count = 0 - - if current_dsa is None: - current_dsa = self.my_dsa - - if current_dsa.is_translate_ntdsconn_disabled(): - logger.debug("skipping translate_ntdsconn() " - "because disabling flag is set") - return - - logger.debug("translate_ntdsconn(): enter") - - current_rep_table, needed_rep_table = current_dsa.get_rep_tables() - - # Filled in with replicas we currently have that need deleting - delete_reps = set() - - # We're using the MS notation names here to allow - # correlation back to the published algorithm. - # - # n_rep - NC replica (n) - # t_repsFrom - tuple (t) in n!repsFrom - # s_dsa - Source DSA of the replica. Defined as nTDSDSA - # object (s) such that (s!objectGUID = t.uuidDsa) - # In our IDL representation of repsFrom the (uuidDsa) - # attribute is called (source_dsa_obj_guid) - # cn_conn - (cn) is nTDSConnection object and child of the local - # DC's nTDSDSA object and (cn!fromServer = s) - # s_rep - source DSA replica of n - # - # If we have the replica and its not needed - # then we add it to the "to be deleted" list. 
- for dnstr in current_rep_table: - if dnstr not in needed_rep_table: - delete_reps.add(dnstr) - - DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table), - len(needed_rep_table), len(delete_reps))) - - if delete_reps: - DEBUG('deleting these reps: %s' % delete_reps) - for dnstr in delete_reps: - del current_rep_table[dnstr] - - # Now perform the scan of replicas we'll need - # and compare any current repsFrom against the - # connections - for n_rep in needed_rep_table.values(): - - # load any repsFrom and fsmo roles as we'll - # need them during connection translation - n_rep.load_repsFrom(self.samdb) - n_rep.load_fsmo_roles(self.samdb) - - # Loop thru the existing repsFrom tupples (if any) - # XXX This is a list and could contain duplicates - # (multiple load_repsFrom calls) - for t_repsFrom in n_rep.rep_repsFrom: - - # for each tuple t in n!repsFrom, let s be the nTDSDSA - # object such that s!objectGUID = t.uuidDsa - guidstr = str(t_repsFrom.source_dsa_obj_guid) - s_dsa = self.get_dsa_by_guidstr(guidstr) - - # Source dsa is gone from config (strange) - # so cleanup stale repsFrom for unlisted DSA - if s_dsa is None: - logger.warning("repsFrom source DSA guid (%s) not found" % - guidstr) - t_repsFrom.to_be_deleted = True - continue - - s_dnstr = s_dsa.dsa_dnstr - - # Retrieve my DSAs connection object (if it exists) - # that specifies the fromServer equivalent to - # the DSA that is specified in the repsFrom source - connections = current_dsa.get_connection_by_from_dnstr(s_dnstr) - - count = 0 - cn_conn = None - - for con in connections: - if con.is_rodc_topology(): - continue - cn_conn = con - - # Let (cn) be the nTDSConnection object such that (cn) - # is a child of the local DC's nTDSDSA object and - # (cn!fromServer = s) and (cn!options) does not contain - # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists. - - # KCC removes this repsFrom tuple if any of the following - # is true: - # cn = NULL. - # [...] - - #XXX varying possible interpretations of rodc_topology - if cn_conn is None: - t_repsFrom.to_be_deleted = True - continue - - # [...] KCC removes this repsFrom tuple if: - # - # No NC replica of the NC "is present" on DSA that - # would be source of replica - # - # A writable replica of the NC "should be present" on - # the local DC, but a partial replica "is present" on - # the source DSA - s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr) - - if s_rep is None or not s_rep.is_present() or \ - (not n_rep.is_ro() and s_rep.is_partial()): - - t_repsFrom.to_be_deleted = True - continue - - # If the KCC did not remove t from n!repsFrom, it updates t - self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn) - - # Loop thru connections and add implied repsFrom tuples - # for each NTDSConnection under our local DSA if the - # repsFrom is not already present - for cn_conn in current_dsa.connect_table.values(): - - implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn) - if not implied: - continue - - # Loop thru the existing repsFrom tupples (if any) and - # if we already have a tuple for this connection then - # no need to proceed to add. It will have been changed - # to have the correct attributes above - for t_repsFrom in n_rep.rep_repsFrom: - guidstr = str(t_repsFrom.source_dsa_obj_guid) - #XXX what? 
- if s_dsa is self.get_dsa_by_guidstr(guidstr): - s_dsa = None - break - - if s_dsa is None: - continue - - # Create a new RepsFromTo and proceed to modify - # it according to specification - t_repsFrom = RepsFromTo(n_rep.nc_dnstr) - - t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid - - s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr) - - self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn) - - # Add to our NC repsFrom as this is newly computed - if t_repsFrom.is_modified(): - n_rep.rep_repsFrom.append(t_repsFrom) - - if opts.readonly: - # Display any to be deleted or modified repsFrom - text = n_rep.dumpstr_to_be_deleted() - if text: - logger.info("TO BE DELETED:\n%s" % text) - text = n_rep.dumpstr_to_be_modified() - if text: - logger.info("TO BE MODIFIED:\n%s" % text) - - # Peform deletion from our tables but perform - # no database modification - n_rep.commit_repsFrom(self.samdb, ro=True) - else: - # Commit any modified repsFrom to the NC replica - n_rep.commit_repsFrom(self.samdb) - - def merge_failed_links(self): - """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads. - - The KCC on a writable DC attempts to merge the link and connection - failure information from bridgehead DCs in its own site to help it - identify failed bridgehead DCs. - - Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and kCCFailedLinks - from Bridgeheads" - - :param ping: An oracle of current bridgehead availability - :return: None - """ - # 1. Queries every bridgehead server in your site (other than yourself) - # 2. For every ntDSConnection that references a server in a different - # site merge all the failure info - # - # XXX - not implemented yet - if opts.attempt_live_connections: - DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED") - else: - DEBUG_FN("skipping merge_failed_links() because it requires " - "real network connections\n" - "and we weren't asked to --attempt-live-connections") - - def setup_graph(self, part): - """Set up an intersite graph - - An intersite graph has a Vertex for each site object, a - MultiEdge for each SiteLink object, and a MutliEdgeSet for - each siteLinkBridge object (or implied siteLinkBridge). It - reflects the intersite topology in a slightly more abstract - graph form. 
- - Roughly corresponds to MS-ADTS 6.2.2.3.4.3 - - :param part: a Partition object - :returns: an InterSiteGraph object - """ - guid_to_vertex = {} - # Create graph - g = IntersiteGraph() - # Add vertices - for site_guid, site in self.site_table.items(): - vertex = Vertex(site, part) - vertex.guid = site_guid - vertex.ndrpacked_guid = ndr_pack(site.site_guid) - g.vertices.add(vertex) - - if not guid_to_vertex.get(site_guid): - guid_to_vertex[site_guid] = [] - - guid_to_vertex[site_guid].append(vertex) - - connected_vertices = set() - for transport_guid, transport in self.transport_table.items(): - # Currently only ever "IP" - if transport.name != 'IP': - DEBUG_FN("setup_graph is ignoring transport %s" % - transport.name) - continue - for site_link_dn, site_link in self.sitelink_table.items(): - new_edge = create_edge(transport_guid, site_link, - guid_to_vertex) - connected_vertices.update(new_edge.vertices) - g.edges.add(new_edge) - - # If 'Bridge all site links' is enabled and Win2k3 bridges required - # is not set - # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002 - # No documentation for this however, ntdsapi.h appears to have: - # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000 - if (((self.my_site.site_options & 0x00000002) == 0 - and (self.my_site.site_options & 0x00001000) == 0)): - g.edge_set.add(create_auto_edge_set(g, transport_guid)) - else: - # TODO get all site link bridges - for site_link_bridge in []: - g.edge_set.add(create_edge_set(g, transport_guid, - site_link_bridge)) - - g.connected_vertices = connected_vertices - - #be less verbose in dot file output unless --debug - do_dot_files = opts.dot_files and opts.debug - dot_edges = [] - for edge in g.edges: - for a, b in itertools.combinations(edge.vertices, 2): - dot_edges.append((a.site.site_dnstr, b.site.site_dnstr)) - verify_properties = () - verify_and_dot('site_edges', dot_edges, directed=False, - label=self.my_dsa_dnstr, - properties=verify_properties, debug=DEBUG, - verify=opts.verify, - dot_files=do_dot_files) - - return g - - def get_bridgehead(self, site, part, transport, partial_ok, detect_failed): - """Get a bridghead DC for a site. - - Part of MS-ADTS 6.2.2.3.4.4 - - :param site: site object representing for which a bridgehead - DC is desired. - :param part: crossRef for NC to replicate. - :param transport: interSiteTransport object for replication - traffic. - :param partial_ok: True if a DC containing a partial - replica or a full replica will suffice, False if only - a full replica will suffice. - :param detect_failed: True to detect failed DCs and route - replication traffic around them, False to assume no DC - has failed. - :return: dsa object for the bridgehead DC or None - """ - - bhs = self.get_all_bridgeheads(site, part, transport, - partial_ok, detect_failed) - if len(bhs) == 0: - DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" % - site.site_dnstr) - return None - else: - DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" % - (site.site_dnstr, bhs[0].dsa_dnstr)) - return bhs[0] - - def get_all_bridgeheads(self, site, part, transport, - partial_ok, detect_failed): - """Get all bridghead DCs on a site satisfying the given criteria - - Part of MS-ADTS 6.2.2.3.4.4 - - :param site: site object representing the site for which - bridgehead DCs are desired. - :param part: partition for NC to replicate. - :param transport: interSiteTransport object for - replication traffic. 
- :param partial_ok: True if a DC containing a partial - replica or a full replica will suffice, False if - only a full replica will suffice. - :param detect_failed: True to detect failed DCs and route - replication traffic around them, FALSE to assume - no DC has failed. - :return: list of dsa object for available bridgehead DCs - """ - - bhs = [] - - logger.debug("get_all_bridgeheads: %s" % transport.name) - if 'Site-5' in site.site_dnstr: - DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s" - " detect_failed %s" % (site.site_dnstr, part.partstr, - partial_ok, detect_failed)) - logger.debug(site.rw_dsa_table) - for dsa in site.rw_dsa_table.values(): - - pdnstr = dsa.get_parent_dnstr() - - # IF t!bridgeheadServerListBL has one or more values and - # t!bridgeheadServerListBL does not contain a reference - # to the parent object of dc then skip dc - if ((len(transport.bridgehead_list) != 0 and - pdnstr not in transport.bridgehead_list)): - continue - - # IF dc is in the same site as the local DC - # IF a replica of cr!nCName is not in the set of NC replicas - # that "should be present" on dc or a partial replica of the - # NC "should be present" but partialReplicasOkay = FALSE - # Skip dc - if self.my_site.same_site(dsa): - needed, ro, partial = part.should_be_present(dsa) - if not needed or (partial and not partial_ok): - continue - rep = dsa.get_current_replica(part.nc_dnstr) - - # ELSE - # IF an NC replica of cr!nCName is not in the set of NC - # replicas that "are present" on dc or a partial replica of - # the NC "is present" but partialReplicasOkay = FALSE - # Skip dc - else: - rep = dsa.get_current_replica(part.nc_dnstr) - if rep is None or (rep.is_partial() and not partial_ok): - continue - - # IF AmIRODC() and cr!nCName corresponds to default NC then - # Let dsaobj be the nTDSDSA object of the dc - # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008 - # Skip dc - if self.my_dsa.is_ro() and rep is not None and rep.is_default(): - if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008): - continue - - # IF t!name != "IP" and the parent object of dc has no value for - # the attribute specified by t!transportAddressAttribute - # Skip dc - if transport.name != "IP": - # MS tech specification says we retrieve the named - # attribute in "transportAddressAttribute" from the parent - # of the DSA object - try: - attrs = [transport.address_attr] - - res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE, - attrs=attrs) - except ldb.LdbError, (enum, estr): - continue - - msg = res[0] - if transport.address_attr not in msg: - continue - #XXX nastr is NEVER USED. It will be removed. 
- nastr = str(msg[transport.address_attr][0]) - - # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE - # Skip dc - if self.is_bridgehead_failed(dsa, detect_failed): - DEBUG("bridgehead is failed") - continue - - logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr) - bhs.append(dsa) - - # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in - # s!options - # SORT bhs such that all GC servers precede DCs that are not GC - # servers, and otherwise by ascending objectGUID - # ELSE - # SORT bhs in a random order - if site.is_random_bridgehead_disabled(): - bhs.sort(sort_dsa_by_gc_and_guid) - else: - random.shuffle(bhs) - DEBUG_YELLOW(bhs) - return bhs - - def is_bridgehead_failed(self, dsa, detect_failed): - """Determine whether a given DC is known to be in a failed state - - :param dsa: the bridgehead to test - :param detect_failed: True to really check, False to assume no failure - :return: True if and only if the DC should be considered failed - - Here we DEPART from the pseudo code spec which appears to be - wrong. It says, in full: - - /***** BridgeheadDCFailed *****/ - /* Determine whether a given DC is known to be in a failed state. - * IN: objectGUID - objectGUID of the DC's nTDSDSA object. - * IN: detectFailedDCs - TRUE if and only failed DC detection is - * enabled. - * RETURNS: TRUE if and only if the DC should be considered to be in a - * failed state. - */ - BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool - { - IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in - the options attribute of the site settings object for the local - DC's site - RETURN FALSE - ELSEIF a tuple z exists in the kCCFailedLinks or - kCCFailedConnections variables such that z.UUIDDsa = - objectGUID, z.FailureCount > 1, and the current time - - z.TimeFirstFailure > 2 hours - RETURN TRUE - ELSE - RETURN detectFailedDCs - ENDIF - } - - where you will see detectFailedDCs is not behaving as - advertised -- it is acting as a default return code in the - event that a failure is not detected, not a switch turning - detection on or off. Elsewhere the documentation seems to - concur with the comment rather than the code. - """ - if not detect_failed: - return False - - # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008 - # When DETECT_STALE_DISABLED, we can never know of if - # it's in a failed state - if self.my_site.site_options & 0x00000008: - return False - - return self.is_stale_link_connection(dsa) - - def create_connection(self, part, rbh, rsite, transport, - lbh, lsite, link_opt, link_sched, - partial_ok, detect_failed): - """Create an nTDSConnection object as specified if it doesn't exist. - - Part of MS-ADTS 6.2.2.3.4.5 - - :param part: crossRef object for the NC to replicate. - :param rbh: nTDSDSA object for DC to act as the - IDL_DRSGetNCChanges server (which is in a site other - than the local DC's site). - :param rsite: site of the rbh - :param transport: interSiteTransport object for the transport - to use for replication traffic. - :param lbh: nTDSDSA object for DC to act as the - IDL_DRSGetNCChanges client (which is in the local DC's site). - :param lsite: site of the lbh - :param link_opt: Replication parameters (aggregated siteLink options, - etc.) - :param link_sched: Schedule specifying the times at which - to begin replicating. - :partial_ok: True if bridgehead DCs containing partial - replicas of the NC are acceptable. 
- :param detect_failed: True to detect failed DCs and route - replication traffic around them, FALSE to assume no DC - has failed. - """ - rbhs_all = self.get_all_bridgeheads(rsite, part, transport, - partial_ok, False) - rbh_table = {x.dsa_dnstr: x for x in rbhs_all} - - DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all), - [x.dsa_dnstr for x in rbhs_all])) - - # MS-TECH says to compute rbhs_avail but then doesn't use it - # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport, - # partial_ok, detect_failed) - - lbhs_all = self.get_all_bridgeheads(lsite, part, transport, - partial_ok, False) - if lbh.is_ro(): - lbhs_all.append(lbh) - - DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all), - [x.dsa_dnstr for x in lbhs_all])) - - # MS-TECH says to compute lbhs_avail but then doesn't use it - # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport, - # partial_ok, detect_failed) - - # FOR each nTDSConnection object cn such that the parent of cn is - # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll - for ldsa in lbhs_all: - for cn in ldsa.connect_table.values(): - - rdsa = rbh_table.get(cn.from_dnstr) - if rdsa is None: - continue - - DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr) - # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and - # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and - # cn!transportType references t - if ((cn.is_generated() and - not cn.is_rodc_topology() and - cn.transport_guid == transport.guid)): - - # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in - # cn!options and cn!schedule != sch - # Perform an originating update to set cn!schedule to - # sched - if ((not cn.is_user_owned_schedule() and - not cn.is_equivalent_schedule(link_sched))): - cn.schedule = link_sched - cn.set_modified(True) - - # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and - # NTDSCONN_OPT_USE_NOTIFY are set in cn - if cn.is_override_notify_default() and \ - cn.is_use_notify(): - - # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in - # ri.Options - # Perform an originating update to clear bits - # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and - # NTDSCONN_OPT_USE_NOTIFY in cn!options - if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0: - cn.options &= \ - ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT | - dsdb.NTDSCONN_OPT_USE_NOTIFY) - cn.set_modified(True) - - # ELSE - else: - - # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in - # ri.Options - # Perform an originating update to set bits - # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and - # NTDSCONN_OPT_USE_NOTIFY in cn!options - if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0: - cn.options |= \ - (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT | - dsdb.NTDSCONN_OPT_USE_NOTIFY) - cn.set_modified(True) - - # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options - if cn.is_twoway_sync(): - - # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in - # ri.Options - # Perform an originating update to clear bit - # NTDSCONN_OPT_TWOWAY_SYNC in cn!options - if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0: - cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC - cn.set_modified(True) - - # ELSE - else: - - # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in - # ri.Options - # Perform an originating update to set bit - # NTDSCONN_OPT_TWOWAY_SYNC in cn!options - if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0: - cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC - cn.set_modified(True) - - # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set - # in cn!options - if cn.is_intersite_compression_disabled(): - - # IF bit 
NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear - # in ri.Options - # Perform an originating update to clear bit - # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in - # cn!options - if ((link_opt & - dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0): - cn.options &= \ - ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION - cn.set_modified(True) - - # ELSE - else: - # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in - # ri.Options - # Perform an originating update to set bit - # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in - # cn!options - if ((link_opt & - dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0): - cn.options |= \ - dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION - cn.set_modified(True) - - # Display any modified connection - if opts.readonly: - if cn.to_be_modified: - logger.info("TO BE MODIFIED:\n%s" % cn) - - ldsa.commit_connections(self.samdb, ro=True) - else: - ldsa.commit_connections(self.samdb) - # ENDFOR - - valid_connections = 0 - - # FOR each nTDSConnection object cn such that cn!parent is - # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll - for ldsa in lbhs_all: - for cn in ldsa.connect_table.values(): - - rdsa = rbh_table.get(cn.from_dnstr) - if rdsa is None: - continue - - DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr) - - # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or - # cn!transportType references t) and - # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options - if (((not cn.is_generated() or - cn.transport_guid == transport.guid) and - not cn.is_rodc_topology())): - - # LET rguid be the objectGUID of the nTDSDSA object - # referenced by cn!fromServer - # LET lguid be (cn!parent)!objectGUID - - # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and - # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE - # Increment cValidConnections by 1 - if ((not self.is_bridgehead_failed(rdsa, detect_failed) and - not self.is_bridgehead_failed(ldsa, detect_failed))): - valid_connections += 1 - - # IF keepConnections does not contain cn!objectGUID - # APPEND cn!objectGUID to keepConnections - self.kept_connections.add(cn) - - # ENDFOR - DEBUG_RED("valid connections %d" % valid_connections) - DEBUG("kept_connections:\n%s" % (self.kept_connections,)) - # IF cValidConnections = 0 - if valid_connections == 0: - - # LET opt be NTDSCONN_OPT_IS_GENERATED - opt = dsdb.NTDSCONN_OPT_IS_GENERATED - - # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options - # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and - # NTDSCONN_OPT_USE_NOTIFY in opt - if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0: - opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT | - dsdb.NTDSCONN_OPT_USE_NOTIFY) - - # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options - # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt - if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0: - opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC - - # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in - # ri.Options - # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt - if ((link_opt & - dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0): - opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION - - # Perform an originating update to create a new nTDSConnection - # object cn that is a child of lbh, cn!enabledConnection = TRUE, - # cn!options = opt, cn!transportType is a reference to t, - # cn!fromServer is a reference to rbh, and cn!schedule = sch - DEBUG_FN("new connection, KCC dsa: %s" % self.my_dsa.dsa_dnstr) - cn = lbh.new_connection(opt, 0, transport, - rbh.dsa_dnstr, link_sched) - - # Display any 
added connection - if opts.readonly: - if cn.to_be_added: - logger.info("TO BE ADDED:\n%s" % cn) - - lbh.commit_connections(self.samdb, ro=True) - else: - lbh.commit_connections(self.samdb) - - # APPEND cn!objectGUID to keepConnections - self.kept_connections.add(cn) - - def add_transports(self, vertex, local_vertex, graph, detect_failed): - """Build a Vertex's transport lists - - Each vertex has accept_red_red and accept_black lists that - list what transports they accept under various conditions. The - only transport that is ever accepted is IP, and a dummy extra - transport called "EDGE_TYPE_ALL". - - Part of MS-ADTS 6.2.2.3.4.3 -- ColorVertices - - :param vertex: the remote vertex we are thinking about - :param local_vertex: the vertex relating to the local site. - :param graph: the intersite graph - :param detect_failed: whether to detect failed links - :return: True if some bridgeheads were not found - """ - # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex - # here, but using vertex seems to make more sense. That is, - # the docs want this: - # - #bh = self.get_bridgehead(vertex.site, vertex.part, transport, - # local_vertex.is_black(), detect_failed) - # - # TODO WHY????? - - vertex.accept_red_red = [] - vertex.accept_black = [] - found_failed = False - for t_guid, transport in self.transport_table.items(): - if transport.name != 'IP': - #XXX well this is cheating a bit - logging.warning("WARNING: we are ignoring a transport named %r" - % transport.name) - continue - - # FLAG_CR_NTDS_DOMAIN 0x00000002 - if ((vertex.is_red() and transport.name != "IP" and - vertex.part.system_flags & 0x00000002)): - continue - - if vertex not in graph.connected_vertices: - continue - - partial_replica_okay = vertex.is_black() - bh = self.get_bridgehead(vertex.site, vertex.part, transport, - partial_replica_okay, detect_failed) - if bh is None: - found_failed = True - continue - - vertex.accept_red_red.append(t_guid) - vertex.accept_black.append(t_guid) - - # Add additional transport to allow another run of Dijkstra - vertex.accept_red_red.append("EDGE_TYPE_ALL") - vertex.accept_black.append("EDGE_TYPE_ALL") - - return found_failed - - def create_connections(self, graph, part, detect_failed): - """Construct an NC replica graph for the NC identified by - the given crossRef, then create any additional nTDSConnection - objects required. - - :param graph: site graph. - :param part: crossRef object for NC. - :param detect_failed: True to detect failed DCs and route - replication traffic around them, False to assume no DC - has failed. - - Modifies self.kept_connections by adding any connections - deemed to be "in use". - - ::returns: (all_connected, found_failed_dc) - (all_connected) True if the resulting NC replica graph - connects all sites that need to be connected. - (found_failed_dc) True if one or more failed DCs were - detected. - """ - all_connected = True - found_failed = False - - logger.debug("create_connections(): enter\n" - "\tpartdn=%s\n\tdetect_failed=%s" % - (part.nc_dnstr, detect_failed)) - - # XXX - This is a highly abbreviated function from the MS-TECH - # ref. It creates connections between bridgeheads to all - # sites that have appropriate replicas. Thus we are not - # creating a minimum cost spanning tree but instead - # producing a fully connected tree. This should produce - # a full (albeit not optimal cost) replication topology. 
- - my_vertex = Vertex(self.my_site, part) - my_vertex.color_vertex() - - for v in graph.vertices: - v.color_vertex() - if self.add_transports(v, my_vertex, graph, False): - found_failed = True - - # No NC replicas for this NC in the site of the local DC, - # so no nTDSConnection objects need be created - if my_vertex.is_white(): - return all_connected, found_failed - - edge_list, n_components = get_spanning_tree_edges(graph, - self.my_site, - label=part.partstr) - - logger.debug("%s Number of components: %d" % - (part.nc_dnstr, n_components)) - if n_components > 1: - all_connected = False - - # LET partialReplicaOkay be TRUE if and only if - # localSiteVertex.Color = COLOR.BLACK - partial_ok = my_vertex.is_black() - - # Utilize the IP transport only for now - transport = self.ip_transport - - DEBUG("edge_list %s" % edge_list) - for e in edge_list: - # XXX more accurate comparison? - if e.directed and e.vertices[0].site is self.my_site: - continue - - if e.vertices[0].site is self.my_site: - rsite = e.vertices[1].site - else: - rsite = e.vertices[0].site - - # We don't make connections to our own site as that - # is intrasite topology generator's job - if rsite is self.my_site: - DEBUG("rsite is my_site") - continue - - # Determine bridgehead server in remote site - rbh = self.get_bridgehead(rsite, part, transport, - partial_ok, detect_failed) - if rbh is None: - continue - - # RODC acts as an BH for itself - # IF AmIRODC() then - # LET lbh be the nTDSDSA object of the local DC - # ELSE - # LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID, - # cr, t, partialReplicaOkay, detectFailedDCs) - if self.my_dsa.is_ro(): - lsite = self.my_site - lbh = self.my_dsa - else: - lsite = self.my_site - lbh = self.get_bridgehead(lsite, part, transport, - partial_ok, detect_failed) - # TODO - if lbh is None: - DEBUG_RED("DISASTER! lbh is None") - return False, True - - DEBUG_CYAN("SITES") - print lsite, rsite - DEBUG_BLUE("vertices") - print e.vertices - DEBUG_BLUE("bridgeheads") - print lbh, rbh - DEBUG_BLUE("-" * 70) - - sitelink = e.site_link - if sitelink is None: - link_opt = 0x0 - link_sched = None - else: - link_opt = sitelink.options - link_sched = sitelink.schedule - - self.create_connection(part, rbh, rsite, transport, - lbh, lsite, link_opt, link_sched, - partial_ok, detect_failed) - - return all_connected, found_failed - - def create_intersite_connections(self): - """Computes an NC replica graph for each NC replica that "should be - present" on the local DC or "is present" on any DC in the same site - as the local DC. For each edge directed to an NC replica on such a - DC from an NC replica on a DC in another site, the KCC creates an - nTDSConnection object to imply that edge if one does not already - exist. - - Modifies self.kept_connections - A set of nTDSConnection - objects for edges that are directed - to the local DC's site in one or more NC replica graphs. - - returns: True if spanning trees were created for all NC replica - graphs, otherwise False. - """ - all_connected = True - self.kept_connections = set() - - # LET crossRefList be the set containing each object o of class - # crossRef such that o is a child of the CN=Partitions child of the - # config NC - - # FOR each crossRef object cr in crossRefList - # IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC - # is clear in cr!systemFlags, skip cr. 
- # LET g be the GRAPH return of SetupGraph() - - for part in self.part_table.values(): - - if not part.is_enabled(): - continue - - if part.is_foreign(): - continue - - graph = self.setup_graph(part) - - # Create nTDSConnection objects, routing replication traffic - # around "failed" DCs. - found_failed = False - - connected, found_failed = self.create_connections(graph, - part, True) - - DEBUG("with detect_failed: connected %s Found failed %s" % - (connected, found_failed)) - if not connected: - all_connected = False - - if found_failed: - # One or more failed DCs preclude use of the ideal NC - # replica graph. Add connections for the ideal graph. - self.create_connections(graph, part, False) - - return all_connected - - def intersite(self): - """The head method for generating the inter-site KCC replica - connection graph and attendant nTDSConnection objects - in the samdb. - - Produces self.kept_connections set of NTDS Connections - that should be kept during subsequent pruning process. - - ::return (True or False): (True) if the produced NC replica - graph connects all sites that need to be connected - """ - - # Retrieve my DSA - mydsa = self.my_dsa - mysite = self.my_site - all_connected = True - - logger.debug("intersite(): enter") - - # Determine who is the ISTG - if opts.readonly: - mysite.select_istg(self.samdb, mydsa, ro=True) - else: - mysite.select_istg(self.samdb, mydsa, ro=False) - - # Test whether local site has topology disabled - if mysite.is_intersite_topology_disabled(): - logger.debug("intersite(): exit disabled all_connected=%d" % - all_connected) - return all_connected - - if not mydsa.is_istg(): - logger.debug("intersite(): exit not istg all_connected=%d" % - all_connected) - return all_connected - - self.merge_failed_links() - - # For each NC with an NC replica that "should be present" on the - # local DC or "is present" on any DC in the same site as the - # local DC, the KCC constructs a site graph--a precursor to an NC - # replica graph. The site connectivity for a site graph is defined - # by objects of class interSiteTransport, siteLink, and - # siteLinkBridge in the config NC. - - all_connected = self.create_intersite_connections() - - logger.debug("intersite(): exit all_connected=%d" % all_connected) - return all_connected - - def update_rodc_connection(self): - """Runs when the local DC is an RODC and updates the RODC NTFRS - connection object. - """ - # Given an nTDSConnection object cn1, such that cn1.options contains - # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2, - # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure - # that the following is true: - # - # cn1.fromServer = cn2.fromServer - # cn1.schedule = cn2.schedule - # - # If no such cn2 can be found, cn1 is not modified. - # If no such cn1 can be found, nothing is modified by this task. - - if not self.my_dsa.is_ro(): - return - - all_connections = self.my_dsa.connect_table.values() - ro_connections = [x for x in all_connections if x.is_rodc_topology()] - rw_connections = [x for x in all_connections - if x not in ro_connections] - - # XXX here we are dealing with multiple RODC_TOPO connections, - # if they exist. It is not clear whether the spec means that - # or if it ever arises. 
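Before the RODC handling below, one note on create_intersite_connections above: its two-pass fallback can be summarised by a small wrapper. The connect_partition name is invented and create_connections is passed in as a callable; this is a sketch of the pattern, not the actual code path.

    def connect_partition(graph, part, create_connections):
        # First pass: build connections while routing around failed DCs.
        connected, found_failed = create_connections(graph, part, True)
        if found_failed:
            # Second pass: assume every DC is healthy so the ideal
            # replica graph is also expressed as nTDSConnection objects.
            create_connections(graph, part, False)
        return connected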
- if rw_connections and ro_connections: - for con in ro_connections: - cn2 = rw_connections[0] - con.from_dnstr = cn2.from_dnstr - con.schedule = cn2.schedule - con.to_be_modified = True - - self.my_dsa.commit_connections(self.samdb, ro=opts.readonly) - - def intrasite_max_node_edges(self, node_count): - """Returns the maximum number of edges directed to a node in - the intrasite replica graph. - - The KCC does not create more - than 50 edges directed to a single DC. To optimize replication, - we compute that each node should have n+2 total edges directed - to it such that (n) is the smallest non-negative integer - satisfying (node_count <= 2*(n*n) + 6*n + 7) - - (If the number of edges is m (i.e. n + 2), that is the same as - 2 * m*m - 2 * m + 3). - - edges n nodecount - 2 0 7 - 3 1 15 - 4 2 27 - 5 3 43 - ... - 50 48 4903 - - :param node_count: total number of nodes in the replica graph - - The intention is that there should be no more than 3 hops - between any two DSAs at a site. With up to 7 nodes the 2 edges - of the ring are enough; any configuration of extra edges with - 8 nodes will be enough. It is less clear that the 3 hop - guarantee holds at e.g. 15 nodes in degenerate cases, but - those are quite unlikely given the extra edges are randomly - arranged. - """ - n = 0 - while True: - if node_count <= (2 * (n * n) + (6 * n) + 7): - break - n = n + 1 - n = n + 2 - if n < 50: - return n - return 50 - - def construct_intrasite_graph(self, site_local, dc_local, - nc_x, gc_only, detect_stale): - # [MS-ADTS] 6.2.2.2 - # We're using the MS notation names here to allow - # correlation back to the published algorithm. - # - # nc_x - naming context (x) that we are testing if it - # "should be present" on the local DC - # f_of_x - replica (f) found on a DC (s) for NC (x) - # dc_s - DC where f_of_x replica was found - # dc_local - local DC that potentially needs a replica - # (f_of_x) - # r_list - replica list R - # p_of_x - replica (p) is partial and found on a DC (s) - # for NC (x) - # l_of_x - replica (l) is the local replica for NC (x) - # that should appear on the local DC - # r_len = is length of replica list |R| - # - # If the DSA doesn't need a replica for this - # partition (NC x) then continue - needed, ro, partial = nc_x.should_be_present(dc_local) - - DEBUG_YELLOW("construct_intrasite_graph(): enter" + - "\n\tgc_only=%d" % gc_only + - "\n\tdetect_stale=%d" % detect_stale + - "\n\tneeded=%s" % needed + - "\n\tro=%s" % ro + - "\n\tpartial=%s" % partial + - "\n%s" % nc_x) - - if not needed: - DEBUG_RED("%s lacks 'should be present' status, " - "aborting construct_intersite_graph!" % - nc_x.nc_dnstr) - return - - # Create a NCReplica that matches what the local replica - # should say. We'll use this below in our r_list - l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid, - nc_x.nc_dnstr) - - l_of_x.identify_by_basedn(self.samdb) - - l_of_x.rep_partial = partial - l_of_x.rep_ro = ro - - # Add this replica that "should be present" to the - # needed replica table for this DSA - dc_local.add_needed_replica(l_of_x) - - # Replica list - # - # Let R be a sequence containing each writable replica f of x - # such that f "is present" on a DC s satisfying the following - # criteria: - # - # * s is a writable DC other than the local DC. - # - # * s is in the same site as the local DC. - # - # * If x is a read-only full replica and x is a domain NC, - # then the DC's functional level is at least - # DS_BEHAVIOR_WIN2008. 
- # - # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set - # in the options attribute of the site settings object for - # the local DC's site, or no tuple z exists in the - # kCCFailedLinks or kCCFailedConnections variables such - # that z.UUIDDsa is the objectGUID of the nTDSDSA object - # for s, z.FailureCount > 0, and the current time - - # z.TimeFirstFailure > 2 hours. - - r_list = [] - - # We'll loop thru all the DSAs looking for - # writeable NC replicas that match the naming - # context dn for (nc_x) - # - for dc_s in self.my_site.dsa_table.values(): - # If this partition (nc_x) doesn't appear as a - # replica (f_of_x) on (dc_s) then continue - if not nc_x.nc_dnstr in dc_s.current_rep_table: - continue - - # Pull out the NCReplica (f) of (x) with the dn - # that matches NC (x) we are examining. - f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr] - - # Replica (f) of NC (x) must be writable - if f_of_x.is_ro(): - continue - - # Replica (f) of NC (x) must satisfy the - # "is present" criteria for DC (s) that - # it was found on - if not f_of_x.is_present(): - continue - - # DC (s) must be a writable DSA other than - # my local DC. In other words we'd only replicate - # from other writable DC - if dc_s.is_ro() or dc_s is dc_local: - continue - - # Certain replica graphs are produced only - # for global catalogs, so test against - # method input parameter - if gc_only and not dc_s.is_gc(): - continue - - # DC (s) must be in the same site as the local DC - # as this is the intra-site algorithm. This is - # handled by virtue of placing DSAs in per - # site objects (see enclosing for() loop) - - # If NC (x) is intended to be read-only full replica - # for a domain NC on the target DC then the source - # DC should have functional level at minimum WIN2008 - # - # Effectively we're saying that in order to replicate - # to a targeted RODC (which was introduced in Windows 2008) - # then we have to replicate from a DC that is also minimally - # at that level. - # - # You can also see this requirement in the MS special - # considerations for RODC which state that to deploy - # an RODC, at least one writable domain controller in - # the domain must be running Windows Server 2008 - if ro and not partial and nc_x.nc_type == NCType.domain: - if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008): - continue - - # If we haven't been told to turn off stale connection - # detection and this dsa has a stale connection then - # continue - if detect_stale and self.is_stale_link_connection(dc_s): - continue - - # Replica meets criteria. Add it to table indexed - # by the GUID of the DC that it appears on - r_list.append(f_of_x) - - # If a partial (not full) replica of NC (x) "should be present" - # on the local DC, append to R each partial replica (p of x) - # such that p "is present" on a DC satisfying the same - # criteria defined above for full replica DCs. - # - # XXX This loop and the previous one differ only in whether - # the replica is partial or not. here we only accept partial - # (because we're partial); before we only accepted full. Order - # doen't matter (the list is sorted a few lines down) so these - # loops could easily be merged. Or this could be a helper - # function. 
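A merged helper along the lines the XXX comment suggests might look roughly like this. The candidate_replicas name, the stale_check callable and the simplified filter set are all hypothetical, and the WIN2008 functional-level check applied in the full-replica pass above is left out for brevity.

    def candidate_replicas(dsa_table, nc_dnstr, dc_local, want_partial,
                           gc_only, stale_check):
        # Collect replicas of the named NC that "are present" on other
        # writable DSAs in the local site; want_partial selects between
        # the full and partial passes.
        found = []
        for dc_s in dsa_table.values():
            rep = dc_s.current_rep_table.get(nc_dnstr)
            if rep is None or not rep.is_present():
                continue
            if want_partial:
                if not rep.is_partial():
                    continue
            elif rep.is_ro():
                continue
            if dc_s.is_ro() or dc_s is dc_local:
                continue
            if gc_only and not dc_s.is_gc():
                continue
            if stale_check is not None and stale_check(dc_s):
                continue
            found.append(rep)
        return found

The partial pass that follows below would then reduce to a single call with want_partial set to True.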
- - if partial: - # Now we loop thru all the DSAs looking for - # partial NC replicas that match the naming - # context dn for (NC x) - for dc_s in self.my_site.dsa_table.values(): - - # If this partition NC (x) doesn't appear as a - # replica (p) of NC (x) on the dsa DC (s) then - # continue - if not nc_x.nc_dnstr in dc_s.current_rep_table: - continue - - # Pull out the NCReplica with the dn that - # matches NC (x) we are examining. - p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr] - - # Replica (p) of NC (x) must be partial - if not p_of_x.is_partial(): - continue - - # Replica (p) of NC (x) must satisfy the - # "is present" criteria for DC (s) that - # it was found on - if not p_of_x.is_present(): - continue - - # DC (s) must be a writable DSA other than - # my DSA. In other words we'd only replicate - # from other writable DSA - if dc_s.is_ro() or dc_s is dc_local: - continue - - # Certain replica graphs are produced only - # for global catalogs, so test against - # method input parameter - if gc_only and not dc_s.is_gc(): - continue - - # If we haven't been told to turn off stale connection - # detection and this dsa has a stale connection then - # continue - if detect_stale and self.is_stale_link_connection(dc_s): - continue - - # Replica meets criteria. Add it to table indexed - # by the GUID of the DSA that it appears on - r_list.append(p_of_x) - - # Append to R the NC replica that "should be present" - # on the local DC - r_list.append(l_of_x) - - r_list.sort(sort_replica_by_dsa_guid) - r_len = len(r_list) - - max_node_edges = self.intrasite_max_node_edges(r_len) - - # Add a node for each r_list element to the replica graph - graph_list = [] - for rep in r_list: - node = GraphNode(rep.rep_dsa_dnstr, max_node_edges) - graph_list.append(node) - - # For each r(i) from (0 <= i < |R|-1) - i = 0 - while i < (r_len-1): - # Add an edge from r(i) to r(i+1) if r(i) is a full - # replica or r(i+1) is a partial replica - if not r_list[i].is_partial() or r_list[i+1].is_partial(): - graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr) - - # Add an edge from r(i+1) to r(i) if r(i+1) is a full - # replica or ri is a partial replica. - if not r_list[i+1].is_partial() or r_list[i].is_partial(): - graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr) - i = i + 1 - - # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica - # or r0 is a partial replica. - if not r_list[r_len-1].is_partial() or r_list[0].is_partial(): - graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr) - - # Add an edge from r0 to r|R|-1 if r0 is a full replica or - # r|R|-1 is a partial replica. 
- if not r_list[0].is_partial() or r_list[r_len-1].is_partial(): - graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr) - - DEBUG("r_list is length %s" % len(r_list)) - DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr)) - for x in r_list)) - - do_dot_files = opts.dot_files and opts.debug - if opts.verify or do_dot_files: - dot_edges = [] - dot_vertices = set() - for v1 in graph_list: - dot_vertices.add(v1.dsa_dnstr) - for v2 in v1.edge_from: - dot_edges.append((v2, v1.dsa_dnstr)) - dot_vertices.add(v2) - - verify_properties = ('connected', 'directed_double_ring_or_small') - verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices, - label='%s__%s__%s' % (site_local.site_dnstr, - nctype_lut[nc_x.nc_type], - nc_x.nc_dnstr), - properties=verify_properties, debug=DEBUG, - verify=opts.verify, - dot_files=do_dot_files, directed=True) - - # For each existing nTDSConnection object implying an edge - # from rj of R to ri such that j != i, an edge from rj to ri - # is not already in the graph, and the total edges directed - # to ri is less than n+2, the KCC adds that edge to the graph. - for vertex in graph_list: - dsa = self.my_site.dsa_table[vertex.dsa_dnstr] - for connect in dsa.connect_table.values(): - remote = connect.from_dnstr - if remote in self.my_site.dsa_table: - vertex.add_edge_from(remote) - - DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list)) - DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list)) - - for tnode in graph_list: - # To optimize replication latency in sites with many NC - # replicas, the KCC adds new edges directed to ri to bring - # the total edges to n+2, where the NC replica rk of R - # from which the edge is directed is chosen at random such - # that k != i and an edge from rk to ri is not already in - # the graph. - # - # Note that the KCC tech ref does not give a number for - # the definition of "sites with many NC replicas". At a - # bare minimum to satisfy n+2 edges directed at a node we - # have to have at least three replicas in |R| (i.e. if n - # is zero then at least replicas from two other graph - # nodes may direct edges to us). - if r_len >= 3 and not tnode.has_sufficient_edges(): - candidates = [x for x in graph_list if - (x is not tnode and - x.dsa_dnstr not in tnode.edge_from)] - - DEBUG_BLUE("looking for random link for %s. 
r_len %d, " - "graph len %d candidates %d" - % (tnode.dsa_dnstr, r_len, len(graph_list), - len(candidates))) - - DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates]) - - while candidates and not tnode.has_sufficient_edges(): - other = random.choice(candidates) - DEBUG("trying to add candidate %s" % other.dsa_dstr) - if not tnode.add_edge_from(other): - DEBUG_RED("could not add %s" % other.dsa_dstr) - candidates.remove(other) - else: - DEBUG_FN("not adding links to %s: nodes %s, links is %s/%s" % - (tnode.dsa_dnstr, r_len, len(tnode.edge_from), - tnode.max_edges)) - - # Print the graph node in debug mode - logger.debug("%s" % tnode) - - # For each edge directed to the local DC, ensure a nTDSConnection - # points to us that satisfies the KCC criteria - - if tnode.dsa_dnstr == dc_local.dsa_dnstr: - tnode.add_connections_from_edges(dc_local) - - if opts.verify or do_dot_files: - dot_edges = [] - dot_vertices = set() - for v1 in graph_list: - dot_vertices.add(v1.dsa_dnstr) - for v2 in v1.edge_from: - dot_edges.append((v2, v1.dsa_dnstr)) - dot_vertices.add(v2) - - verify_properties = ('connected', 'directed_double_ring_or_small') - verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices, - label='%s__%s__%s' % (site_local.site_dnstr, - nctype_lut[nc_x.nc_type], - nc_x.nc_dnstr), - properties=verify_properties, debug=DEBUG, - verify=opts.verify, - dot_files=do_dot_files, directed=True) - - def intrasite(self): - """The head method for generating the intra-site KCC replica - connection graph and attendant nTDSConnection objects - in the samdb - """ - # Retrieve my DSA - mydsa = self.my_dsa - - logger.debug("intrasite(): enter") - - # Test whether local site has topology disabled - mysite = self.my_site - if mysite.is_intrasite_topology_disabled(): - return - - detect_stale = (not mysite.is_detect_stale_disabled()) - for connect in mydsa.connect_table.values(): - if connect.to_be_added: - DEBUG_CYAN("TO BE ADDED:\n%s" % connect) - - # Loop thru all the partitions, with gc_only False - for partdn, part in self.part_table.items(): - self.construct_intrasite_graph(mysite, mydsa, part, False, - detect_stale) - for connect in mydsa.connect_table.values(): - if connect.to_be_added: - DEBUG_BLUE("TO BE ADDED:\n%s" % connect) - - # If the DC is a GC server, the KCC constructs an additional NC - # replica graph (and creates nTDSConnection objects) for the - # config NC as above, except that only NC replicas that "are present" - # on GC servers are added to R. - for connect in mydsa.connect_table.values(): - if connect.to_be_added: - DEBUG_YELLOW("TO BE ADDED:\n%s" % connect) - - # Do it again, with gc_only True - for partdn, part in self.part_table.items(): - if part.is_config(): - self.construct_intrasite_graph(mysite, mydsa, part, True, - detect_stale) - - # The DC repeats the NC replica graph computation and nTDSConnection - # creation for each of the NC replica graphs, this time assuming - # that no DC has failed. It does so by re-executing the steps as - # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were - # set in the options attribute of the site settings object for - # the local DC's site. (ie. we set "detec_stale" flag to False) - for connect in mydsa.connect_table.values(): - if connect.to_be_added: - DEBUG_BLUE("TO BE ADDED:\n%s" % connect) - - # Loop thru all the partitions. 
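At this point intrasite() has completed the stale-detecting passes; the loop below repeats them with detection switched off. The overall pass structure compresses to roughly the following sketch (intrasite_passes is an invented name; the real method inlines these loops rather than calling a helper):

    def intrasite_passes(kcc, mysite, mydsa, part_table):
        # Illustrative summary of the four construct_intrasite_graph()
        # passes driven by intrasite() above.
        detect_stale = not mysite.is_detect_stale_disabled()
        for detect in (detect_stale, False):
            # all partitions, gc_only=False
            for part in part_table.values():
                kcc.construct_intrasite_graph(mysite, mydsa, part,
                                              False, detect)
            # config NC only, gc_only=True
            for part in part_table.values():
                if part.is_config():
                    kcc.construct_intrasite_graph(mysite, mydsa, part,
                                                  True, detect)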
- for partdn, part in self.part_table.items(): - self.construct_intrasite_graph(mysite, mydsa, part, False, - False) # don't detect stale - - # If the DC is a GC server, the KCC constructs an additional NC - # replica graph (and creates nTDSConnection objects) for the - # config NC as above, except that only NC replicas that "are present" - # on GC servers are added to R. - for connect in mydsa.connect_table.values(): - if connect.to_be_added: - DEBUG_RED("TO BE ADDED:\n%s" % connect) - - for partdn, part in self.part_table.items(): - if part.is_config(): - self.construct_intrasite_graph(mysite, mydsa, part, True, - False) # don't detect stale - - if opts.readonly: - # Display any to be added or modified repsFrom - for connect in mydsa.connect_table.values(): - if connect.to_be_deleted: - logger.info("TO BE DELETED:\n%s" % connect) - if connect.to_be_modified: - logger.info("TO BE MODIFIED:\n%s" % connect) - if connect.to_be_added: - DEBUG_GREEN("TO BE ADDED:\n%s" % connect) - - mydsa.commit_connections(self.samdb, ro=True) - else: - # Commit any newly created connections to the samdb - mydsa.commit_connections(self.samdb) - - def list_dsas(self): - """Compile a comprehensive list of DSA DNs - - These are all the DSAs on all the sites that KCC would be - dealing with. - - This method is not idempotent and may not work correctly in - sequence with KCC.run(). - - :return: a list of DSA DN strings. - """ - self.load_my_site() - self.load_my_dsa() - - self.load_all_sites() - self.load_all_partitions() - self.load_all_transports() - self.load_all_sitelinks() - dsas = [] - for site in self.site_table.values(): - dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1) - for dsa in site.dsa_table.values()]) - return dsas - - def load_samdb(self, dburl, lp, creds): - """Load the database using an url, loadparm, and credentials - - :param dburl: a database url. - :param lp: a loadparm object. - :param cred: a Credentials object. 
- """ - self.samdb = SamDB(url=dburl, - session_info=system_session(), - credentials=creds, lp=lp) - - def plot_all_connections(self, basename, verify_properties=()): - verify = verify_properties and opts.verify - plot = opts.dot_files - if not (verify or plot): - return - - dot_edges = [] - dot_vertices = [] - edge_colours = [] - vertex_colours = [] - - for dsa in self.dsa_by_dnstr.values(): - dot_vertices.append(dsa.dsa_dnstr) - if dsa.is_ro(): - vertex_colours.append('#cc0000') - else: - vertex_colours.append('#0000cc') - for con in dsa.connect_table.values(): - if con.is_rodc_topology(): - edge_colours.append('red') - else: - edge_colours.append('blue') - dot_edges.append((con.from_dnstr, dsa.dsa_dnstr)) - - verify_and_dot(basename, dot_edges, vertices=dot_vertices, - label=self.my_dsa_dnstr, properties=verify_properties, - debug=DEBUG, verify=verify, dot_files=plot, - directed=True, edge_colors=edge_colours, - vertex_colors=vertex_colours) - - def run(self, dburl, lp, creds, forced_local_dsa=None, - forget_local_links=False, forget_intersite_links=False): - """Method to perform a complete run of the KCC and - produce an updated topology for subsequent NC replica - syncronization between domain controllers - """ - # We may already have a samdb setup if we are - # currently importing an ldif for a test run - if self.samdb is None: - try: - self.load_samdb(dburl, lp, creds) - except ldb.LdbError, (num, msg): - logger.error("Unable to open sam database %s : %s" % - (dburl, msg)) - return 1 - - if forced_local_dsa: - self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % - forced_local_dsa) - - try: - # Setup - self.load_my_site() - self.load_my_dsa() - - self.load_all_sites() - self.load_all_partitions() - self.load_all_transports() - self.load_all_sitelinks() - - if opts.verify or opts.dot_files: - guid_to_dnstr = {} - for site in self.site_table.values(): - guid_to_dnstr.update((str(dsa.dsa_guid), dnstr) - for dnstr, dsa - in site.dsa_table.items()) - - self.plot_all_connections('dsa_initial') - - dot_edges = [] - current_reps, needed_reps = self.my_dsa.get_rep_tables() - for dnstr, c_rep in current_reps.items(): - DEBUG("c_rep %s" % c_rep) - dot_edges.append((self.my_dsa.dsa_dnstr, dnstr)) - - verify_and_dot('dsa_repsFrom_initial', dot_edges, - directed=True, label=self.my_dsa_dnstr, - properties=(), debug=DEBUG, verify=opts.verify, - dot_files=opts.dot_files) - - dot_edges = [] - for site in self.site_table.values(): - for dsa in site.dsa_table.values(): - current_reps, needed_reps = dsa.get_rep_tables() - for dn_str, rep in current_reps.items(): - for reps_from in rep.rep_repsFrom: - DEBUG("rep %s" % rep) - dsa_guid = str(reps_from.source_dsa_obj_guid) - dsa_dn = guid_to_dnstr[dsa_guid] - dot_edges.append((dsa.dsa_dnstr, dsa_dn)) - - verify_and_dot('dsa_repsFrom_initial_all', dot_edges, - directed=True, label=self.my_dsa_dnstr, - properties=(), debug=DEBUG, verify=opts.verify, - dot_files=opts.dot_files) - - dot_edges = [] - for link in self.sitelink_table.values(): - for a, b in itertools.combinations(link.site_list, 2): - dot_edges.append((str(a), str(b))) - properties = ('connected',) - verify_and_dot('dsa_sitelink_initial', dot_edges, - directed=False, - label=self.my_dsa_dnstr, properties=properties, - debug=DEBUG, verify=opts.verify, - dot_files=opts.dot_files) - - if forget_local_links: - for dsa in self.my_site.dsa_table.values(): - dsa.connect_table = {k: v for k, v in - dsa.connect_table.items() - if v.is_rodc_topology()} - self.plot_all_connections('dsa_forgotten_local') 
- - if forget_intersite_links: - for site in self.site_table.values(): - for dsa in site.dsa_table.values(): - dsa.connect_table = {k: v for k, v in - dsa.connect_table.items() - if site is self.my_site and - v.is_rodc_topology()} - - self.plot_all_connections('dsa_forgotten_all') - # These are the published steps (in order) for the - # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2) - - # Step 1 - self.refresh_failed_links_connections() - - # Step 2 - self.intrasite() - - # Step 3 - all_connected = self.intersite() - - # Step 4 - self.remove_unneeded_ntdsconn(all_connected) - - # Step 5 - self.translate_ntdsconn() - - # Step 6 - self.remove_unneeded_failed_links_connections() - - # Step 7 - self.update_rodc_connection() - - if opts.verify or opts.dot_files: - self.plot_all_connections('dsa_final', - ('connected', 'forest_of_rings')) - - DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr)) - - dot_edges = [] - edge_colors = [] - my_dnstr = self.my_dsa.dsa_dnstr - current_reps, needed_reps = self.my_dsa.get_rep_tables() - for dnstr, n_rep in needed_reps.items(): - for reps_from in n_rep.rep_repsFrom: - guid_str = str(reps_from.source_dsa_obj_guid) - dot_edges.append((my_dnstr, guid_to_dnstr[guid_str])) - edge_colors.append('#' + str(n_rep.nc_guid)[:6]) - - verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True, - label=self.my_dsa_dnstr, - properties=(), debug=DEBUG, verify=opts.verify, - dot_files=opts.dot_files, - edge_colors=edge_colors) - - dot_edges = [] - - for site in self.site_table.values(): - for dsa in site.dsa_table.values(): - current_reps, needed_reps = dsa.get_rep_tables() - for n_rep in needed_reps.values(): - for reps_from in n_rep.rep_repsFrom: - dsa_guid = str(reps_from.source_dsa_obj_guid) - dsa_dn = guid_to_dnstr[dsa_guid] - dot_edges.append((dsa.dsa_dnstr, dsa_dn)) - - verify_and_dot('dsa_repsFrom_final_all', dot_edges, - directed=True, label=self.my_dsa_dnstr, - properties=(), debug=DEBUG, verify=opts.verify, - dot_files=opts.dot_files) - - except: - raise - - return 0 - - def import_ldif(self, dburl, lp, creds, ldif_file): - """Import all objects and attributes that are relevent - to the KCC algorithms from a previously exported LDIF file. - - The point of this function is to allow a programmer/debugger to - import an LDIF file with non-security relevent information that - was previously extracted from a DC database. The LDIF file is used - to create a temporary abbreviated database. The KCC algorithm can - then run against this abbreviated database for debug or test - verification that the topology generated is computationally the - same between different OSes and algorithms. - - :param dburl: path to the temporary abbreviated db to create - :param ldif_file: path to the ldif file to import - """ - try: - self.samdb = ldif_import_export.ldif_to_samdb(dburl, lp, ldif_file, - opts.forced_local_dsa) - except ldif_import_export.LdifError, e: - print e - return 1 - return 0 - - def export_ldif(self, dburl, lp, creds, ldif_file): - """Routine to extract all objects and attributes that are relevent - to the KCC algorithms from a DC database. - - The point of this function is to allow a programmer/debugger to - extract an LDIF file with non-security relevent information from - a DC database. The LDIF file can then be used to "import" via - the import_ldif() function this file into a temporary abbreviated - database. 
The KCC algorithm can then run against this abbreviated - database for debug or test verification that the topology generated - is computationally the same between different OSes and algorithms. - - :param dburl: LDAP database URL to extract info from - :param ldif_file: output LDIF file name to create - """ - try: - ldif_import_export.samdb_to_ldif_file(self.samdb, dburl, lp, creds, - ldif_file) - except ldif_import_export.LdifError, e: - print e - return 1 - return 0 - -################################################## -# Global Functions -################################################## - - -def get_spanning_tree_edges(graph, my_site, label=None): - # Phase 1: Run Dijkstra's to get a list of internal edges, which are - # just the shortest-paths connecting colored vertices - - internal_edges = set() - - for e_set in graph.edge_set: - edgeType = None - for v in graph.vertices: - v.edges = [] - - # All con_type in an edge set is the same - for e in e_set.edges: - edgeType = e.con_type - for v in e.vertices: - v.edges.append(e) - - if opts.verify or opts.dot_files: - graph_edges = [(a.site.site_dnstr, b.site.site_dnstr) - for a, b in - itertools.chain( - *(itertools.combinations(edge.vertices, 2) - for edge in e_set.edges))] - graph_nodes = [v.site.site_dnstr for v in graph.vertices] - - if opts.dot_files and opts.debug: - write_dot_file('edgeset_%s' % (edgeType,), graph_edges, - vertices=graph_nodes, label=label) - - if opts.verify: - verify_graph('spanning tree edge set %s' % edgeType, - graph_edges, vertices=graph_nodes, - properties=('complete', 'connected'), - debug=DEBUG) - - # Run dijkstra's algorithm with just the red vertices as seeds - # Seed from the full replicas - dijkstra(graph, edgeType, False) - - # Process edge set - process_edge_set(graph, e_set, internal_edges) - - # Run dijkstra's algorithm with red and black vertices as the seeds - # Seed from both full and partial replicas - dijkstra(graph, edgeType, True) - - # Process edge set - process_edge_set(graph, e_set, internal_edges) - - # All vertices have root/component as itself - setup_vertices(graph) - process_edge_set(graph, None, internal_edges) - - if opts.verify or opts.dot_files: - graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) - for e in internal_edges] - graph_nodes = [v.site.site_dnstr for v in graph.vertices] - verify_properties = ('multi_edge_forest',) - verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label, - properties=verify_properties, debug=DEBUG, - verify=opts.verify, - dot_files=opts.dot_files) - - # Phase 2: Run Kruskal's on the internal edges - output_edges, components = kruskal(graph, internal_edges) - - # This recalculates the cost for the path connecting the - # closest red vertex. Ignoring types is fine because NO - # suboptimal edge should exist in the graph - dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename - # Phase 3: Process the output - for v in graph.vertices: - if v.is_red(): - v.dist_to_red = 0 - else: - v.dist_to_red = v.repl_info.cost - - if opts.verify or opts.dot_files: - graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) - for e in internal_edges] - graph_nodes = [v.site.site_dnstr for v in graph.vertices] - verify_properties = ('multi_edge_forest',) - verify_and_dot('postkruskal', graph_edges, graph_nodes, - label=label, properties=verify_properties, - debug=DEBUG, verify=opts.verify, - dot_files=opts.dot_files) - - # Ensure only one-way connections for partial-replicas, - # and make sure they point the right way. 
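Before the orientation loop below, a condensed view of the phases above may help. This sketch reuses the module-level helpers defined later in this file (setup_vertices, dijkstra, process_edge_set, kruskal) and drops the verification and dot-file plumbing, so treat it as a reading aid rather than a drop-in replacement:

    def spanning_tree_phases(graph, internal_edges):
        for e_set in graph.edge_set:
            edge_type = None
            for v in graph.vertices:
                v.edges = []
            for e in e_set.edges:
                edge_type = e.con_type
                for v in e.vertices:
                    v.edges.append(e)
            # Phase 1: shortest paths seeded from full (red) replicas,
            # then again seeded from full and partial (red and black).
            dijkstra(graph, edge_type, False)
            process_edge_set(graph, e_set, internal_edges)
            dijkstra(graph, edge_type, True)
            process_edge_set(graph, e_set, internal_edges)
        # A final pass with no edge set also handles vertex demotion,
        # then Phase 2 runs Kruskal over the collected inter-tree edges.
        setup_vertices(graph)
        process_edge_set(graph, None, internal_edges)
        output_edges, components = kruskal(graph, internal_edges)
        # Phase 3: recompute the cost to the nearest full replica so the
        # orientation step below can point partial-replica edges at it.
        dijkstra(graph, "EDGE_TYPE_ALL", False)
        return output_edges, components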
- edge_list = [] - for edge in output_edges: - # We know these edges only have two endpoints because we made - # them. - v, w = edge.vertices - if v.site is my_site or w.site is my_site: - if (((v.is_black() or w.is_black()) and - v.dist_to_red != MAX_DWORD)): - edge.directed = True - - if w.dist_to_red < v.dist_to_red: - edge.vertices[:] = w, v - edge_list.append(edge) - - if opts.verify or opts.dot_files: - graph_edges = [[x.site.site_dnstr for x in e.vertices] - for e in edge_list] - #add the reverse edge if not directed. - graph_edges.extend([x.site.site_dnstr - for x in reversed(e.vertices)] - for e in edge_list if not e.directed) - graph_nodes = [x.site.site_dnstr for x in graph.vertices] - verify_properties = () - verify_and_dot('post-one-way-partial', graph_edges, graph_nodes, - label=label, properties=verify_properties, - debug=DEBUG, verify=opts.verify, - directed=True, - dot_files=opts.dot_files) - - # count the components - return edge_list, components - - -def sort_replica_by_dsa_guid(rep1, rep2): - """Helper to sort NCReplicas by their DSA guids - - The guids need to be sorted in their NDR form. - - :param rep1: An NC replica - :param rep2: Another replica - :return: -1, 0, or 1, indicating sort order. - """ - return cmp(ndr_pack(rep1.rep_dsa_guid), ndr_pack(rep2.rep_dsa_guid)) - - -def sort_dsa_by_gc_and_guid(dsa1, dsa2): - """Helper to sort DSAs by guid global catalog status - - GC DSAs come before non-GC DSAs, other than that, the guids are - sorted in NDR form. - - :param dsa1: A DSA object - :param dsa2: Another DSA - :return: -1, 0, or 1, indicating sort order. - """ - if dsa1.is_gc() and not dsa2.is_gc(): - return -1 - if not dsa1.is_gc() and dsa2.is_gc(): - return +1 - return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid)) - - -def is_smtp_replication_available(): - """Can the KCC use SMTP replication? - - Currently always returns false because Samba doesn't implement - SMTP transfer for NC changes between DCs. 
- - :return: Boolean (always False) - """ - return False - - -def create_edge(con_type, site_link, guid_to_vertex): - e = MultiEdge() - e.site_link = site_link - e.vertices = [] - for site_guid in site_link.site_list: - if str(site_guid) in guid_to_vertex: - e.vertices.extend(guid_to_vertex.get(str(site_guid))) - e.repl_info.cost = site_link.cost - e.repl_info.options = site_link.options - e.repl_info.interval = site_link.interval - e.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule) - e.con_type = con_type - e.directed = False - return e - - -def create_auto_edge_set(graph, transport): - e_set = MultiEdgeSet() - # use a NULL guid, not associated with a SiteLinkBridge object - e_set.guid = misc.GUID() - for site_link in graph.edges: - if site_link.con_type == transport: - e_set.edges.append(site_link) - - return e_set - - -def create_edge_set(graph, transport, site_link_bridge): - # TODO not implemented - need to store all site link bridges - e_set = MultiEdgeSet() - # e_set.guid = site_link_bridge - return e_set - - -def setup_vertices(graph): - for v in graph.vertices: - if v.is_white(): - v.repl_info.cost = MAX_DWORD - v.root = None - v.component_id = None - else: - v.repl_info.cost = 0 - v.root = v - v.component_id = v - - v.repl_info.interval = 0 - v.repl_info.options = 0xFFFFFFFF - v.repl_info.schedule = None # TODO highly suspicious - v.demoted = False - - -def dijkstra(graph, edge_type, include_black): - queue = [] - setup_dijkstra(graph, edge_type, include_black, queue) - while len(queue) > 0: - cost, guid, vertex = heapq.heappop(queue) - for edge in vertex.edges: - for v in edge.vertices: - if v is not vertex: - # add new path from vertex to v - try_new_path(graph, queue, vertex, edge, v) - - -def setup_dijkstra(graph, edge_type, include_black, queue): - setup_vertices(graph) - for vertex in graph.vertices: - if vertex.is_white(): - continue - - if (((vertex.is_black() and not include_black) - or edge_type not in vertex.accept_black - or edge_type not in vertex.accept_red_red)): - vertex.repl_info.cost = MAX_DWORD - vertex.root = None # NULL GUID - vertex.demoted = True # Demoted appears not to be used - else: - heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex)) - - -def try_new_path(graph, queue, vfrom, edge, vto): - newRI = ReplInfo() - # What this function checks is that there is a valid time frame for - # which replication can actually occur, despite being adequately - # connected - intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI) - - # If the new path costs more than the current, then ignore the edge - if newRI.cost > vto.repl_info.cost: - return - - if newRI.cost < vto.repl_info.cost and not intersect: - return - - new_duration = total_schedule(newRI.schedule) - old_duration = total_schedule(vto.repl_info.schedule) - - # Cheaper or longer schedule - if newRI.cost < vto.repl_info.cost or new_duration > old_duration: - vto.root = vfrom.root - vto.component_id = vfrom.component_id - vto.repl_info = newRI - heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto)) - - -def check_demote_vertex(vertex, edge_type): - if vertex.is_white(): - return - - # Accepts neither red-red nor black edges, demote - if ((edge_type not in vertex.accept_black and - edge_type not in vertex.accept_red_red)): - vertex.repl_info.cost = MAX_DWORD - vertex.root = None - vertex.demoted = True # Demoted appears not to be used - - -def undemote_vertex(vertex): - if vertex.is_white(): - return - - vertex.repl_info.cost = 0 - vertex.root = vertex - 
vertex.demoted = False - - -def process_edge_set(graph, e_set, internal_edges): - if e_set is None: - for edge in graph.edges: - for vertex in edge.vertices: - check_demote_vertex(vertex, edge.con_type) - process_edge(graph, edge, internal_edges) - for vertex in edge.vertices: - undemote_vertex(vertex) - else: - for edge in e_set.edges: - process_edge(graph, edge, internal_edges) - - -def process_edge(graph, examine, internal_edges): - # Find the set of all vertices touches the edge to examine - vertices = [] - for v in examine.vertices: - # Append a 4-tuple of color, repl cost, guid and vertex - vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v)) - # Sort by color, lower - DEBUG("vertices is %s" % vertices) - vertices.sort() - - color, cost, guid, bestv = vertices[0] - # Add to internal edges an edge from every colored vertex to bestV - for v in examine.vertices: - if v.component_id is None or v.root is None: - continue - - # Only add edge if valid inter-tree edge - needs a root and - # different components - if ((bestv.component_id is not None and - bestv.root is not None and - v.component_id is not None and - v.root is not None and - bestv.component_id != v.component_id)): - add_int_edge(graph, internal_edges, examine, bestv, v) - - -# Add internal edge, endpoints are roots of the vertices to pass in -# and are always colored -def add_int_edge(graph, internal_edges, examine, v1, v2): - root1 = v1.root - root2 = v2.root - - red_red = False - if root1.is_red() and root2.is_red(): - red_red = True - - if red_red: - if ((examine.con_type not in root1.accept_red_red - or examine.con_type not in root2.accept_red_red)): - return - elif (examine.con_type not in root1.accept_black - or examine.con_type not in root2.accept_black): - return - - ri = ReplInfo() - ri2 = ReplInfo() - - # Create the transitive replInfo for the two trees and this edge - if not combine_repl_info(v1.repl_info, v2.repl_info, ri): - return - # ri is now initialized - if not combine_repl_info(ri, examine.repl_info, ri2): - return - - newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type, - examine.site_link) - # Order by vertex guid - #XXX guid comparison using ndr_pack - if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid: - newIntEdge.v1 = root2 - newIntEdge.v2 = root1 - - internal_edges.add(newIntEdge) - - -def kruskal(graph, edges): - for v in graph.vertices: - v.edges = [] - - components = set([x for x in graph.vertices if not x.is_white()]) - edges = list(edges) - - # Sorted based on internal comparison function of internal edge - edges.sort() - - #XXX expected_num_tree_edges is never used - expected_num_tree_edges = 0 # TODO this value makes little sense - - count_edges = 0 - output_edges = [] - index = 0 - while index < len(edges): # TODO and num_components > 1 - e = edges[index] - parent1 = find_component(e.v1) - parent2 = find_component(e.v2) - if parent1 is not parent2: - count_edges += 1 - add_out_edge(graph, output_edges, e) - parent1.component_id = parent2 - components.discard(parent1) - - index += 1 - - return output_edges, len(components) - - -def find_component(vertex): - if vertex.component_id is vertex: - return vertex - - current = vertex - while current.component_id is not current: - current = current.component_id - - root = current - current = vertex - while current.component_id is not root: - n = current.component_id - current.component_id = root - current = n - - return root +from samba import getopt as options from samba.kcc.graph_utils import 
verify_and_dot, list_verify_tests from samba.kcc.graph_utils import GraphError -def add_out_edge(graph, output_edges, e): - v1 = e.v1 - v2 = e.v2 - # This multi-edge is a 'real' edge with no GUID - ee = MultiEdge() - ee.directed = False - ee.site_link = e.site_link - ee.vertices.append(v1) - ee.vertices.append(v2) - ee.con_type = e.e_type - ee.repl_info = e.repl_info - output_edges.append(ee) - v1.edges.append(ee) - v2.edges.append(ee) +import logging from samba.kcc.debug import logger, DEBUG, DEBUG_FN +from samba.kcc import KCC -def test_all_reps_from(lp, creds, rng_seed=None): - kcc = KCC() +def test_all_reps_from(lp, creds, unix_now, rng_seed=None): + # This implies readonly and attempt_live_connections + kcc = KCC(unix_now, readonly=True, + verify=opts.verify, debug=opts.debug, dot_files=opts.dot_files) kcc.load_samdb(opts.dburl, lp, creds) dsas = kcc.list_dsas() needed_parts = {} @@ -3157,10 +74,13 @@ def test_all_reps_from(lp, creds, rng_seed=None): for dsa_dn in dsas: if rng_seed: random.seed(rng_seed) - kcc = KCC() + kcc = KCC(unix_now, readonly=True, + verify=opts.verify, debug=opts.debug, + dot_files=opts.dot_files) kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa_dn, forget_local_links=opts.forget_local_links, - forget_intersite_links=opts.forget_intersite_links) + forget_intersite_links=opts.forget_intersite_links, + attempt_live_connections=opts.attempt_live_connections) current, needed = kcc.my_dsa.get_rep_tables() @@ -3215,33 +135,11 @@ def test_all_reps_from(lp, creds, rng_seed=None): properties=(), debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files) - -logger = logging.getLogger("samba_kcc") -logger.addHandler(logging.StreamHandler(sys.stdout)) -DEBUG = logger.debug - - -def _color_debug(*args, **kwargs): - DEBUG('%s%s%s' % (kwargs['color'], args[0], C_NORMAL), *args[1:]) - -_globals = globals() -for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW', - 'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA', - 'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'): - _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color]) - - -def DEBUG_FN(msg=''): - import traceback - filename, lineno, function, text = traceback.extract_stack(None, 2)[0] - DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno, - CYAN, function, C_NORMAL, msg)) - - ################################################## # samba_kcc entry point ################################################## + parser = optparse.OptionParser("samba_kcc [options]") sambaopts = options.SambaOptions(parser) credopts = options.CredentialsOptions(parser) @@ -3353,13 +251,10 @@ if opts.now: # else happens if break doesn't --> no match print >> sys.stderr, "could not parse time '%s'" % opts.now sys.exit(1) - unix_now = int(time.mktime(now_tuple)) else: unix_now = int(time.time()) -nt_now = unix2nttime(unix_now) - lp = sambaopts.get_loadparm() creds = credopts.get_credentials(lp, fallback_machine=True) @@ -3369,11 +264,13 @@ if opts.dburl is None: if opts.test_all_reps_from: opts.readonly = True rng_seed = opts.seed or 0xACE5CA11 - test_all_reps_from(lp, creds, rng_seed=rng_seed) + test_all_reps_from(lp, creds, unix_now, rng_seed=rng_seed) sys.exit() # Instantiate Knowledge Consistency Checker and perform run -kcc = KCC() +kcc = KCC(unix_now, readonly=opts.readonly, verify=opts.verify, + debug=opts.debug, dot_files=opts.dot_files) + if opts.exportldif: rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif) @@ -3395,7 +292,8 @@ if opts.list_valid_dsas: try: rc = kcc.run(opts.dburl, 
lp, creds, opts.forced_local_dsa, - opts.forget_local_links, opts.forget_intersite_links) + opts.forget_local_links, opts.forget_intersite_links, + attempt_live_connections=opts.attempt_live_connections) sys.exit(rc) except GraphError, e: