mirror of
https://github.com/altlinux/gpupdate.git
synced 2025-08-24 13:49:29 +03:00
Compare commits
2 Commits
0.10.6-alt
...
binary_mod
Author | SHA1 | Date | |
---|---|---|---|
8292aa69b3
|
|||
aa03e6dfa4
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -2,5 +2,4 @@ __pycache__
|
||||
*~
|
||||
_opam
|
||||
_build
|
||||
*.pyc
|
||||
|
||||
|
@ -1,22 +0,0 @@
|
||||
_gpoa()
|
||||
{
|
||||
local cur prev words cword split
|
||||
_init_completion -s || return
|
||||
|
||||
case $prev in
|
||||
--dc)
|
||||
_filedir
|
||||
return
|
||||
;;
|
||||
--loglevel)
|
||||
COMPREPLY=($(compgen -W '0 1 2 3 4 5' -- "$cur"))
|
||||
return
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=($(compgen -W '--dc --nodomain --noupdate --noplugins --list-backends --loglevel --help' -- "$cur"))
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
complete -F _gpoa gpoa
|
@ -1,27 +0,0 @@
|
||||
_gpupdate()
|
||||
{
|
||||
local cur prev words cword split
|
||||
_init_completion -s || return
|
||||
|
||||
case $prev in
|
||||
-u|--user)
|
||||
_filedir
|
||||
return
|
||||
;;
|
||||
-t|--target)
|
||||
COMPREPLY=($(compgen -W 'ALL USER COMPUTER' -- "$cur"))
|
||||
return
|
||||
;;
|
||||
-l|--loglevel)
|
||||
COMPREPLY=($(compgen -W '0 1 2 3 4 5' -- "$cur"))
|
||||
return
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=($(compgen -W '--user --target --loglevel --system --help' -- "$cur"))
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
complete -F _gpupdate gpupdate
|
||||
|
@ -1,18 +0,0 @@
|
||||
_gpupdate-setup()
|
||||
{
|
||||
local cur prev words cword split
|
||||
_init_completion -s || return
|
||||
|
||||
case $prev in
|
||||
set-backend)
|
||||
COMPREPLY=($(compgen -W 'local samba' -- "$cur"))
|
||||
return
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=($(compgen -W 'list list-backends status enable disable update write set-backend default-policy active-policy active-backend' -- "$cur"))
|
||||
return
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
complete -F _gpupdate-setup gpupdate-setup
|
19
dist/gpupdate-group-users
vendored
19
dist/gpupdate-group-users
vendored
@ -1,19 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
. /etc/control.d/functions
|
||||
|
||||
CONFIG=/etc/pam.d/system-policy-gpupdate
|
||||
|
||||
new_subst disabled \
|
||||
'^[[:space:]]*session[[:space:]]+\[.*default=1.*\][[:space:]]+pam_succeed_if.so user ingroup users.*' \
|
||||
's,^\([[:space:]]*session[[:space:]]\+\[.*\)default=[[:alnum:]]\+\(.*pam_succeed_if.so user ingroup users.*\)$,\1default=1\2,'
|
||||
new_subst enabled \
|
||||
'^[[:space:]]*session[[:space:]]+\[.*default=ignore.*\][[:space:]]+pam_succeed_if.so user ingroup users.*' \
|
||||
's,^\([[:space:]]*session[[:space:]]\+\[.*\)default=[[:alnum:]]\+\(.*pam_succeed_if.so user ingroup users.*\)$,\1default=ignore\2,'
|
||||
|
||||
new_help disabled "Disable group policy applying for users in 'users' group only"
|
||||
new_help enabled "Enable group policy applying for users in 'users' group only"
|
||||
|
||||
new_summary "Group policy applying for users in 'users' group only"
|
||||
|
||||
control_subst "$CONFIG" "$*"
|
19
dist/gpupdate-localusers
vendored
19
dist/gpupdate-localusers
vendored
@ -1,19 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
. /etc/control.d/functions
|
||||
|
||||
CONFIG=/etc/pam.d/system-policy-gpupdate
|
||||
|
||||
new_subst disabled \
|
||||
'^[[:space:]]*session[[:space:]]+\[.*success=2.*\][[:space:]]+pam_localuser.so' \
|
||||
's,^\([[:space:]]*session[[:space:]]\+\[.*\)success=[[:alnum:]]\+\(.*pam_localuser.so.*\)$,\1success=2\2,'
|
||||
new_subst enabled \
|
||||
'^[[:space:]]*session[[:space:]]+\[.*success=1.*\][[:space:]]+pam_localuser.so' \
|
||||
's,^\([[:space:]]*session[[:space:]]\+\[.*\)success=[[:alnum:]]\+\(.*pam_localuser.so.*\)$,\1success=1\2,'
|
||||
|
||||
new_help disabled 'Disable group policy applying for local users'
|
||||
new_help enabled 'Enable group policy applying for local users'
|
||||
|
||||
new_summary 'Group policy applying for local users'
|
||||
|
||||
control_subst "$CONFIG" "$*"
|
4
dist/gpupdate-remote-policy
vendored
4
dist/gpupdate-remote-policy
vendored
@ -1,4 +0,0 @@
|
||||
#%PAM-1.0
|
||||
#auth optional pam_mount.so
|
||||
session required pam_mkhomedir.so silent
|
||||
#session optional pam_mount.so
|
11
dist/gpupdate-scripts-run-user.service
vendored
11
dist/gpupdate-scripts-run-user.service
vendored
@ -1,11 +0,0 @@
|
||||
[Unit]
|
||||
Description=Run Group Policy scripts for a user
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/usr/libexec/gpupdate/scripts_runner --mode USER --action LOGON --user %u
|
||||
ExecStop=/usr/libexec/gpupdate/scripts_runner --mode USER --action LOGOFF --user %u
|
||||
|
||||
[Install]
|
||||
WantedBy=default.target
|
15
dist/gpupdate-scripts-run.service
vendored
15
dist/gpupdate-scripts-run.service
vendored
@ -1,15 +0,0 @@
|
||||
[Unit]
|
||||
Description=Running Group Policy Scripts
|
||||
After=gpupdate.service
|
||||
|
||||
[Service]
|
||||
Environment=PATH=/bin:/sbin:/usr/bin:/usr/sbin
|
||||
UnsetEnvironment=LANG LANGUAGE LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT LC_IDENTIFICATION
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/usr/libexec/gpupdate/scripts_runner --mode MACHINE --action STARTUP
|
||||
ExecStop=/usr/libexec/gpupdate/scripts_runner --mode MACHINE --action SHUTDOWN
|
||||
StandardOutput=journal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
228
dist/gpupdate-setup
vendored
Executable file
228
dist/gpupdate-setup
vendored
Executable file
@ -0,0 +1,228 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
import subprocess
|
||||
|
||||
import re
|
||||
|
||||
from gpoa.util.samba import smbopts
|
||||
|
||||
|
||||
def command(args):
|
||||
try:
|
||||
subprocess.check_call(args.split())
|
||||
except:
|
||||
print ('command: \'%s\' error' % args)
|
||||
|
||||
def from_command(args):
|
||||
try:
|
||||
with subprocess.Popen(args.split(), stdout=subprocess.PIPE) as proc:
|
||||
value = proc.stdout.readline().decode('utf-8')
|
||||
proc.wait()
|
||||
except:
|
||||
print ('from_command: \'%s\' error' % args)
|
||||
return 'local'
|
||||
|
||||
return value.strip()
|
||||
|
||||
def get_default_policy_name():
|
||||
localpolicy = 'workstation'
|
||||
dcpolicy = 'ad-domain-controller'
|
||||
|
||||
try:
|
||||
if smbopt.get_server_role() == 'active directory domain controller':
|
||||
return dcpolicy
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
release = '/etc/altlinux-release'
|
||||
if os.path.isfile(release):
|
||||
f = open(release)
|
||||
s = f.readline()
|
||||
if re.search('server', s, re.I):
|
||||
localpolicy = 'server'
|
||||
except:
|
||||
pass
|
||||
|
||||
return localpolicy
|
||||
|
||||
def parse_arguments():
|
||||
'''
|
||||
Parse CLI arguments.
|
||||
'''
|
||||
parser = argparse.ArgumentParser(prog='gpupdate-setup')
|
||||
subparsers = parser.add_subparsers(dest='action',
|
||||
metavar='action',
|
||||
help='Group Policy management actions (default action is status)')
|
||||
|
||||
parser_list = subparsers.add_parser('list',
|
||||
help='List avalable types of local policy')
|
||||
parser_status = subparsers.add_parser('status',
|
||||
help='Show current Group Policy status')
|
||||
parser_enable = subparsers.add_parser('enable',
|
||||
help='Enable Group Policy subsystem')
|
||||
parser_disable = subparsers.add_parser('disable',
|
||||
help='Disable Group Policy subsystem')
|
||||
parser_write = subparsers.add_parser('write',
|
||||
help='Operate on Group Policies (enable or disable)')
|
||||
parser_active = subparsers.add_parser('active-policy',
|
||||
help='Show name of policy enabled')
|
||||
|
||||
parser_write.add_argument('status',
|
||||
choices=['enable', 'disable'],
|
||||
help='Enable or disable Group Policies')
|
||||
parser_write.add_argument('localpolicy',
|
||||
default=None,
|
||||
nargs='?',
|
||||
help='Name of local policy to enable')
|
||||
|
||||
parser_enable.add_argument('localpolicy',
|
||||
default=None,
|
||||
nargs='?',
|
||||
help='Name of local policy to enable')
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
def get_policy_entries(directory):
|
||||
filtered_entries = list()
|
||||
if os.path.isdir(directory):
|
||||
entries = [os.path.join(directory, entry) for entry in os.listdir(directory)]
|
||||
|
||||
for entry in entries:
|
||||
if os.path.isdir(os.path.join(entry)):
|
||||
if not os.path.islink(os.path.join(entry)):
|
||||
if not entry.rpartition('/')[2] == 'default':
|
||||
filtered_entries.append(entry)
|
||||
|
||||
return filtered_entries
|
||||
|
||||
|
||||
def get_policy_variants():
|
||||
'''
|
||||
Get the list of local policy variants deployed on this system.
|
||||
Please note that is case overlapping names the names in
|
||||
/etc/local-policy must override names in /usr/share/local-policy
|
||||
'''
|
||||
policy_dir = '/usr/share/local-policy'
|
||||
etc_policy_dir = '/etc/local-policy'
|
||||
|
||||
system_policies = get_policy_entries(policy_dir)
|
||||
user_policies = get_policy_entries(etc_policy_dir)
|
||||
|
||||
general_listing = list()
|
||||
general_listing.extend(system_policies)
|
||||
general_listing.extend(user_policies)
|
||||
|
||||
return general_listing
|
||||
|
||||
def validate_policy_name(policy_name):
|
||||
return policy_name in [os.path.basename(d) for d in get_policy_variants()]
|
||||
|
||||
def get_status():
|
||||
systemd_unit_link = '/etc/systemd/system/multi-user.target.wants/gpupdate.service'
|
||||
|
||||
return os.path.islink(systemd_unit_link)
|
||||
|
||||
def get_active_policy():
|
||||
policy_dir = '/usr/share/local-policy'
|
||||
etc_policy_dir = '/etc/local-policy'
|
||||
default_policy_name = os.path.join(policy_dir, get_default_policy_name())
|
||||
|
||||
active_policy_name = os.path.join(etc_policy_dir, 'active')
|
||||
|
||||
actual_policy_name = os.path.realpath(default_policy_name)
|
||||
|
||||
if os.path.isdir(active_policy_name):
|
||||
actual_policy_name = os.path.realpath(active_policy_name)
|
||||
|
||||
return actual_policy_name
|
||||
|
||||
|
||||
def disable_gp():
|
||||
if from_command('/usr/sbin/control system-auth') != 'local':
|
||||
command('/usr/sbin/control system-policy global')
|
||||
else:
|
||||
command('/usr/sbin/control system-policy local')
|
||||
command('systemctl disable gpupdate.service')
|
||||
command('systemctl --global disable gpupdate-user.service')
|
||||
|
||||
def enable_gp(policy_name):
|
||||
policy_dir = '/usr/share/local-policy'
|
||||
etc_policy_dir = '/etc/local-policy'
|
||||
target_policy_name = get_default_policy_name()
|
||||
if policy_name:
|
||||
if validate_policy_name(policy_name):
|
||||
target_policy_name = policy_name
|
||||
|
||||
print (target_policy_name)
|
||||
default_policy_name = os.path.join(policy_dir, target_policy_name)
|
||||
active_policy_name = os.path.join(etc_policy_dir, 'active')
|
||||
|
||||
if not os.path.isdir(etc_policy_dir):
|
||||
os.makedirs(etc_policy_dir)
|
||||
|
||||
if not os.path.islink(active_policy_name):
|
||||
os.symlink(default_policy_name, active_policy_name)
|
||||
else:
|
||||
os.unlink(active_policy_name)
|
||||
os.symlink(default_policy_name, active_policy_name)
|
||||
|
||||
# Enable oddjobd_gpupdate in PAM config
|
||||
command('/usr/sbin/control system-policy gpupdate')
|
||||
# Bootstrap the Group Policy engine
|
||||
command('/usr/sbin/gpoa --nodomain --loglevel 5')
|
||||
# Enable gpupdate-setup.service for all users
|
||||
command('systemctl --global enable gpupdate-user.service')
|
||||
|
||||
def main():
|
||||
arguments = parse_arguments()
|
||||
|
||||
if arguments.action == 'list':
|
||||
for entry in get_policy_variants():
|
||||
print(entry.rpartition('/')[2])
|
||||
|
||||
if arguments.action == 'status' or arguments.action == None:
|
||||
if get_status():
|
||||
print('enabled')
|
||||
else:
|
||||
print('disabled')
|
||||
|
||||
if arguments.action == 'write':
|
||||
if arguments.status == 'enable' or arguments.status == '#t':
|
||||
enable_gp(arguments.localpolicy)
|
||||
if arguments.status == 'disable' or arguments.status == '#f':
|
||||
disable_gp()
|
||||
|
||||
if arguments.action == "enable":
|
||||
enable_gp(arguments.localpolicy)
|
||||
|
||||
if arguments.action == "disable":
|
||||
disable_gp()
|
||||
|
||||
if arguments.action == 'active-policy':
|
||||
print(get_active_policy())
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
19
dist/gpupdate-system-uids
vendored
19
dist/gpupdate-system-uids
vendored
@ -1,19 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
. /etc/control.d/functions
|
||||
|
||||
CONFIG=/etc/pam.d/system-policy-gpupdate
|
||||
|
||||
new_subst disabled \
|
||||
'^[[:space:]]*session[[:space:]]+\[.*default=1.*\][[:space:]]+pam_succeed_if.so uid >= 500.*' \
|
||||
's,^\([[:space:]]*session[[:space:]]\+\[.*\)default=[[:alnum:]]\+\(.*pam_succeed_if.so uid >= 500.*\)$,\1default=1\2,'
|
||||
new_subst enabled \
|
||||
'^[[:space:]]*session[[:space:]]+\[.*default=ignore.*\][[:space:]]+pam_succeed_if.so uid >= 500.*' \
|
||||
's,^\([[:space:]]*session[[:space:]]\+\[.*\)default=[[:alnum:]]\+\(.*pam_succeed_if.so uid >= 500.*\)$,\1default=ignore\2,'
|
||||
|
||||
new_help disabled "Disable group policy applying for users with not system uids only"
|
||||
new_help enabled "Enable group policy applying for users with not system uids only"
|
||||
|
||||
new_summary "Group policy applying for users with not system uids (greater or equal 500) only"
|
||||
|
||||
control_subst "$CONFIG" "$*"
|
11
dist/gpupdate-user.service
vendored
11
dist/gpupdate-user.service
vendored
@ -4,10 +4,13 @@ Description=gpupdate in userspace
|
||||
|
||||
# gpupdate on Windows runs once per hour
|
||||
[Service]
|
||||
Environment=PATH=/bin:/sbin:/usr/bin:/usr/sbin
|
||||
UnsetEnvironment=LANG LANGUAGE LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT LC_IDENTIFICATION
|
||||
Type=oneshot
|
||||
ExecStart=/usr/bin/gpupdate --target USER
|
||||
Environment="PATH=/bin:/sbin:/usr/bin:/usr/sbin"
|
||||
Type=simple
|
||||
RestartSec=3600
|
||||
TimeoutSec=3000
|
||||
Restart=always
|
||||
ExecStart=/usr/sbin/gpoa
|
||||
|
||||
[Install]
|
||||
WantedBy=default.target
|
||||
|
||||
|
9
dist/gpupdate-user.timer
vendored
9
dist/gpupdate-user.timer
vendored
@ -1,9 +0,0 @@
|
||||
[Unit]
|
||||
Description=Run gpupdate-user every hour
|
||||
|
||||
[Timer]
|
||||
OnStartupSec=60min
|
||||
OnUnitActiveSec=60min
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
4
dist/gpupdate.ini
vendored
4
dist/gpupdate.ini
vendored
@ -1,4 +0,0 @@
|
||||
[gpoa]
|
||||
backend = local
|
||||
local-policy = default
|
||||
|
10
dist/gpupdate.service
vendored
10
dist/gpupdate.service
vendored
@ -1,11 +1,13 @@
|
||||
[Unit]
|
||||
Description=Group policy update for machine
|
||||
After=syslog.target network-online.target sssd.service
|
||||
After=sssd.service
|
||||
|
||||
[Service]
|
||||
Environment=PATH=/bin:/sbin:/usr/bin:/usr/sbin
|
||||
UnsetEnvironment=LANG LANGUAGE LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT LC_IDENTIFICATION
|
||||
Type=oneshot
|
||||
Environment="PATH=/bin:/sbin:/usr/bin:/usr/sbin"
|
||||
Type=simple
|
||||
RestartSec=3600
|
||||
TimeoutSec=3000
|
||||
Restart=always
|
||||
ExecStart=/usr/bin/gpupdate
|
||||
StandardOutput=journal
|
||||
|
||||
|
9
dist/gpupdate.timer
vendored
9
dist/gpupdate.timer
vendored
@ -1,9 +0,0 @@
|
||||
[Unit]
|
||||
Description=Run gpupdate every hour
|
||||
|
||||
[Timer]
|
||||
OnStartupSec=60min
|
||||
OnUnitActiveSec=60min
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
14
dist/system-policy-gpupdate
vendored
14
dist/system-policy-gpupdate
vendored
@ -1,13 +1,5 @@
|
||||
#%PAM-1.0
|
||||
session [success=2 perm_denied=ignore default=die] pam_localuser.so
|
||||
session substack gpupdate-remote-policy
|
||||
session [default=1] pam_permit.so
|
||||
session [default=7] pam_permit.so
|
||||
session [success=1 default=ignore] pam_succeed_if.so user ingroup users quiet
|
||||
session [default=5] pam_permit.so
|
||||
session [success=1 default=ignore] pam_succeed_if.so uid >= 500 quiet
|
||||
session [default=3] pam_permit.so
|
||||
session [success=1 default=ignore] pam_succeed_if.so service = systemd-user quiet
|
||||
session required pam_mktemp.so
|
||||
session required pam_mkhomedir.so silent
|
||||
session required pam_limits.so
|
||||
-session required pam_oddjob_gpupdate.so
|
||||
session optional pam_env.so user_readenv=1 conffile=/etc/gpupdate/environment user_envfile=.gpupdate_environment
|
||||
session required pam_permit.so
|
@ -16,15 +16,11 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import logging
|
||||
|
||||
from util.windows import smbcreds
|
||||
from .samba_backend import samba_backend
|
||||
from .nodomain_backend import nodomain_backend
|
||||
from util.logging import log
|
||||
from util.config import GPConfig
|
||||
from util.util import get_uid_by_username, touch_file
|
||||
from util.paths import get_dconf_config_path
|
||||
from storage.dconf_registry import Dconf_registry, create_dconf_ini_file
|
||||
|
||||
def backend_factory(dc, username, is_machine, no_domain = False):
|
||||
'''
|
||||
@ -34,40 +30,23 @@ def backend_factory(dc, username, is_machine, no_domain = False):
|
||||
policies enforced by domain administrators.
|
||||
'''
|
||||
back = None
|
||||
config = GPConfig()
|
||||
|
||||
if config.get_backend() == 'samba' and not no_domain:
|
||||
if not dc:
|
||||
dc = config.get_dc()
|
||||
if dc:
|
||||
ld = dict({'dc': dc})
|
||||
log('D52', ld)
|
||||
domain = None
|
||||
if not no_domain:
|
||||
sc = smbcreds(dc)
|
||||
domain = sc.get_domain()
|
||||
ldata = dict({'domain': domain, "username": username, 'is_machine': is_machine})
|
||||
log('D9', ldata)
|
||||
|
||||
if domain:
|
||||
logging.debug('Initialize Samba backend for domain: {}'.format(domain))
|
||||
try:
|
||||
back = samba_backend(sc, username, domain, is_machine)
|
||||
except Exception as exc:
|
||||
logdata = dict({'error': str(exc)})
|
||||
log('E7', logdata)
|
||||
|
||||
if config.get_backend() == 'local' or no_domain:
|
||||
log('D8')
|
||||
logging.error('Unable to initialize Samba backend: {}'.format(exc))
|
||||
else:
|
||||
logging.debug('Initialize local backend with no domain')
|
||||
try:
|
||||
back = nodomain_backend()
|
||||
except Exception as exc:
|
||||
logdata = dict({'error': str(exc)})
|
||||
log('E8', logdata)
|
||||
logging.error('Unable to initialize no-domain backend: {}'.format(exc))
|
||||
|
||||
return back
|
||||
|
||||
def save_dconf(username, is_machine):
|
||||
if is_machine:
|
||||
uid = None
|
||||
else:
|
||||
uid = get_uid_by_username(username) if not is_machine else None
|
||||
target_file = get_dconf_config_path(uid)
|
||||
touch_file(target_file)
|
||||
Dconf_registry.apply_template(uid)
|
||||
create_dconf_ini_file(target_file,Dconf_registry.global_registry_dict)
|
||||
|
@ -25,7 +25,7 @@ from gpt.gpt import gpt, get_local_gpt
|
||||
from util.util import (
|
||||
get_machine_name
|
||||
)
|
||||
from util.sid import get_sid
|
||||
from util.windows import get_sid
|
||||
import util.preg
|
||||
from util.logging import slogm
|
||||
|
||||
@ -35,7 +35,7 @@ class nodomain_backend(applier_backend):
|
||||
domain = None
|
||||
machine_name = get_machine_name()
|
||||
machine_sid = get_sid(domain, machine_name, True)
|
||||
self.storage = registry_factory()
|
||||
self.storage = registry_factory('registry')
|
||||
self.storage.set_info('domain', domain)
|
||||
self.storage.set_info('machine_name', machine_name)
|
||||
self.storage.set_info('machine_sid', machine_sid)
|
||||
@ -52,6 +52,5 @@ class nodomain_backend(applier_backend):
|
||||
self.storage.wipe_hklm()
|
||||
self.storage.wipe_user(self.storage.get_info('machine_sid'))
|
||||
local_policy = get_local_gpt(self.sid)
|
||||
local_policy.merge_machine()
|
||||
local_policy.merge_user()
|
||||
local_policy.merge()
|
||||
|
||||
|
@ -16,38 +16,26 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import logging
|
||||
import os
|
||||
# Facility to determine GPTs for user
|
||||
try:
|
||||
from samba.gpclass import check_safe_path
|
||||
except ImportError:
|
||||
from samba.gp.gpclass import check_safe_path
|
||||
from samba.gpclass import check_safe_path, check_refresh_gpo_list
|
||||
|
||||
from .applier_backend import applier_backend
|
||||
from storage import registry_factory
|
||||
from storage import cache_factory, registry_factory
|
||||
from gpt.gpt import gpt, get_local_gpt
|
||||
from util.util import (
|
||||
get_machine_name,
|
||||
is_machine_name
|
||||
)
|
||||
from util.kerberos import (
|
||||
machine_kinit
|
||||
, machine_kdestroy
|
||||
)
|
||||
from util.sid import get_sid
|
||||
from util.windows import get_sid
|
||||
import util.preg
|
||||
from util.logging import log
|
||||
from util.logging import slogm
|
||||
|
||||
class samba_backend(applier_backend):
|
||||
__user_policy_mode_key = '/SOFTWARE/Policies/Microsoft/Windows/System/UserPolicyMode'
|
||||
__user_policy_mode_key_win = '/Software/Policies/Microsoft/Windows/System/UserPolicyMode'
|
||||
|
||||
def __init__(self, sambacreds, username, domain, is_machine):
|
||||
self.cache_path = '/var/cache/gpupdate/creds/krb5cc_{}'.format(os.getpid())
|
||||
self.__kinit_successful = machine_kinit(self.cache_path)
|
||||
if not self.__kinit_successful:
|
||||
raise Exception('kinit is not successful')
|
||||
self.storage = registry_factory()
|
||||
self.storage = registry_factory('registry')
|
||||
self.storage.set_info('domain', domain)
|
||||
machine_name = get_machine_name()
|
||||
machine_sid = get_sid(domain, machine_name, is_machine)
|
||||
@ -62,93 +50,33 @@ class samba_backend(applier_backend):
|
||||
else:
|
||||
self.sid = get_sid(self.storage.get_info('domain'), self.username)
|
||||
|
||||
self.cache = cache_factory('regpol_cache')
|
||||
self.gpo_names = cache_factory('gpo_names')
|
||||
|
||||
# Samba objects - LoadParm() and CredentialsOptions()
|
||||
self.sambacreds = sambacreds
|
||||
|
||||
self.cache_dir = self.sambacreds.get_cache_dir()
|
||||
logdata = dict({'cachedir': self.cache_dir})
|
||||
log('D7', logdata)
|
||||
|
||||
def __del__(self):
|
||||
if self.__kinit_successful:
|
||||
machine_kdestroy()
|
||||
|
||||
def get_policy_mode(self):
|
||||
'''
|
||||
Get UserPolicyMode parameter value in order to determine if it
|
||||
is possible to work with user's part of GPT. This value is
|
||||
checked only if working for user's SID.
|
||||
'''
|
||||
upm_key = self.storage.get_key_value(self.__user_policy_mode_key)
|
||||
upm_win_key = self.storage.get_key_value(self.__user_policy_mode_key_win)
|
||||
upm = upm_key if upm_key else upm_win_key
|
||||
if upm:
|
||||
upm = int(upm)
|
||||
if upm < 0 or upm > 2:
|
||||
upm = 0
|
||||
else:
|
||||
upm = 0
|
||||
|
||||
return upm
|
||||
logging.debug(slogm('Cache directory is: {}'.format(self.cache_dir)))
|
||||
|
||||
def retrieve_and_store(self):
|
||||
'''
|
||||
Retrieve settings and strore it in a database
|
||||
'''
|
||||
# Get policies for machine at first.
|
||||
machine_gpts = list()
|
||||
try:
|
||||
machine_gpts = self._get_gpts(get_machine_name(), self.storage.get_info('machine_sid'))
|
||||
except Exception as exc:
|
||||
log('F2')
|
||||
raise exc
|
||||
|
||||
if self._is_machine_username:
|
||||
self.storage.wipe_hklm()
|
||||
self.storage.wipe_user(self.storage.get_info('machine_sid'))
|
||||
for gptobj in machine_gpts:
|
||||
try:
|
||||
gptobj.merge_machine()
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['msg'] = str(exc)
|
||||
log('E26', logdata)
|
||||
machine_gpts = self._get_gpts(get_machine_name(), self.storage.get_info('machine_sid'))
|
||||
self.storage.wipe_hklm()
|
||||
self.storage.wipe_user(self.storage.get_info('machine_sid'))
|
||||
for gptobj in machine_gpts:
|
||||
gptobj.merge()
|
||||
|
||||
# Load user GPT values in case user's name specified
|
||||
# This is a buggy implementation and should be tested more
|
||||
else:
|
||||
user_gpts = list()
|
||||
try:
|
||||
user_gpts = self._get_gpts(self.username, self.sid)
|
||||
except Exception as exc:
|
||||
log('F3')
|
||||
raise exc
|
||||
if not self._is_machine_username:
|
||||
user_gpts = self._get_gpts(self.username, self.sid)
|
||||
self.storage.wipe_user(self.sid)
|
||||
|
||||
# Merge user settings if UserPolicyMode set accordingly
|
||||
# and user settings (for HKCU) are exist.
|
||||
policy_mode = self.get_policy_mode()
|
||||
logdata = dict({'mode': upm2str(policy_mode), 'sid': self.sid})
|
||||
log('D152', logdata)
|
||||
|
||||
if policy_mode < 2:
|
||||
for gptobj in user_gpts:
|
||||
try:
|
||||
gptobj.merge_user()
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['msg'] = str(exc)
|
||||
log('E27', logdata)
|
||||
|
||||
if policy_mode > 0:
|
||||
for gptobj in machine_gpts:
|
||||
try:
|
||||
gptobj.sid = self.sid
|
||||
gptobj.merge_user()
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['msg'] = str(exc)
|
||||
log('E63', logdata)
|
||||
for gptobj in user_gpts:
|
||||
gptobj.merge()
|
||||
|
||||
def _check_sysvol_present(self, gpo):
|
||||
'''
|
||||
@ -158,34 +86,21 @@ class samba_backend(applier_backend):
|
||||
# GPO named "Local Policy" has no entry by its nature so
|
||||
# no reason to print warning.
|
||||
if 'Local Policy' != gpo.name:
|
||||
logdata = dict({'gponame': gpo.name})
|
||||
log('W4', logdata)
|
||||
logging.warning(slogm('No SYSVOL entry assigned to GPO {}'.format(gpo.name)))
|
||||
return False
|
||||
return True
|
||||
|
||||
def _get_gpts(self, username, sid):
|
||||
gpts = list()
|
||||
|
||||
log('D45', {'username': username, 'sid': sid})
|
||||
# util.windows.smbcreds
|
||||
gpos = self.sambacreds.update_gpos(username)
|
||||
log('D46')
|
||||
for gpo in gpos:
|
||||
if self._check_sysvol_present(gpo):
|
||||
logging.debug(slogm('Found SYSVOL entry "{}" for GPO "{}"'.format(gpo.file_sys_path, gpo.display_name)))
|
||||
path = check_safe_path(gpo.file_sys_path).upper()
|
||||
slogdata = dict({'sysvol_path': gpo.file_sys_path, 'gpo_name': gpo.display_name, 'gpo_path': path})
|
||||
log('D30', slogdata)
|
||||
logging.debug(slogm('Path: {}'.format(path)))
|
||||
gpt_abspath = os.path.join(self.cache_dir, 'gpo_cache', path)
|
||||
gpo_version=None
|
||||
try:
|
||||
gpo_version=gpo.version
|
||||
except:
|
||||
log('D210')
|
||||
|
||||
if self._is_machine_username:
|
||||
obj = gpt(gpt_abspath, sid, None, version=gpo_version)
|
||||
else:
|
||||
obj = gpt(gpt_abspath, sid, self.username, version=gpo_version)
|
||||
obj = gpt(gpt_abspath, sid)
|
||||
obj.set_name(gpo.display_name)
|
||||
gpts.append(obj)
|
||||
else:
|
||||
@ -194,16 +109,3 @@ class samba_backend(applier_backend):
|
||||
|
||||
return gpts
|
||||
|
||||
def upm2str(upm_num):
|
||||
'''
|
||||
Translate UserPolicyMode to string.
|
||||
'''
|
||||
result = 'Not configured'
|
||||
|
||||
if upm_num in [1, '1']:
|
||||
result = 'Merge'
|
||||
|
||||
if upm_num in [2, '2']:
|
||||
result = 'Replace'
|
||||
|
||||
return result
|
||||
|
@ -18,61 +18,6 @@
|
||||
|
||||
from abc import ABC
|
||||
|
||||
from util.logging import log
|
||||
|
||||
def check_experimental_enabled(storage):
|
||||
experimental_enable_flag = '/Software/BaseALT/Policies/GPUpdate/GlobalExperimental'
|
||||
flag = storage.get_key_value(experimental_enable_flag)
|
||||
|
||||
result = False
|
||||
|
||||
if flag and '1' == str(flag):
|
||||
result = True
|
||||
|
||||
return result
|
||||
|
||||
def check_windows_mapping_enabled(storage):
|
||||
windows_mapping_enable_flag = '/Software/BaseALT/Policies/GPUpdate/WindowsPoliciesMapping'
|
||||
flag = storage.get_key_value(windows_mapping_enable_flag)
|
||||
|
||||
result = True
|
||||
flag = str(flag)
|
||||
if flag and '0' == flag:
|
||||
result = False
|
||||
|
||||
return result
|
||||
|
||||
def check_module_enabled(storage, module_name):
|
||||
gpupdate_module_enable_branch = '/Software/BaseALT/Policies/GPUpdate'
|
||||
gpupdate_module_flag = '{}/{}'.format(gpupdate_module_enable_branch, module_name)
|
||||
flag = storage.get_key_value(gpupdate_module_flag)
|
||||
|
||||
result = None
|
||||
flag = str(flag)
|
||||
if flag:
|
||||
if '1' == flag:
|
||||
result = True
|
||||
else:
|
||||
result = False
|
||||
|
||||
return result
|
||||
|
||||
def check_enabled(storage, module_name, is_experimental):
|
||||
module_enabled = check_module_enabled(storage, module_name)
|
||||
exp_enabled = check_experimental_enabled(storage)
|
||||
|
||||
result = False
|
||||
|
||||
if None == module_enabled:
|
||||
if is_experimental and exp_enabled:
|
||||
result = True
|
||||
if not is_experimental:
|
||||
result = True
|
||||
else:
|
||||
result = module_enabled
|
||||
|
||||
return result
|
||||
|
||||
class applier_frontend(ABC):
|
||||
@classmethod
|
||||
def __init__(self, regobj):
|
||||
|
@ -19,29 +19,13 @@
|
||||
import subprocess
|
||||
import threading
|
||||
import logging
|
||||
from util.logging import slogm, log
|
||||
|
||||
def control_subst(preg_name):
|
||||
'''
|
||||
This is a workaround for control names which can't be used in
|
||||
PReg/ADMX files.
|
||||
'''
|
||||
control_triggers = dict()
|
||||
control_triggers['dvd_rw-format'] = 'dvd+rw-format'
|
||||
control_triggers['dvd_rw-mediainfo'] = 'dvd+rw-mediainfo'
|
||||
control_triggers['dvd_rw-booktype'] = 'dvd+rw-booktype'
|
||||
|
||||
result = preg_name
|
||||
if preg_name in control_triggers:
|
||||
result = control_triggers[preg_name]
|
||||
|
||||
return result
|
||||
from util.logging import slogm
|
||||
|
||||
class control:
|
||||
def __init__(self, name, value):
|
||||
if type(value) != int and type(value) != str:
|
||||
raise Exception('Unknown type of value for control')
|
||||
self.control_name = control_subst(name)
|
||||
self.control_name = name
|
||||
self.control_value = value
|
||||
self.possible_values = self._query_control_values()
|
||||
if self.possible_values == None:
|
||||
@ -55,12 +39,10 @@ class control:
|
||||
values = list()
|
||||
|
||||
popen_call = ['/usr/sbin/control', self.control_name, 'list']
|
||||
with subprocess.Popen(popen_call, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
|
||||
with subprocess.Popen(popen_call, stdout=subprocess.PIPE) as proc:
|
||||
values = proc.stdout.readline().decode('utf-8').split()
|
||||
valErr = proc.stderr.readline().decode('utf-8')
|
||||
if valErr:
|
||||
raise ValueError(valErr)
|
||||
proc.wait()
|
||||
|
||||
return values
|
||||
|
||||
def _map_control_status(self, int_status):
|
||||
@ -70,11 +52,7 @@ class control:
|
||||
try:
|
||||
str_status = self.possible_values[int_status]
|
||||
except IndexError as exc:
|
||||
logdata = dict()
|
||||
logdata['control'] = self.control_name
|
||||
logdata['value from'] = self.possible_values
|
||||
logdata['by index'] = int_status
|
||||
log('E41', )
|
||||
logging.error(slogm('Error getting control ({}) value from {} by index {}'.format(self.control_name, self.possible_values, int_status)))
|
||||
str_status = None
|
||||
|
||||
return str_status
|
||||
@ -99,30 +77,20 @@ class control:
|
||||
if type(self.control_value) == int:
|
||||
status = self._map_control_status(self.control_value)
|
||||
if status == None:
|
||||
logdata = dict()
|
||||
logdata['control'] = self.control_name
|
||||
logdata['inpossible values'] = self.self.control_value
|
||||
log('E42', logdata)
|
||||
logging.error(slogm('\'{}\' is not in possible values for control {}'.format(self.control_value, self.control_name)))
|
||||
return
|
||||
elif type(self.control_value) == str:
|
||||
if self.control_value not in self.possible_values:
|
||||
logdata = dict()
|
||||
logdata['control'] = self.control_name
|
||||
logdata['inpossible values'] = self.self.control_value
|
||||
log('E59', logdata)
|
||||
logging.error(slogm('\'{}\' is not in possible values for control {}'.format(self.control_value, self.control_name)))
|
||||
return
|
||||
status = self.control_value
|
||||
logdata = dict()
|
||||
logdata['control'] = self.control_name
|
||||
logdata['status'] = status
|
||||
log('D68', logdata)
|
||||
|
||||
logging.debug(slogm('Setting control {} to {}'.format(self.control_name, status)))
|
||||
|
||||
try:
|
||||
popen_call = ['/usr/sbin/control', self.control_name, status]
|
||||
with subprocess.Popen(popen_call, stdout=subprocess.PIPE) as proc:
|
||||
proc.wait()
|
||||
except:
|
||||
logdata = dict()
|
||||
logdata['control'] = self.control_name
|
||||
logdata['status'] = status
|
||||
log('E43', logdata)
|
||||
logging.error(slogm('Unable to set {} to {}'.format(self.control_name, status)))
|
||||
|
||||
|
@ -1,120 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from os.path import isfile
|
||||
from util.logging import slogm
|
||||
import logging
|
||||
|
||||
from gpt.envvars import (
|
||||
FileAction
|
||||
, action_letter2enum
|
||||
)
|
||||
from util.windows import expand_windows_var
|
||||
from util.util import (
|
||||
get_homedir,
|
||||
homedir_exists
|
||||
)
|
||||
|
||||
class Envvar:
|
||||
def __init__(self, envvars, username=''):
|
||||
self.username = username
|
||||
self.envvars = envvars
|
||||
if self.username == 'root':
|
||||
self.envvar_file_path = '/etc/gpupdate/environment'
|
||||
else:
|
||||
self.envvar_file_path = get_homedir(self.username) + '/.gpupdate_environment'
|
||||
|
||||
def _open_envvar_file(self):
|
||||
fd = None
|
||||
if isfile(self.envvar_file_path):
|
||||
fd = open(self.envvar_file_path, 'r+')
|
||||
else:
|
||||
fd = open(self.envvar_file_path, 'w')
|
||||
fd.close()
|
||||
fd = open(self.envvar_file_path, 'r+')
|
||||
return fd
|
||||
|
||||
def _create_action(self, create_dict, envvar_file):
|
||||
lines_old = envvar_file.readlines()
|
||||
lines_new = list()
|
||||
for name in create_dict:
|
||||
exist = False
|
||||
for line in lines_old:
|
||||
if line.startswith(name + '='):
|
||||
exist = True
|
||||
break
|
||||
if not exist:
|
||||
lines_new.append(name + '=' + create_dict[name] + '\n')
|
||||
if len(lines_new) > 0:
|
||||
envvar_file.writelines(lines_new)
|
||||
|
||||
def _delete_action(self, delete_dict, envvar_file):
|
||||
lines = envvar_file.readlines()
|
||||
deleted = False
|
||||
for name in delete_dict:
|
||||
for line in lines:
|
||||
if line.startswith(name + '='):
|
||||
lines.remove(line)
|
||||
deleted = True
|
||||
break
|
||||
if deleted:
|
||||
envvar_file.writelines(lines)
|
||||
|
||||
def act(self):
|
||||
if isfile(self.envvar_file_path):
|
||||
with open(self.envvar_file_path, 'r') as f:
|
||||
lines = f.readlines()
|
||||
else:
|
||||
lines = list()
|
||||
|
||||
file_changed = False
|
||||
for envvar_object in self.envvars:
|
||||
action = action_letter2enum(envvar_object.action)
|
||||
name = envvar_object.name
|
||||
value = expand_windows_var(envvar_object.value, self.username)
|
||||
if value != envvar_object.value:
|
||||
#slashes are replaced only if the change of variables was performed and we consider the variable as a path to a file or directory
|
||||
value = value.replace('\\', '/')
|
||||
exist_line = None
|
||||
for line in lines:
|
||||
if line == '\n':
|
||||
continue
|
||||
if line.split()[0] == name:
|
||||
exist_line = line
|
||||
break
|
||||
if exist_line != None:
|
||||
if action == FileAction.CREATE:
|
||||
pass
|
||||
if action == FileAction.DELETE:
|
||||
lines.remove(exist_line)
|
||||
file_changed = True
|
||||
if action == FileAction.UPDATE or action == FileAction.REPLACE:
|
||||
if exist_line.split()[1].split('=')[1].replace('"', '') != value: #from 'NAME DEFAULT=value' cut value and compare, don`t change if it matches
|
||||
lines.remove(exist_line)
|
||||
lines.append(name + ' ' + 'DEFAULT=\"' + value + '\"\n')
|
||||
file_changed = True
|
||||
else:
|
||||
if action == FileAction.CREATE or action == FileAction.UPDATE or action == FileAction.REPLACE:
|
||||
lines.append(name + ' ' + 'DEFAULT=\"' + value + '\"\n')
|
||||
file_changed = True
|
||||
if action == FileAction.DELETE:
|
||||
pass
|
||||
|
||||
if file_changed:
|
||||
with open(self.envvar_file_path, 'w') as f:
|
||||
f.writelines(lines)
|
@ -1,268 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2022 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
from gpt.folders import (
|
||||
FileAction
|
||||
, action_letter2enum
|
||||
)
|
||||
from .folder import str2bool
|
||||
from util.logging import log
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from util.windows import expand_windows_var
|
||||
from util.util import get_homedir
|
||||
from util.exceptions import NotUNCPathError
|
||||
from util.paths import UNCPath
|
||||
import fnmatch
|
||||
|
||||
class Files_cp:
|
||||
def __init__(self, file_obj, file_cache, exe_check, username=None):
|
||||
self.file_cache = file_cache
|
||||
self.exe_check = exe_check
|
||||
targetPath = expand_windows_var(file_obj.targetPath, username).replace('\\', '/')
|
||||
self.targetPath = check_target_path(targetPath, username)
|
||||
if not self.targetPath:
|
||||
return
|
||||
self.fromPath = (expand_windows_var(file_obj.fromPath, username).replace('\\', '/')
|
||||
if file_obj.fromPath else None)
|
||||
self.isTargetPathDirectory = False
|
||||
self.action = action_letter2enum(file_obj.action)
|
||||
self.readOnly = str2bool(file_obj.readOnly)
|
||||
self.archive = str2bool(file_obj.archive)
|
||||
self.hidden = str2bool(file_obj.hidden)
|
||||
self.suppress = str2bool(file_obj.suppress)
|
||||
self.executable = str2bool(file_obj.executable)
|
||||
self.username = username
|
||||
self.fromPathFiles = list()
|
||||
if self.fromPath:
|
||||
if targetPath[-1] == '/' or self.is_pattern(Path(self.fromPath).name):
|
||||
self.isTargetPathDirectory = True
|
||||
self.get_list_files()
|
||||
self.act()
|
||||
|
||||
def get_target_file(self, targetPath:Path, fromFile:str) -> Path:
|
||||
try:
|
||||
if fromFile:
|
||||
fromFileName = Path(fromFile).name
|
||||
if self.isTargetPathDirectory:
|
||||
targetPath.mkdir(parents = True, exist_ok = True)
|
||||
else:
|
||||
targetPath.parent.mkdir(parents = True, exist_ok = True)
|
||||
targetPath = targetPath.parent
|
||||
fromFileName = self.targetPath.name
|
||||
if self.hidden:
|
||||
return targetPath.joinpath('.' + fromFileName)
|
||||
else:
|
||||
return targetPath.joinpath(fromFileName)
|
||||
|
||||
else:
|
||||
if not self.hidden:
|
||||
return targetPath
|
||||
else:
|
||||
return targetPath.parent.joinpath('.' + targetPath.name)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['targetPath'] = targetPath
|
||||
logdata['fromFile'] = fromFile
|
||||
logdata['exc'] = exc
|
||||
log('D163', logdata)
|
||||
|
||||
return None
|
||||
|
||||
def copy_target_file(self, targetFile:Path, fromFile:str):
|
||||
try:
|
||||
uri_path = UNCPath(fromFile)
|
||||
self.file_cache.store(fromFile, targetFile)
|
||||
except NotUNCPathError as exc:
|
||||
fromFilePath = Path(fromFile)
|
||||
if fromFilePath.exists():
|
||||
targetFile.write_bytes(fromFilePath.read_bytes())
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['targetFile'] = targetFile
|
||||
logdata['fromFile'] = fromFile
|
||||
logdata['exc'] = exc
|
||||
log('W15', logdata)
|
||||
|
||||
def set_exe_file(self, targetFile, fromFile):
|
||||
if self.executable:
|
||||
return True
|
||||
if Path(fromFile).suffix in self.exe_check.get_list_markers():
|
||||
targetPath = targetFile.parent
|
||||
for i in self.exe_check.get_list_paths():
|
||||
if targetPath == Path(i):
|
||||
return True
|
||||
return False
|
||||
|
||||
def set_mod_file(self, targetFile, fromFile):
|
||||
if not targetFile.is_file():
|
||||
return
|
||||
if self.set_exe_file(targetFile, fromFile):
|
||||
if self.readOnly:
|
||||
shutil.os.chmod(targetFile, 0o555)
|
||||
else:
|
||||
shutil.os.chmod(targetFile, 0o755)
|
||||
else:
|
||||
if self.readOnly:
|
||||
shutil.os.chmod(targetFile, 0o444)
|
||||
else:
|
||||
shutil.os.chmod(targetFile, 0o644)
|
||||
|
||||
def _create_action(self):
|
||||
logdata = dict()
|
||||
for fromFile in self.fromPathFiles:
|
||||
targetFile = None
|
||||
|
||||
try:
|
||||
targetFile = self.get_target_file(self.targetPath, fromFile)
|
||||
if targetFile and not targetFile.exists():
|
||||
self.copy_target_file(targetFile, fromFile)
|
||||
if self.username:
|
||||
shutil.chown(targetFile, self.username)
|
||||
self.set_mod_file(targetFile, fromFile)
|
||||
logdata['File'] = targetFile
|
||||
log('D191', logdata)
|
||||
except Exception as exc:
|
||||
logdata['exc'] = exc
|
||||
logdata['fromPath'] = fromFile
|
||||
logdata['targetPath'] = self.targetPath
|
||||
logdata['targetFile'] = targetFile
|
||||
log('D164', logdata)
|
||||
|
||||
def _delete_action(self):
|
||||
list_target = [self.targetPath.name]
|
||||
if self.is_pattern(self.targetPath.name) and self.targetPath.parent.exists() and self.targetPath.parent.is_dir():
|
||||
list_target = fnmatch.filter([str(x.name) for x in self.targetPath.parent.iterdir() if x.is_file()], self.targetPath.name)
|
||||
logdata = dict()
|
||||
for targetFile in list_target:
|
||||
targetFile = self.targetPath.parent.joinpath(targetFile)
|
||||
try:
|
||||
if targetFile.exists():
|
||||
targetFile.unlink()
|
||||
logdata['File'] = targetFile
|
||||
log('D193', logdata)
|
||||
|
||||
except Exception as exc:
|
||||
logdata['exc'] = exc
|
||||
logdata['targetPath'] = self.targetPath
|
||||
logdata['targetFile'] = targetFile
|
||||
log('D165', logdata)
|
||||
|
||||
def _update_action(self):
|
||||
logdata = dict()
|
||||
for fromFile in self.fromPathFiles:
|
||||
targetFile = self.get_target_file(self.targetPath, fromFile)
|
||||
try:
|
||||
self.copy_target_file(targetFile, fromFile)
|
||||
if self.username:
|
||||
shutil.chown(self.targetPath, self.username)
|
||||
self.set_mod_file(targetFile, fromFile)
|
||||
logdata['File'] = targetFile
|
||||
log('D192', logdata)
|
||||
except Exception as exc:
|
||||
logdata['exc'] = exc
|
||||
logdata['fromPath'] = self.fromPath
|
||||
logdata['targetPath'] = self.targetPath
|
||||
logdata['targetFile'] = targetFile
|
||||
log('D166', logdata)
|
||||
|
||||
def act(self):
|
||||
if self.action == FileAction.CREATE:
|
||||
self._create_action()
|
||||
if self.action == FileAction.UPDATE:
|
||||
self._update_action()
|
||||
if self.action == FileAction.DELETE:
|
||||
self._delete_action()
|
||||
if self.action == FileAction.REPLACE:
|
||||
self._delete_action()
|
||||
self._create_action()
|
||||
|
||||
def is_pattern(self, name):
|
||||
if name.find('*') != -1 or name.find('?') != -1:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def get_list_files(self):
|
||||
logdata = dict()
|
||||
logdata['targetPath'] = str(self.targetPath)
|
||||
fromFilePath = Path(self.fromPath)
|
||||
if not self.is_pattern(fromFilePath.name):
|
||||
self.fromPathFiles.append(self.fromPath)
|
||||
else:
|
||||
fromPathDir = self.fromPath[:self.fromPath.rfind('/')]
|
||||
|
||||
try:
|
||||
uri_path = UNCPath(fromPathDir)
|
||||
ls_files = self.file_cache.get_ls_smbdir(fromPathDir)
|
||||
if ls_files:
|
||||
filtered_ls_files = fnmatch.filter(ls_files, fromFilePath.name)
|
||||
if filtered_ls_files:
|
||||
self.fromPathFiles = [fromPathDir + '/' + file_s for file_s in filtered_ls_files]
|
||||
except NotUNCPathError as exc:
|
||||
try:
|
||||
exact_path = Path(fromPathDir)
|
||||
if exact_path.is_dir():
|
||||
self.fromPathFiles = [str(fromFile) for fromFile in exact_path.iterdir() if fromFile.is_file()]
|
||||
except Exception as exc:
|
||||
logdata['fromPath'] = self.fromPath
|
||||
logdata['exc'] = exc
|
||||
log('W3316', logdata)
|
||||
except Exception as exc:
|
||||
logdata['fromPath'] = self.fromPath
|
||||
logdata['exc'] = exc
|
||||
log('W3317', logdata)
|
||||
|
||||
def check_target_path(path_to_check, username = None):
|
||||
'''
|
||||
Function for checking the correctness of the path
|
||||
'''
|
||||
if not path_to_check:
|
||||
return None
|
||||
|
||||
checking = Path(path_to_check)
|
||||
rootpath = Path('/')
|
||||
if username:
|
||||
rootpath = Path(get_homedir(username))
|
||||
|
||||
return rootpath.joinpath(checking)
|
||||
|
||||
class Execution_check():
|
||||
|
||||
__etension_marker_key_name = 'ExtensionMarker'
|
||||
__marker_usage_path_key_name = 'MarkerUsagePath'
|
||||
__hklm_branch = 'Software\\BaseALT\\Policies\\GroupPolicies\\Files'
|
||||
|
||||
def __init__(self, storage):
|
||||
etension_marker_branch = '{}\\{}%'.format(self.__hklm_branch, self.__etension_marker_key_name)
|
||||
marker_usage_path_branch = '{}\\{}%'.format(self.__hklm_branch, self.__marker_usage_path_key_name)
|
||||
self.etension_marker = storage.filter_hklm_entries(etension_marker_branch)
|
||||
self.marker_usage_path = storage.filter_hklm_entries(marker_usage_path_branch)
|
||||
self.list_paths = list()
|
||||
self.list_markers = list()
|
||||
for marker in self.etension_marker:
|
||||
self.list_markers.append(marker.data)
|
||||
for usage_path in self.marker_usage_path:
|
||||
self.list_paths.append(usage_path.data)
|
||||
|
||||
def get_list_paths(self):
|
||||
return self.list_paths
|
||||
|
||||
def get_list_markers(self):
|
||||
return self.list_markers
|
@ -1,97 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from enum import Enum
|
||||
import subprocess
|
||||
|
||||
def getprops(param_list):
|
||||
props = dict()
|
||||
|
||||
for entry in param_list:
|
||||
lentry = entry.lower()
|
||||
if lentry.startswith('action'):
|
||||
props['action'] = lentry.rpartition('=')[2]
|
||||
if lentry.startswith('protocol'):
|
||||
props['protocol'] = lentry.rpartition('=')[2]
|
||||
if lentry.startswith('dir'):
|
||||
props['dir'] = lentry.rpartition('=')[2]
|
||||
|
||||
return props
|
||||
|
||||
|
||||
def get_ports(param_list):
|
||||
portlist = list()
|
||||
|
||||
for entry in param_list:
|
||||
lentry = entry.lower()
|
||||
if lentry.startswith('lport'):
|
||||
port = lentry.rpartition('=')[2]
|
||||
portlist.append(port)
|
||||
|
||||
return portlist
|
||||
|
||||
class PortState(Enum):
|
||||
OPEN = 'Allow'
|
||||
CLOSE = 'Deny'
|
||||
|
||||
class Protocol(Enum):
|
||||
TCP = 'tcp'
|
||||
UDP = 'udp'
|
||||
|
||||
class FirewallMode(Enum):
|
||||
ROUTER = 'router'
|
||||
GATEWAY = 'gateway'
|
||||
HOST = 'host'
|
||||
|
||||
# This shi^Wthing named alterator-net-iptables is unable to work in
|
||||
# multi-threaded environment
|
||||
class FirewallRule:
|
||||
__alterator_command = '/usr/bin/alterator-net-iptables'
|
||||
|
||||
def __init__(self, data):
|
||||
data_array = data.split('|')
|
||||
|
||||
self.version = data_array[0]
|
||||
self.ports = get_ports(data_array[1:])
|
||||
self.properties = getprops(data_array[1:])
|
||||
|
||||
def apply(self):
|
||||
tcp_command = []
|
||||
udp_command = []
|
||||
|
||||
for port in self.ports:
|
||||
tcp_port = '{}'.format(port)
|
||||
udp_port = '{}'.format(port)
|
||||
|
||||
if PortState.OPEN.value == self.properties['action']:
|
||||
tcp_port = '+' + tcp_port
|
||||
udp_port = '+' + udp_port
|
||||
if PortState.CLOSE.value == self.properties['action']:
|
||||
tcp_port = '-' + tcp_port
|
||||
udp_port = '-' + udp_port
|
||||
|
||||
portcmd = [
|
||||
self.__alterator_command
|
||||
, 'write'
|
||||
, '-m', FirewallMode.HOST.value
|
||||
, '-t', tcp_port
|
||||
, '-u', udp_port
|
||||
]
|
||||
proc = subprocess.Popen(portcmd)
|
||||
proc.wait()
|
||||
|
@ -1,95 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
from gpt.folders import (
|
||||
FileAction
|
||||
, action_letter2enum
|
||||
)
|
||||
from util.windows import expand_windows_var
|
||||
from util.util import get_homedir
|
||||
|
||||
def remove_dir_tree(path, delete_files=False, delete_folder=False, delete_sub_folders=False):
|
||||
content = list()
|
||||
for entry in path.iterdir():
|
||||
content.append(entry)
|
||||
if entry.is_file() and delete_files:
|
||||
entry.unlink()
|
||||
content.remove(entry)
|
||||
if entry.is_dir() and delete_sub_folders:
|
||||
content.remove(entry)
|
||||
content.extend(remove_dir_tree(entry, delete_files, delete_folder, delete_sub_folders))
|
||||
|
||||
if delete_folder and not content:
|
||||
path.rmdir()
|
||||
|
||||
return content
|
||||
|
||||
def str2bool(boolstr):
|
||||
if isinstance(boolstr, bool):
|
||||
return boolstr
|
||||
elif boolstr and boolstr.lower() in ['true', 'yes', '1']:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class Folder:
|
||||
def __init__(self, folder_object, username=None):
|
||||
folder_path = expand_windows_var(folder_object.path, username).replace('\\', '/').replace('//', '/')
|
||||
if username:
|
||||
folder_path = folder_path.replace(get_homedir(username), '')
|
||||
self.folder_path = Path(get_homedir(username)).joinpath(folder_path if folder_path [0] != '/' else folder_path [1:])
|
||||
else:
|
||||
self.folder_path = Path(folder_path)
|
||||
self.action = action_letter2enum(folder_object.action)
|
||||
self.delete_files = str2bool(folder_object.delete_files)
|
||||
self.delete_folder = str2bool(folder_object.delete_folder)
|
||||
self.delete_sub_folders = str2bool(folder_object.delete_sub_folders)
|
||||
self.hidden_folder = str2bool(folder_object.hidden_folder)
|
||||
|
||||
def _create_action(self):
|
||||
self.folder_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def _delete_action(self):
|
||||
if self.folder_path.exists():
|
||||
if self.action == FileAction.REPLACE:
|
||||
self.delete_folder = True
|
||||
remove_dir_tree(self.folder_path,
|
||||
self.delete_files,
|
||||
self.delete_folder,
|
||||
self.delete_sub_folders)
|
||||
|
||||
def act(self):
|
||||
if self.hidden_folder == True and str(self.folder_path.name)[0] != '.':
|
||||
path_components = list(self.folder_path.parts)
|
||||
path_components[-1] = '.' + path_components[-1]
|
||||
new_folder_path = Path(*path_components)
|
||||
self.folder_path = new_folder_path
|
||||
if self.action == FileAction.CREATE:
|
||||
self._create_action()
|
||||
if self.action == FileAction.UPDATE:
|
||||
self._create_action()
|
||||
if self.action == FileAction.DELETE:
|
||||
self._delete_action()
|
||||
if self.action == FileAction.REPLACE:
|
||||
self._delete_action()
|
||||
self._create_action()
|
||||
|
@ -1,7 +1,7 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2021 BaseALT Ltd.
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@ -21,156 +21,47 @@ import os
|
||||
import logging
|
||||
from gi.repository import Gio, GLib
|
||||
|
||||
from util.logging import slogm, log
|
||||
from util.logging import slogm
|
||||
|
||||
class system_gsetting:
|
||||
def __init__(self, schema, path, value, lock, helper_function=None):
|
||||
__global_schema = '/usr/share/glib-2.0/schemas'
|
||||
|
||||
def __init__(self, schema, path, value, override_priority='0'):
|
||||
self.schema = schema
|
||||
self.path = path
|
||||
self.value = value
|
||||
self.lock = lock
|
||||
self.helper_function = helper_function
|
||||
|
||||
def apply(self, settings, config, locks):
|
||||
try:
|
||||
config.add_section(self.schema)
|
||||
except configparser.DuplicateSectionError:
|
||||
pass
|
||||
|
||||
value = self.value
|
||||
if self.helper_function:
|
||||
value = self.helper_function(self.schema, self.path, value)
|
||||
result = glib_value(self.schema, self.path, value, settings)
|
||||
config.set(self.schema, self.path, str(result))
|
||||
|
||||
if self.lock:
|
||||
lock_path = dconf_path(settings, self.path)
|
||||
locks.append(lock_path)
|
||||
|
||||
class system_gsettings:
|
||||
__path_local_dir = '/etc/dconf/db/local.d'
|
||||
__path_locks = '/etc/dconf/db/policy.d/locks/policy'
|
||||
__path_profile = '/etc/dconf/profile/user'
|
||||
__profile_data = 'user-db:user\nsystem-db:policy\nsystem-db:local\n'
|
||||
|
||||
def __init__(self, override_file_path):
|
||||
self.gsettings = list()
|
||||
self.locks = list()
|
||||
self.override_file_path = override_file_path
|
||||
|
||||
def append(self, schema, path, data, lock, helper):
|
||||
if check_existing_gsettings(schema, path):
|
||||
self.gsettings.append(system_gsetting(schema, path, data, lock, helper))
|
||||
else:
|
||||
logdata = dict()
|
||||
logdata['schema'] = schema
|
||||
logdata['path'] = path
|
||||
logdata['data'] = data
|
||||
logdata['lock'] = lock
|
||||
log('D150', logdata)
|
||||
self.override_priority = override_priority
|
||||
self.filename = '{}_policy.gschema.override'.format(self.override_priority)
|
||||
self.file_path = os.path.join(self.__global_schema, self.filename)
|
||||
|
||||
def apply(self):
|
||||
config = configparser.ConfigParser()
|
||||
try:
|
||||
config.read(self.file_path)
|
||||
except Exception as exc:
|
||||
logging.error(slogm(exc))
|
||||
config.add_section(self.schema)
|
||||
config.set(self.schema, self.path, self.value)
|
||||
|
||||
for gsetting in self.gsettings:
|
||||
logdata = dict()
|
||||
logdata['gsetting.schema'] = gsetting.schema
|
||||
logdata['gsetting.path'] = gsetting.path
|
||||
logdata['gsetting.value'] = gsetting.value
|
||||
logdata['gsetting.lock'] = gsetting.lock
|
||||
settings = Gio.Settings(schema=gsetting.schema)
|
||||
log('D89', logdata)
|
||||
gsetting.apply(settings, config, self.locks)
|
||||
|
||||
with open(self.override_file_path, 'w') as f:
|
||||
with open(self.file_path, 'w') as f:
|
||||
config.write(f)
|
||||
|
||||
os.makedirs(self.__path_local_dir, mode=0o755, exist_ok=True)
|
||||
os.makedirs(os.path.dirname(self.__path_locks), mode=0o755, exist_ok=True)
|
||||
os.makedirs(os.path.dirname(self.__path_profile), mode=0o755, exist_ok=True)
|
||||
try:
|
||||
os.remove(self.__path_locks)
|
||||
except OSError as error:
|
||||
pass
|
||||
|
||||
file_locks = open(self.__path_locks,'w')
|
||||
for lock in self.locks:
|
||||
file_locks.write(lock +'\n')
|
||||
file_locks.close()
|
||||
|
||||
profile = open(self.__path_profile ,'w')
|
||||
profile.write(self.__profile_data)
|
||||
profile.close()
|
||||
|
||||
def glib_map(value, glib_type):
    result_value = value

    if glib_type == 'i' or glib_type == 'b' or glib_type == 'q':
        result_value = GLib.Variant(glib_type, int(value))
    else:
        result_value = GLib.Variant(glib_type, value)

    return result_value

def dconf_path(settings, path):
    return settings.get_property("path") + path

def glib_value(schema, path, value, settings):
    # Get the key to modify
    key = settings.get_value(path)
    # Query the data type for the key
    glib_value_type = key.get_type_string()
    # Build the new value with the determined type
    return glib_map(value, glib_value_type)

def check_existing_gsettings (schema, path):
    source = Gio.SettingsSchemaSource.get_default()
    sourceSchema = (source.lookup(schema, False))
    if bool(sourceSchema) and sourceSchema.has_key(path):
        return True
    else:
        return False

class user_gsettings:
|
||||
def __init__(self):
|
||||
self.gsettings = list()
|
||||
|
||||
def append(self, schema, path, value, helper=None):
|
||||
if check_existing_gsettings(schema, path):
|
||||
self.gsettings.append(user_gsetting(schema, path, value, helper))
|
||||
else:
|
||||
logdata = dict()
|
||||
logdata['schema'] = schema
|
||||
logdata['path'] = path
|
||||
logdata['data'] = value
|
||||
log('D151', logdata)
|
||||
|
||||
def apply(self):
|
||||
for gsetting in self.gsettings:
|
||||
logdata = dict()
|
||||
logdata['gsetting.schema'] = gsetting.schema
|
||||
logdata['gsetting.path'] = gsetting.path
|
||||
logdata['gsetting.value'] = gsetting.value
|
||||
log('D85', logdata)
|
||||
gsetting.apply()
|
||||
|
||||
|
||||
class user_gsetting:
|
||||
def __init__(self, schema, path, value, helper_function=None):
|
||||
def __init__(self, schema, path, value):
|
||||
self.schema = schema
|
||||
self.path = path
|
||||
self.value = value
|
||||
self.helper_function = helper_function
|
||||
|
||||
def apply(self):
|
||||
# Access the current schema
|
||||
settings = Gio.Settings(schema=self.schema)
|
||||
# Update result with helper function
|
||||
value = self.value
|
||||
if self.helper_function:
|
||||
value = self.helper_function(self.schema, self.path, value)
|
||||
# Get typed value by schema
|
||||
result = glib_value(self.schema, self.path, value, settings)
|
||||
# Set the value
|
||||
settings.set_value(self.path, result)
|
||||
settings.sync()
|
||||
source = Gio.SettingsSchemaSource.get_default()
|
||||
schema = source.lookup(self.schema, True)
|
||||
key = schema.get_key(self.path)
|
||||
gvformat = key.get_value_type()
|
||||
val = GLib.Variant(gvformat.dup_string(), self.value)
|
||||
schema.set_value(self.path, val)
|
||||
#gso = Gio.Settings.new(self.schema)
|
||||
#variants = gso.get_property(self.path)
|
||||
#if (variants.has_key(self.path)):
|
||||
# key = variants.get_key(self.path)
|
||||
# print(key.get_range())
|
||||
|
||||
|
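For reference, a minimal self-contained sketch of what the glib_value()/glib_map() helpers above do: read the GVariant type string of an existing key and rebuild a typed value from raw policy data. The schema and key names are examples only, not taken from the policy set.

from gi.repository import Gio, GLib

def typed_variant(schema_id, key, raw):
    # Mirror glib_value(): reuse the current value's type string, then rebuild it
    settings = Gio.Settings(schema=schema_id)
    type_string = settings.get_value(key).get_type_string()
    if type_string in ('i', 'b', 'q'):
        return GLib.Variant(type_string, int(raw))
    return GLib.Variant(type_string, raw)

# Example call (illustrative schema/key):
# typed_variant('org.gnome.desktop.interface', 'clock-show-date', '1')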
@ -1,114 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


from gpt.folders import (
    FileAction
    , action_letter2enum
)
from util.logging import log
from pathlib import Path
from util.windows import expand_windows_var
from util.util import get_homedir
from util.gpoa_ini_parsing import GpoaConfigObj


class Ini_file:
    def __init__(self, ini_obj, username=None):
        path = expand_windows_var(ini_obj.path, username).replace('\\', '/')
        self.path = check_path(path, username)
        if not self.path:
            logdata = {'path': ini_obj.path}
            log('D175', logdata)
            return None
        self.section = ini_obj.section
        self.action = action_letter2enum(ini_obj.action)
        self.key = ini_obj.property
        self.value = ini_obj.value
        try:
            self.config = GpoaConfigObj(str(self.path), unrepr=False)
        except Exception as exc:
            logdata = {'exc': exc}
            log('D176', logdata)
            return

        self.act()

    def _create_action(self):
        if self.path.is_dir():
            return
        if self.section not in self.config:
            self.config[self.section] = dict()

        self.config[self.section][self.key] = self.value
        self.config.write()

    def _delete_action(self):
        if not self.path.exists() or self.path.is_dir():
            return
        if not self.section:
            self.path.unlink()
            return
        if self.section in self.config:
            if not self.key:
                self.config.pop(self.section)
            elif self.key in self.config[self.section]:
                self.config[self.section].pop(self.key)
            self.config.write()

    def act(self):
        try:
            if self.action == FileAction.CREATE:
                self._create_action()
            if self.action == FileAction.UPDATE:
                self._create_action()
            if self.action == FileAction.DELETE:
                self._delete_action()
            if self.action == FileAction.REPLACE:
                self._create_action()
        except Exception as exc:
            logdata = dict()
            logdata['action'] = self.action
            logdata['exc'] = exc
            log('W23', logdata)


def check_path(path_to_check, username = None):
    '''
    Function for checking the right path for Inifile
    '''
    checking = Path(path_to_check)
    if checking.exists():
        if username and path_to_check == '/':
            return Path(get_homedir(username))
        return checking
    #Check for path directory without '/nameIni' suffix
    elif (len(path_to_check.split('/')) > 2
          and Path(path_to_check.replace(path_to_check.split('/')[-1], '')).is_dir()):
        return checking
    elif username:
        target_path = Path(get_homedir(username))
        res = target_path.joinpath(path_to_check
                                   if path_to_check[0] != '/'
                                   else path_to_check[1:])
        return check_path(str(res))
    else:
        return False
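A simplified, self-contained rendering of the path resolution performed by check_path() above; this is only a sketch, and the home directory and file names are placeholders:

from pathlib import Path

def resolve_ini_path(path_to_check, home=None):
    # Prefer an existing path, then an existing parent directory,
    # then retry the same path relative to the user's home directory
    candidate = Path(path_to_check)
    if candidate.exists():
        return candidate
    if candidate.parent.is_dir():
        return candidate
    if home and not path_to_check.startswith('/'):
        return resolve_ini_path(str(Path(home) / path_to_check))
    return False

# Example: resolve_ini_path('conf/app.ini', home='/home/ivanov')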
@ -1,90 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import subprocess

from gpt.folders import (
    FileAction
    , action_letter2enum
)
from util.logging import log
from util.windows import expand_windows_var


class Networkshare:

    def __init__(self, networkshare_obj, username = None):
        self.net_full_cmd = ['/usr/bin/net', 'usershare']
        self.net_cmd_check = ['/usr/bin/net', 'usershare', 'list']
        self.cmd = list()
        self.name = networkshare_obj.name
        self.path = expand_windows_var(networkshare_obj.path, username).replace('\\', '/') if networkshare_obj.path else None

        self.action = action_letter2enum(networkshare_obj.action)
        self.allRegular = networkshare_obj.allRegular
        self.comment = networkshare_obj.comment
        self.limitUsers = networkshare_obj.limitUsers
        self.abe = networkshare_obj.abe
        self._guest = 'guest_ok=y'
        self.acl = 'Everyone:'
        self.act()

    def check_list_net(self):
        try:
            res = subprocess.check_output(self.net_cmd_check, encoding='utf-8')
            return res
        except Exception as exc:
            return exc

    def _run_net_full_cmd(self):
        logdata = dict()
        try:
            res = subprocess.check_output(self.net_full_cmd, stderr=subprocess.DEVNULL, encoding='utf-8')
            if res:
                logdata['cmd'] = self.net_full_cmd
                logdata['answer'] = res
                log('D190', logdata)
        except Exception as exc:
            logdata['cmd'] = self.net_full_cmd
            logdata['exc'] = exc
            log('D182', logdata)

    def _create_action(self):
        self.net_full_cmd.append('add')
        self.net_full_cmd.append(self.name)
        self.net_full_cmd.append(self.path)
        self.net_full_cmd.append(self.comment)
        self.net_full_cmd.append(self.acl + 'F')
        self.net_full_cmd.append(self._guest)
        self._run_net_full_cmd()

    def _delete_action(self):
        self.net_full_cmd.append('delete')
        self.net_full_cmd.append(self.name)
        self._run_net_full_cmd()

    def act(self):
        if self.action == FileAction.CREATE:
            self._create_action()
        if self.action == FileAction.UPDATE:
            self._create_action()
        if self.action == FileAction.DELETE:
            self._delete_action()
        if self.action == FileAction.REPLACE:
            self._create_action()
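For clarity, the command that Networkshare._create_action() above assembles before handing it to _run_net_full_cmd() looks like this; the share name, path and comment below are placeholders, not real policy values:

import subprocess

cmd = ['/usr/bin/net', 'usershare', 'add',
       'scans',                  # share name (placeholder)
       '/srv/share/scans',       # expanded path (placeholder)
       'Scanned documents',      # comment (placeholder)
       'Everyone:F',             # self.acl + 'F': full access for everyone
       'guest_ok=y']             # self._guest
# subprocess.check_output(cmd, stderr=subprocess.DEVNULL, encoding='utf-8')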
@ -18,8 +18,9 @@

import os
import jinja2
import logging

from util.logging import log
from util.logging import slogm

class polkit:
    __template_path = '/usr/share/gpupdate/templates'
@ -27,29 +28,13 @@ class polkit:
    __template_loader = jinja2.FileSystemLoader(searchpath=__template_path)
    __template_environment = jinja2.Environment(loader=__template_loader)

    def __init__(self, template_name, arglist, username=None):
    def __init__(self, template_name, arglist):
        self.template_name = template_name
        self.args = arglist
        self.username = username
        self.infilename = '{}.rules.j2'.format(self.template_name)
        if self.username:
            self.outfile = os.path.join(self.__policy_dir, '{}.{}.rules'.format(self.template_name, self.username))
        else:
            self.outfile = os.path.join(self.__policy_dir, '{}.rules'.format(self.template_name))

    def _is_empty(self):
        for key, item in self.args.items():
            if key == 'User':
                continue
            elif item:
                return False
        return True
        self.outfile = os.path.join(self.__policy_dir, '{}.rules'.format(self.template_name))

    def generate(self):
        if self._is_empty():
            if os.path.isfile(self.outfile):
                os.remove(self.outfile)
            return
        try:
            template = self.__template_environment.get_template(self.infilename)
            text = template.render(**self.args)
@ -57,13 +42,7 @@ class polkit:
            with open(self.outfile, 'w') as f:
                f.write(text)

            logdata = dict()
            logdata['file'] = self.outfile
            logdata['arguments'] = self.args
            log('D77', logdata)
            logging.debug(slogm('Generated file {} with arguments {}'.format(self.outfile, self.args)))
        except Exception as exc:
            logdata = dict()
            logdata['file'] = self.outfile
            logdata['arguments'] = self.args
            log('E44', logdata)
            logging.error(slogm('Unable to generate file {} from {}'.format(self.outfile, self.infilename)))

@ -16,10 +16,16 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from util.rpm import (
    install_rpm,
    remove_rpm
)

from .applier_backend import applier_backend
class rpm:
    def __init__(self, name, action):
        self.name = name
        self.action = action

class freeipa_backend(applier_backend):
    def __init__(self):
    def apply(self):
        pass
@ -19,7 +19,7 @@
import dbus
import logging

from util.logging import slogm, log
from util.logging import slogm

class systemd_unit:
    def __init__(self, unit_name, state):
@ -39,9 +39,7 @@ class systemd_unit:
            self.manager.UnmaskUnitFiles([self.unit_name], dbus.Boolean(False))
            self.manager.EnableUnitFiles([self.unit_name], dbus.Boolean(False), dbus.Boolean(True))
            self.manager.StartUnit(self.unit_name, 'replace')
            logdata = dict()
            logdata['unit'] = self.unit_name
            log('I6', logdata)
            logging.info(slogm('Starting systemd unit: {}'.format(self.unit_name)))

            # In case the service has 'RestartSec' property set it
            # switches to 'activating (auto-restart)' state instead of
@ -49,27 +47,17 @@ class systemd_unit:
            service_state = self._get_state()

            if not service_state in ['active', 'activating']:
                service_timer_name = self.unit_name.replace(".service", ".timer")
                self.unit = self.manager.LoadUnit(dbus.String(service_timer_name))
                service_state = self._get_state()
                if not service_state in ['active', 'activating']:
                    logdata = dict()
                    logdata['unit'] = self.unit_name
                    log('E46', logdata)
                logging.error(slogm('Unable to start systemd unit {}'.format(self.unit_name)))
        else:
            self.manager.StopUnit(self.unit_name, 'replace')
            self.manager.DisableUnitFiles([self.unit_name], dbus.Boolean(False))
            self.manager.MaskUnitFiles([self.unit_name], dbus.Boolean(False), dbus.Boolean(True))
            logdata = dict()
            logdata['unit'] = self.unit_name
            log('I6', logdata)
            logging.info(slogm('Stopping systemd unit: {}'.format(self.unit_name)))

            service_state = self._get_state()

            if not service_state in ['stopped', 'deactivating', 'inactive']:
                logdata = dict()
                logdata['unit'] = self.unit_name
                log('E46', logdata)
            if not service_state in ['stopped']:
                logging.error(slogm('Unable to stop systemd unit {}'.format(self.unit_name)))

    def _get_state(self):
        '''
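As a side note, the active/inactive check that _get_state() feeds into can be reproduced from outside the applier for debugging. This sketch is not how gpoa queries the state (it goes through D-Bus, as shown above); the unit name is only an example:

import subprocess

def unit_is_active(unit_name):
    # 'systemctl is-active --quiet' exits with 0 only when the unit is active
    res = subprocess.run(['systemctl', 'is-active', '--quiet', unit_name])
    return res.returncode == 0

# Example: unit_is_active('gpupdate.timer')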
@ -1,23 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from enum import Enum

class WallpaperStretchMode(Enum):
    STRETCH = 2
@ -16,199 +16,122 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from .applier_frontend import applier_frontend
|
||||
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
from util.logging import log
|
||||
from util.util import is_machine_name, string_to_literal_eval
|
||||
|
||||
from util.logging import slogm
|
||||
from util.util import is_machine_name
|
||||
|
||||
class chromium_applier(applier_frontend):
|
||||
__module_name = 'ChromiumApplier'
|
||||
__module_enabled = True
|
||||
__module_experimental = False
|
||||
__registry_branch = 'Software/Policies/Google/Chrome'
|
||||
__registry_branch = 'Software\\Policies\\Google\\Chrome'
|
||||
__managed_policies_path = '/etc/chromium/policies/managed'
|
||||
__recommended_policies_path = '/etc/chromium/policies/recommended'
|
||||
# JSON file where Chromium stores its settings (and which is
|
||||
# overwritten every exit.
|
||||
__user_settings = '.config/chromium/Default'
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
self._is_machine_name = is_machine_name(self.username)
|
||||
chromium_filter = '{}%'.format(self.__registry_branch)
|
||||
self.chromium_keys = self.storage.filter_hklm_entries(chromium_filter)
|
||||
self.policies = dict()
|
||||
|
||||
self.policies_json = dict()
|
||||
def get_hklm_string_entry(self, hive_subkey):
|
||||
query_str = '{}\\{}'.format(self.__registry_branch, hive_subkey)
|
||||
return self.storage.get_hklm_entry(query_str)
|
||||
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
def get_hkcu_string_entry(self, hive_subkey):
|
||||
query_str = '{}\\{}'.format(self.__registry_branch, hive_subkey)
|
||||
return self.storage.get_hkcu_entry(sid, query_str)
|
||||
|
||||
def get_hklm_string_entry_default(self, hive_subkey, default):
|
||||
'''
|
||||
Return row from HKLM table identified by hive_subkey as string
|
||||
or return supplied default value if such hive_subkey is missing.
|
||||
'''
|
||||
|
||||
defval = str(default)
|
||||
response = self.get_hklm_string_entry(hive_subkey)
|
||||
|
||||
if response:
|
||||
return response.data
|
||||
|
||||
return defval
|
||||
|
||||
def get_hkcu_string_entry_default(self, hive_subkey, default):
|
||||
defval = str(default)
|
||||
response = self.get_hkcu_string_entry(hive_subkey)
|
||||
if response:
|
||||
return response.data
|
||||
return defval
|
||||
|
||||
def set_policy(self, name, obj):
|
||||
if obj:
|
||||
self.policies[name] = obj
|
||||
logging.info(slogm('Chromium policy \'{}\' set to {}'.format(name, obj)))
|
||||
|
||||
def set_user_policy(self, name, obj):
|
||||
'''
|
||||
Please note that writing the user preferences file is not considered
good practice and is used mostly by various malware.
|
||||
'''
|
||||
if not self._is_machine_name:
|
||||
prefdir = os.path.join(util.get_homedir(self.username), self.__user_settings)
|
||||
os.makedirs(prefdir, exist_ok=True)
|
||||
|
||||
prefpath = os.path.join(prefdir, 'Preferences')
|
||||
util.mk_homedir_path(self.username, self.__user_settings)
|
||||
settings = dict()
|
||||
try:
|
||||
with open(prefpath, 'r') as f:
|
||||
settings = json.load(f)
|
||||
except FileNotFoundError as exc:
|
||||
logging.error(slogm('Chromium preferences file {} does not exist at the moment'.format(prefpath)))
|
||||
except:
|
||||
logging.error(slogm('Error during attempt to read Chromium preferences for user {}'.format(self.username)))
|
||||
|
||||
if obj:
|
||||
settings[name] = obj
|
||||
|
||||
with open(prefpath, 'w') as f:
|
||||
json.dump(settings, f)
|
||||
logging.info(slogm('Set user ({}) property \'{}\' to {}'.format(self.username, name, obj)))
|
||||
|
||||
def get_home_page(self, hkcu=False):
|
||||
response = self.get_hklm_string_entry('HomepageLocation')
|
||||
result = 'about:blank'
|
||||
if response:
|
||||
result = response.data
|
||||
return result
|
||||
|
||||
def machine_apply(self):
|
||||
'''
|
||||
Apply machine settings.
|
||||
'''
|
||||
self.set_policy('HomepageLocation', self.get_home_page())
|
||||
|
||||
destfile = os.path.join(self.__managed_policies_path, 'policies.json')
|
||||
|
||||
try:
|
||||
recommended__json = self.policies_json.pop('Recommended')
|
||||
except:
|
||||
recommended__json = {}
|
||||
|
||||
#Replacing all nested dictionaries with a list
|
||||
dict_item_to_list = (
|
||||
lambda target_dict :
|
||||
{key:[*val.values()] if type(val) == dict else string_to_literal_eval(val) for key,val in target_dict.items()}
|
||||
)
|
||||
os.makedirs(self.__managed_policies_path, exist_ok=True)
|
||||
with open(destfile, 'w') as f:
|
||||
json.dump(dict_item_to_list(self.policies_json), f)
|
||||
logdata = dict()
|
||||
logdata['destfile'] = destfile
|
||||
log('D97', logdata)
|
||||
|
||||
destfilerec = os.path.join(self.__recommended_policies_path, 'policies.json')
|
||||
os.makedirs(self.__recommended_policies_path, exist_ok=True)
|
||||
with open(destfilerec, 'w') as f:
|
||||
json.dump(dict_item_to_list(recommended__json), f)
|
||||
logdata = dict()
|
||||
logdata['destfilerec'] = destfilerec
|
||||
log('D97', logdata)
|
||||
json.dump(self.policies, f)
|
||||
logging.debug(slogm('Wrote Chromium preferences to {}'.format(destfile)))
|
||||
|
||||
def user_apply(self):
|
||||
'''
|
||||
Apply settings for the specified username.
|
||||
'''
|
||||
self.set_user_policy('homepage', self.get_home_page(hkcu=True))
|
||||
|
||||
def apply(self):
|
||||
'''
|
||||
All actual job done here.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D95')
|
||||
self.create_dict(self.chromium_keys)
|
||||
self.machine_apply()
|
||||
else:
|
||||
log('D96')
|
||||
|
||||
def get_valuename_typeint(self):
|
||||
'''
|
||||
List of keys resulting from parsing chrome.admx with parsing_chrom_admx_intvalues.py
|
||||
'''
|
||||
valuename_typeint = (['DefaultClipboardSetting',
|
||||
'DefaultCookiesSetting',
|
||||
'DefaultFileSystemReadGuardSetting',
|
||||
'DefaultFileSystemWriteGuardSetting',
|
||||
'DefaultGeolocationSetting',
|
||||
'DefaultImagesSetting',
|
||||
'DefaultInsecureContentSetting',
|
||||
'DefaultJavaScriptJitSetting',
|
||||
'DefaultJavaScriptSetting',
|
||||
'DefaultLocalFontsSetting',
|
||||
'DefaultNotificationsSetting',
|
||||
'DefaultPopupsSetting',
|
||||
'DefaultSensorsSetting',
|
||||
'DefaultSerialGuardSetting',
|
||||
'DefaultThirdPartyStoragePartitioningSetting',
|
||||
'DefaultWebBluetoothGuardSetting',
|
||||
'DefaultWebHidGuardSetting',
|
||||
'DefaultWebUsbGuardSetting',
|
||||
'DefaultWindowManagementSetting',
|
||||
'DefaultMediaStreamSetting',
|
||||
'DefaultWindowPlacementSetting',
|
||||
'ProxyServerMode',
|
||||
'ExtensionManifestV2Availability',
|
||||
'ExtensionUnpublishedAvailability',
|
||||
'BrowserSwitcherParsingMode',
|
||||
'CloudAPAuthEnabled',
|
||||
'AdsSettingForIntrusiveAdsSites',
|
||||
'AmbientAuthenticationInPrivateModesEnabled',
|
||||
'BatterySaverModeAvailability',
|
||||
'BrowserSignin',
|
||||
'ChromeVariations',
|
||||
'DeveloperToolsAvailability',
|
||||
'DownloadRestrictions',
|
||||
'ForceYouTubeRestrict',
|
||||
'HeadlessMode',
|
||||
'IncognitoModeAvailability',
|
||||
'IntranetRedirectBehavior',
|
||||
'NetworkPredictionOptions',
|
||||
'ProfilePickerOnStartupAvailability',
|
||||
'RelaunchNotification',
|
||||
'SafeSitesFilterBehavior',
|
||||
'UserAgentReduction',
|
||||
'BatterySaverModeAvailability_recommended',
|
||||
'DownloadRestrictions_recommended',
|
||||
'NetworkPredictionOptions_recommended',
|
||||
'PrintPostScriptMode',
|
||||
'PrintRasterizationMode',
|
||||
'ChromeFrameRendererSettings',
|
||||
'DefaultFileHandlingGuardSetting',
|
||||
'DefaultKeygenSetting',
|
||||
'DefaultPluginsSetting',
|
||||
'LegacySameSiteCookieBehaviorEnabled',
|
||||
'ForceMajorVersionToMinorPositionInUserAgent',
|
||||
'PasswordProtectionWarningTrigger',
|
||||
'SafeBrowsingProtectionLevel',
|
||||
'SafeBrowsingProtectionLevel_recommended',
|
||||
'RestoreOnStartup',
|
||||
'RestoreOnStartup_recommended'])
|
||||
return valuename_typeint
|
||||
|
||||
|
||||
def get_boolean(self,data):
|
||||
if data in ['0', 'false', None, 'none', 0]:
|
||||
return False
|
||||
if data in ['1', 'true', 1]:
|
||||
return True
|
||||
def get_parts(self, hivekeyname):
|
||||
'''
|
||||
Parse registry path string and leave key parameters
|
||||
'''
|
||||
parts = hivekeyname.replace(self.__registry_branch, '').split('/')
|
||||
return parts
|
||||
|
||||
|
||||
def create_dict(self, chromium_keys):
|
||||
'''
|
||||
Collect dictionaries from registry keys into a general dictionary
|
||||
'''
|
||||
counts = dict()
|
||||
#getting the list of keys to read as an integer
|
||||
valuename_typeint = self.get_valuename_typeint()
|
||||
for it_data in chromium_keys:
|
||||
branch = counts
|
||||
try:
|
||||
if type(it_data.data) is bytes:
|
||||
it_data.data = it_data.data.decode(encoding='utf-16').replace('\x00','')
|
||||
parts = self.get_parts(it_data.hive_key)
|
||||
#creating a nested dictionary from elements
|
||||
for part in parts[:-1]:
|
||||
branch = branch.setdefault(part, {})
|
||||
#dictionary key value initialization
|
||||
if it_data.type == 4:
|
||||
if it_data.valuename in valuename_typeint:
|
||||
branch[parts[-1]] = int(it_data.data)
|
||||
else:
|
||||
branch[parts[-1]] = self.get_boolean(it_data.data)
|
||||
else:
|
||||
if it_data.data[0] == '[' and it_data.data[-1] == ']':
|
||||
try:
|
||||
branch[parts[-1]] = json.loads(str(it_data.data))
|
||||
except:
|
||||
branch[parts[-1]] = str(it_data.data).replace('\\', '/')
|
||||
else:
|
||||
branch[parts[-1]] = str(it_data.data).replace('\\', '/')
|
||||
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['Exception'] = exc
|
||||
logdata['keyname'] = it_data.keyname
|
||||
log('D178', logdata)
|
||||
try:
|
||||
self.policies_json = counts['']
|
||||
except:
|
||||
self.policies_json = {}
|
||||
self.machine_apply()
|
||||
#if not self._is_machine_name:
|
||||
# logging.debug('Running user applier for Chromium')
|
||||
# self.user_apply()
|
||||
|
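The create_dict() routine above turns flat registry-style paths into the nested structure written to policies.json. A minimal self-contained sketch of that transformation; the entries below are illustrative and not taken from a real policy:

import json

def nest(entries, prefix='Software/Policies/Google/Chrome'):
    # Build a nested dict from 'hive_key -> value' pairs, as create_dict() does
    tree = {}
    for hive_key, value in entries.items():
        parts = hive_key.replace(prefix, '').strip('/').split('/')
        branch = tree
        for part in parts[:-1]:
            branch = branch.setdefault(part, {})
        branch[parts[-1]] = value
    return tree

entries = {'Software/Policies/Google/Chrome/HomepageLocation': 'https://example.org',
           'Software/Policies/Google/Chrome/Recommended/ShowHomeButton': True}
print(json.dumps(nest(entries), indent=2))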
@ -1,357 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2022 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import jinja2
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
import string
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from util.util import get_homedir
|
||||
from util.logging import log
|
||||
|
||||
def storage_get_drives(storage, sid):
|
||||
drives = storage.get_drives(sid)
|
||||
drive_list = list()
|
||||
|
||||
for drv_obj in drives:
|
||||
drive_list.append(drv_obj)
|
||||
|
||||
return drive_list
|
||||
|
||||
|
||||
def add_line_if_missing(filename, ins_line):
|
||||
with open(filename, 'r+') as f:
|
||||
for line in f:
|
||||
if ins_line == line.strip():
|
||||
break
|
||||
else:
|
||||
f.write(ins_line + '\n')
|
||||
f.flush()
|
||||
|
||||
def remove_chars_before_colon(input_string):
|
||||
if ":" in input_string:
|
||||
colon_index = input_string.index(":")
|
||||
result_string = input_string[colon_index + 1:]
|
||||
return result_string
|
||||
else:
|
||||
return input_string
|
||||
|
||||
def remove_escaped_quotes(input_string):
|
||||
result_string = input_string.replace('"', '').replace("'", '')
|
||||
return result_string
|
||||
|
||||
|
||||
class Drive_list:
|
||||
__alphabet = string.ascii_uppercase
|
||||
def __init__(self):
|
||||
self.dict_drives = dict()
|
||||
|
||||
def __get_letter(self, letter):
|
||||
slice_letters = set(self.__alphabet[self.__alphabet.find(letter) + 1:]) - set(self.dict_drives.keys())
|
||||
free_letters = sorted(slice_letters)
|
||||
if free_letters:
|
||||
return free_letters[0]
|
||||
else:
|
||||
return None
|
||||
|
||||
def append(self, drive:dict):
|
||||
cur_dir = drive['dir']
|
||||
if cur_dir not in set(self.dict_drives.keys()):
|
||||
if drive['action'] == 'D':
|
||||
return
|
||||
self.dict_drives[cur_dir] = drive
|
||||
return
|
||||
|
||||
else:
|
||||
if drive['action'] == 'C':
|
||||
if drive['useLetter'] == '1':
|
||||
return
|
||||
else:
|
||||
new_dir = self.__get_letter(cur_dir)
|
||||
if not new_dir:
|
||||
return
|
||||
drive['dir'] = new_dir
|
||||
self.dict_drives[new_dir] = drive
|
||||
return
|
||||
|
||||
if drive['action'] == 'U':
|
||||
self.dict_drives[cur_dir]['thisDrive'] = drive['thisDrive']
|
||||
self.dict_drives[cur_dir]['allDrives'] = drive['allDrives']
|
||||
self.dict_drives[cur_dir]['label'] = drive['label']
|
||||
self.dict_drives[cur_dir]['persistent'] = drive['persistent']
|
||||
self.dict_drives[cur_dir]['useLetter'] = drive['useLetter']
|
||||
return
|
||||
|
||||
if drive['action'] == 'R':
|
||||
self.dict_drives[cur_dir] = drive
|
||||
return
|
||||
if drive['action'] == 'D':
|
||||
if drive['useLetter'] == '1':
|
||||
self.dict_drives.pop(cur_dir, None)
|
||||
else:
|
||||
keys_set = set(self.dict_drives.keys())
|
||||
slice_letters = set(self.__alphabet[self.__alphabet.find(cur_dir):])
|
||||
for letter_dir in (keys_set & slice_letters):
|
||||
self.dict_drives.pop(letter_dir, None)
|
||||
|
||||
def __call__(self):
|
||||
return list(self.dict_drives.values())
|
||||
|
||||
def len(self):
|
||||
return len(self.dict_drives)
|
||||
|
||||
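A small usage sketch of the Drive_list container above: when two mappings target the same letter, a second 'C' (create) entry without useLetter set is shifted to the next free letter. The drive dictionaries are illustrative only:

drives = Drive_list()
drives.append({'dir': 'X', 'action': 'C', 'useLetter': '1', 'thisDrive': '', 'allDrives': '',
               'label': 'docs', 'persistent': '1', 'login': '', 'password': '', 'path': '//srv/docs'})
drives.append({'dir': 'X', 'action': 'C', 'useLetter': '0', 'thisDrive': '', 'allDrives': '',
               'label': 'scans', 'persistent': '1', 'login': '', 'password': '', 'path': '//srv/scans'})
# The second mapping ends up on 'Y' because 'X' is already taken
print([d['dir'] for d in drives()])   # ['X', 'Y']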
class cifs_applier(applier_frontend):
|
||||
__module_name = 'CIFSApplier'
|
||||
__module_enabled = True
|
||||
__module_experimental = False
|
||||
|
||||
def __init__(self, storage, sid):
|
||||
self.applier_cifs = cifs_applier_user(storage, sid, None)
|
||||
self.__module_enabled = check_enabled(
|
||||
storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D179')
|
||||
self.applier_cifs._admin_context_apply()
|
||||
else:
|
||||
log('D180')
|
||||
|
||||
class cifs_applier_user(applier_frontend):
|
||||
__module_name = 'CIFSApplierUser'
|
||||
__module_enabled = True
|
||||
__module_experimental = False
|
||||
__auto_file = '/etc/auto.master'
|
||||
__auto_dir = '/etc/auto.master.gpupdate.d'
|
||||
__template_path = '/usr/share/gpupdate/templates'
|
||||
__template_mountpoints = 'autofs_mountpoints.j2'
|
||||
__template_identity = 'autofs_identity.j2'
|
||||
__template_auto = 'autofs_auto.j2'
|
||||
__template_mountpoints_hide = 'autofs_mountpoints_hide.j2'
|
||||
__template_auto_hide = 'autofs_auto_hide.j2'
|
||||
__enable_home_link = 'Software\\BaseALT\\Policies\\GPUpdate\\DriveMapsHome'
|
||||
__enable_home_link_user = 'Software\\BaseALT\\Policies\\GPUpdate\\DriveMapsHomeUser'
|
||||
__target_mountpoint = '/media/gpupdate'
|
||||
__target_mountpoint_user = '/run/media'
|
||||
__mountpoint_dirname = 'drives.system'
|
||||
__mountpoint_dirname_user = 'drives'
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
self.state_home_link = False
|
||||
self.state_home_link_user = False
|
||||
|
||||
if username:
|
||||
self.home = self.__target_mountpoint_user + '/' + username
|
||||
self.state_home_link = self.check_enable_home_link(self.__enable_home_link)
|
||||
self.state_home_link_user = self.check_enable_home_link(self.__enable_home_link_user)
|
||||
else:
|
||||
self.home = self.__target_mountpoint
|
||||
|
||||
conf_file = '{}.conf'.format(sid)
|
||||
conf_hide_file = '{}_hide.conf'.format(sid)
|
||||
autofs_file = '{}.autofs'.format(sid)
|
||||
autofs_hide_file = '{}_hide.autofs'.format(sid)
|
||||
cred_file = '{}.creds'.format(sid)
|
||||
|
||||
self.auto_master_d = Path(self.__auto_dir)
|
||||
|
||||
self.user_config = self.auto_master_d / conf_file
|
||||
self.user_config_hide = self.auto_master_d / conf_hide_file
|
||||
if os.path.exists(self.user_config.resolve()):
|
||||
self.user_config.unlink()
|
||||
if os.path.exists(self.user_config_hide.resolve()):
|
||||
self.user_config_hide.unlink()
|
||||
self.user_autofs = self.auto_master_d / autofs_file
|
||||
self.user_autofs_hide = self.auto_master_d / autofs_hide_file
|
||||
if os.path.exists(self.user_autofs.resolve()):
|
||||
self.user_autofs.unlink()
|
||||
if os.path.exists(self.user_autofs_hide.resolve()):
|
||||
self.user_autofs_hide.unlink()
|
||||
self.user_creds = self.auto_master_d / cred_file
|
||||
|
||||
if username:
|
||||
self.mntTarget = self.__mountpoint_dirname_user
|
||||
else:
|
||||
self.mntTarget = self.__mountpoint_dirname
|
||||
|
||||
self.mount_dir = Path(os.path.join(self.home))
|
||||
self.drives = storage_get_drives(self.storage, self.sid)
|
||||
|
||||
self.template_loader = jinja2.FileSystemLoader(searchpath=self.__template_path)
|
||||
self.template_env = jinja2.Environment(loader=self.template_loader)
|
||||
|
||||
self.template_mountpoints = self.template_env.get_template(self.__template_mountpoints)
|
||||
self.template_indentity = self.template_env.get_template(self.__template_identity)
|
||||
self.template_auto = self.template_env.get_template(self.__template_auto)
|
||||
|
||||
self.template_mountpoints_hide = self.template_env.get_template(self.__template_mountpoints_hide)
|
||||
self.template_auto_hide = self.template_env.get_template(self.__template_auto_hide)
|
||||
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def check_enable_home_link(self, enable_home_link):
|
||||
if self.storage.get_hkcu_entry(self.sid, enable_home_link):
|
||||
data = self.storage.get_hkcu_entry(self.sid, enable_home_link).data
|
||||
return bool(int(data)) if data else None
|
||||
else:
|
||||
return False
|
||||
|
||||
def user_context_apply(self):
|
||||
'''
|
||||
Nothing to implement.
|
||||
'''
|
||||
pass
|
||||
|
||||
def _admin_context_apply(self):
|
||||
# Create /etc/auto.master.gpupdate.d directory
|
||||
self.auto_master_d.mkdir(parents=True, exist_ok=True)
|
||||
# Create user's destination mount directory
|
||||
self.mount_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Add pointer to /etc/auto.master.gpupdate.d in /etc/auto.master
|
||||
auto_destdir = '+dir:{}'.format(self.__auto_dir)
|
||||
add_line_if_missing(self.__auto_file, auto_destdir)
|
||||
|
||||
# Collect data for drive settings
|
||||
drive_list = Drive_list()
|
||||
for drv in self.drives:
|
||||
drive_settings = dict()
|
||||
drive_settings['dir'] = drv.dir
|
||||
drive_settings['login'] = drv.login
|
||||
drive_settings['password'] = drv.password
|
||||
drive_settings['path'] = remove_chars_before_colon(drv.path.replace('\\', '/'))
|
||||
drive_settings['action'] = drv.action
|
||||
drive_settings['thisDrive'] = drv.thisDrive
|
||||
drive_settings['allDrives'] = drv.allDrives
|
||||
drive_settings['label'] = remove_escaped_quotes(drv.label)
|
||||
drive_settings['persistent'] = drv.persistent
|
||||
drive_settings['useLetter'] = drv.useLetter
|
||||
|
||||
drive_list.append(drive_settings)
|
||||
|
||||
if drive_list.len() > 0:
|
||||
mount_settings = dict()
|
||||
mount_settings['drives'] = drive_list()
|
||||
mount_text = self.template_mountpoints.render(**mount_settings)
|
||||
|
||||
mount_text_hide = self.template_mountpoints_hide.render(**mount_settings)
|
||||
|
||||
with open(self.user_config.resolve(), 'w') as f:
|
||||
f.truncate()
|
||||
f.write(mount_text)
|
||||
f.flush()
|
||||
|
||||
with open(self.user_config_hide.resolve(), 'w') as f:
|
||||
f.truncate()
|
||||
f.write(mount_text_hide)
|
||||
f.flush()
|
||||
|
||||
autofs_settings = dict()
|
||||
autofs_settings['home_dir'] = self.home
|
||||
autofs_settings['mntTarget'] = self.mntTarget
|
||||
autofs_settings['mount_file'] = self.user_config.resolve()
|
||||
autofs_text = self.template_auto.render(**autofs_settings)
|
||||
|
||||
with open(self.user_autofs.resolve(), 'w') as f:
|
||||
f.truncate()
|
||||
f.write(autofs_text)
|
||||
f.flush()
|
||||
|
||||
autofs_settings['mount_file'] = self.user_config_hide.resolve()
|
||||
autofs_text = self.template_auto_hide.render(**autofs_settings)
|
||||
with open(self.user_autofs_hide.resolve(), 'w') as f:
|
||||
f.truncate()
|
||||
f.write(autofs_text)
|
||||
f.flush()
|
||||
|
||||
if self.username:
|
||||
self.update_drivemaps_home_links()
|
||||
|
||||
subprocess.check_call(['/bin/systemctl', 'restart', 'autofs'])
|
||||
|
||||
def update_drivemaps_home_links(self):
|
||||
dUser = Path(get_homedir(self.username)+'/net.' + self.__mountpoint_dirname_user)
|
||||
dUserHide = Path(get_homedir(self.username)+'/.net.' + self.__mountpoint_dirname_user)
|
||||
|
||||
if self.state_home_link_user:
|
||||
dUserMountpoint = Path(self.home).joinpath(self.__mountpoint_dirname_user)
|
||||
dUserMountpointHide = Path(self.home).joinpath('.' + self.__mountpoint_dirname_user)
|
||||
|
||||
if not dUser.exists():
|
||||
try:
|
||||
os.symlink(dUserMountpoint, dUser, True)
|
||||
except Exception as exc:
|
||||
log('D194', {'exc': exc})
|
||||
|
||||
if not dUserHide.exists():
|
||||
try:
|
||||
os.symlink(dUserMountpointHide, dUserHide, True)
|
||||
except Exception as exc:
|
||||
log('D196', {'exc': exc})
|
||||
else:
|
||||
if dUser.is_symlink() and dUser.owner() == 'root':
|
||||
dUser.unlink()
|
||||
if dUserHide.is_symlink() and dUserHide.owner() == 'root':
|
||||
dUserHide.unlink()
|
||||
|
||||
dMachine = Path(get_homedir(self.username)+'/net.' + self.__mountpoint_dirname)
|
||||
dMachineHide = Path(get_homedir(self.username)+'/.net.' + self.__mountpoint_dirname)
|
||||
|
||||
if self.state_home_link:
|
||||
dMachineMountpoint = Path(self.__target_mountpoint).joinpath(self.__mountpoint_dirname)
|
||||
dMachineMountpointHide = Path(self.__target_mountpoint).joinpath('.' + self.__mountpoint_dirname)
|
||||
|
||||
if not dMachine.exists():
|
||||
try:
|
||||
os.symlink(dMachineMountpoint, dMachine, True)
|
||||
except Exception as exc:
|
||||
log('D195', {'exc': exc})
|
||||
|
||||
if not dMachineHide.exists():
|
||||
try:
|
||||
os.symlink(dMachineMountpointHide, dMachineHide, True)
|
||||
except Exception as exc:
|
||||
log('D197', {'exc': exc})
|
||||
else:
|
||||
if dMachine.is_symlink() and dMachine.owner() == 'root':
|
||||
dMachine.unlink()
|
||||
if dMachineHide.is_symlink() and dMachineHide.owner() == 'root':
|
||||
dMachineHide.unlink()
|
||||
|
||||
def admin_context_apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D146')
|
||||
self._admin_context_apply()
|
||||
else:
|
||||
log('D147')
|
||||
|
@ -16,68 +16,36 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from .applier_frontend import applier_frontend
|
||||
from .appliers.control import control
|
||||
from util.logging import slogm, log
|
||||
from util.logging import slogm
|
||||
|
||||
import logging
|
||||
|
||||
class control_applier(applier_frontend):
|
||||
__module_name = 'ControlApplier'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
_registry_branch = 'Software/BaseALT/Policies/Control'
|
||||
_registry_branch = 'Software\\BaseALT\\Policies\\Control'
|
||||
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
self.control_settings = self.storage.filter_hklm_entries(self._registry_branch)
|
||||
self.control_settings = self.storage.filter_hklm_entries('Software\\BaseALT\\Policies\\Control%')
|
||||
self.controls = list()
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def run(self):
|
||||
for setting in self.control_settings:
|
||||
valuename = setting.hive_key.rpartition('/')[2]
|
||||
try:
|
||||
self.controls.append(control(valuename, int(setting.data)))
|
||||
logdata = dict()
|
||||
logdata['control'] = valuename
|
||||
logdata['value'] = setting.data
|
||||
log('I3', logdata)
|
||||
except ValueError as exc:
|
||||
try:
|
||||
ctl = control(valuename, setting.data)
|
||||
except Exception as exc:
|
||||
logdata = {'Exception': exc}
|
||||
log('I3', logdata)
|
||||
continue
|
||||
self.controls.append(ctl)
|
||||
logdata = dict()
|
||||
logdata['control'] = valuename
|
||||
logdata['with string value'] = setting.data
|
||||
log('I3', logdata)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['control'] = valuename
|
||||
logdata['exc'] = exc
|
||||
log('E39', logdata)
|
||||
#for e in polfile.pol_file.entries:
|
||||
# print('{}:{}:{}:{}:{}'.format(e.type, e.data, e.valuename, e.keyname))
|
||||
for cont in self.controls:
|
||||
cont.set_control_status()
|
||||
|
||||
def apply(self):
|
||||
'''
|
||||
Trigger control facility invocation.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D67')
|
||||
self.run()
|
||||
else:
|
||||
log('E40')
|
||||
for setting in self.control_settings:
|
||||
valuename = setting.hive_key.rpartition('\\')[2]
|
||||
try:
|
||||
self.controls.append(control(valuename, int(setting.data)))
|
||||
logging.info(slogm('Working with control {}'.format(valuename)))
|
||||
except ValueError as exc:
|
||||
self.controls.append(control(valuename, setting.data))
|
||||
logging.info(slogm('Working with control {} with string value'.format(valuename)))
|
||||
except Exception as exc:
|
||||
logging.info(slogm('Unable to work with control {}: {}'.format(valuename, exc)))
|
||||
#for e in polfile.pol_file.entries:
|
||||
# print('{}:{}:{}:{}:{}'.format(e.type, e.data, e.valuename, e.keyname))
|
||||
for cont in self.controls:
|
||||
cont.set_control_status()
|
||||
|
||||
|
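A short sketch of how control_applier.run() above derives a control name and value from a registry entry: the name is the last component of the hive key, and numeric data is tried before falling back to the string form. The entry below is illustrative:

def parse_control_setting(hive_key, data):
    # Mirror control_applier.run(): the last path component is the control name
    valuename = hive_key.rpartition('/')[2]
    try:
        return valuename, int(data)
    except ValueError:
        return valuename, data

print(parse_control_setting('Software/BaseALT/Policies/Control/sshd-gssapi-auth', 'enabled'))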
@ -18,17 +18,11 @@
|
||||
|
||||
import logging
|
||||
import os
|
||||
import json
|
||||
|
||||
import cups
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from .applier_frontend import applier_frontend
|
||||
from gpt.printers import json2printer
|
||||
from util.rpm import is_rpm_installed
|
||||
from util.logging import slogm, log
|
||||
from util.logging import slogm
|
||||
|
||||
def storage_get_printers(storage, sid):
|
||||
'''
|
||||
@ -38,85 +32,42 @@ def storage_get_printers(storage, sid):
|
||||
printers = list()
|
||||
|
||||
for prnj in printer_objs:
|
||||
printers.append(prnj)
|
||||
prn_obj = json2printer(prnj)
|
||||
printers.append(prn_obj)
|
||||
|
||||
return printers
|
||||
|
||||
def connect_printer(connection, prn):
|
||||
def write_printer(prn):
|
||||
'''
|
||||
Dump printer configuration to disk as CUPS config
|
||||
'''
|
||||
# PPD file location
|
||||
printer_driver = 'generic'
|
||||
pjson = json.loads(prn.printer)
|
||||
printer_parts = pjson['printer']['path'].partition(' ')
|
||||
# Printer queue name in CUPS
|
||||
printer_name = printer_parts[2].replace('(', '').replace(')', '')
|
||||
# Printer description in CUPS
|
||||
printer_info = printer_name
|
||||
printer_uri = printer_parts[0].replace('\\', '/')
|
||||
printer_uri = 'smb:' + printer_uri
|
||||
|
||||
connection.addPrinter(
|
||||
name=printer_name
|
||||
, info=printer_info
|
||||
, device=printer_uri
|
||||
#filename=printer_driver
|
||||
)
|
||||
printer_config_path = os.path.join('/etc/cups', prn.name)
|
||||
with open(printer_config_path, 'w') as f:
|
||||
print(prn.cups_config(), file=f)
|
||||
|
||||
class cups_applier(applier_frontend):
|
||||
__module_name = 'CUPSApplier'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def run(self):
|
||||
if not is_rpm_installed('cups'):
|
||||
log('W9')
|
||||
return
|
||||
try:
|
||||
self.cups_connection = cups.Connection()
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['exc'] = exc
|
||||
log('W20', logdata)
|
||||
self.printers = storage_get_printers(self.storage, self.storage.get_info('machine_sid'))
|
||||
|
||||
if self.printers:
|
||||
for prn in self.printers:
|
||||
connect_printer(self.cups_connection, prn)
|
||||
|
||||
def apply(self):
|
||||
'''
|
||||
Perform configuration of printer which is assigned to computer.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D113')
|
||||
self.run()
|
||||
else:
|
||||
log('D114')
|
||||
if not is_rpm_installed('cups'):
|
||||
logging.warning(slogm('CUPS is not installed: no printer settings will be deployed'))
|
||||
return
|
||||
|
||||
printers = storage_get_printers(self.storage, self.storage.get_info('machine_sid'))
|
||||
|
||||
if printers:
|
||||
for prn in printers:
|
||||
write_printer(prn)
|
||||
|
||||
class cups_applier_user(applier_frontend):
|
||||
__module_name = 'CUPSApplierUser'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_enabled
|
||||
)
|
||||
|
||||
def user_context_apply(self):
|
||||
'''
|
||||
@ -125,25 +76,17 @@ class cups_applier_user(applier_frontend):
|
||||
'''
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
if not is_rpm_installed('cups'):
|
||||
log('W9')
|
||||
return
|
||||
|
||||
self.cups_connection = cups.Connection()
|
||||
self.printers = storage_get_printers(self.storage, self.sid)
|
||||
|
||||
if self.printers:
|
||||
for prn in self.printers:
|
||||
connect_printer(self.cups_connection, prn)
|
||||
|
||||
def admin_context_apply(self):
|
||||
'''
|
||||
Perform printer configuration assigned for user.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D115')
|
||||
self.run()
|
||||
else:
|
||||
log('D116')
|
||||
if not is_rpm_installed('cups'):
|
||||
logging.warning(slogm('CUPS is not installed: no printer settings will be deployed'))
|
||||
return
|
||||
|
||||
printers = storage_get_printers(self.storage, self.sid)
|
||||
|
||||
if printers:
|
||||
for prn in printers:
|
||||
write_printer(prn)
|
||||
|
||||
|
@ -1,69 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from .appliers.envvar import Envvar
|
||||
from util.logging import slogm, log
|
||||
|
||||
import logging
|
||||
|
||||
class envvar_applier(applier_frontend):
|
||||
__module_name = 'EnvvarsApplier'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
|
||||
def __init__(self, storage, sid):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.envvars = self.storage.get_envvars(self.sid)
|
||||
#self.__module_enabled = check_enabled(self.storage, self.__module_name, self.__module_enabled)
|
||||
|
||||
def apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D134')
|
||||
ev = Envvar(self.envvars, 'root')
|
||||
ev.act()
|
||||
else:
|
||||
log('D135')
|
||||
|
||||
class envvar_applier_user(applier_frontend):
|
||||
__module_name = 'EnvvarsApplierUser'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
self.envvars = self.storage.get_envvars(self.sid)
|
||||
#self.__module_enabled = check_enabled(self.storage, self.__module_name, self.__module_experimental)
|
||||
|
||||
def admin_context_apply(self):
|
||||
pass
|
||||
|
||||
def user_context_apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D136')
|
||||
ev = Envvar(self.envvars, self.username)
|
||||
ev.act()
|
||||
else:
|
||||
log('D137')
|
||||
|
@ -1,83 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2022 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
from .appliers.file_cp import Files_cp, Execution_check
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from util.logging import log
|
||||
|
||||
|
||||
|
||||
class file_applier(applier_frontend):
|
||||
__module_name = 'FilesApplier'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
|
||||
def __init__(self, storage, file_cache, sid):
|
||||
self.storage = storage
|
||||
self.exe_check = Execution_check(storage)
|
||||
self.sid = sid
|
||||
self.file_cache = file_cache
|
||||
self.files = self.storage.get_files(self.sid)
|
||||
self.__module_enabled = check_enabled(self.storage, self.__module_name, self.__module_experimental)
|
||||
|
||||
def run(self):
|
||||
for file in self.files:
|
||||
Files_cp(file, self.file_cache, self.exe_check)
|
||||
|
||||
def apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D167')
|
||||
self.run()
|
||||
else:
|
||||
log('D168')
|
||||
|
||||
class file_applier_user(applier_frontend):
|
||||
__module_name = 'FilesApplierUser'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
|
||||
def __init__(self, storage, file_cache, sid, username):
|
||||
self.storage = storage
|
||||
self.file_cache = file_cache
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
self.exe_check = Execution_check(storage)
|
||||
self.files = self.storage.get_files(self.sid)
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def run(self):
|
||||
for file in self.files:
|
||||
Files_cp(file, self.file_cache, self.exe_check, self.username)
|
||||
|
||||
def admin_context_apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D169')
|
||||
self.run()
|
||||
else:
|
||||
log('D170')
|
||||
|
||||
def user_context_apply(self):
|
||||
pass
|
@ -25,23 +25,19 @@
|
||||
# This thing must work with keys and subkeys located at:
|
||||
# Software\Policies\Mozilla\Firefox
|
||||
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
import configparser
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from util.logging import log
|
||||
from util.util import is_machine_name, try_dict_to_literal_eval
|
||||
from .applier_frontend import applier_frontend
|
||||
from util.logging import slogm
|
||||
from util.util import is_machine_name
|
||||
|
||||
class firefox_applier(applier_frontend):
|
||||
__module_name = 'FirefoxApplier'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
__registry_branch = 'Software/Policies/Mozilla/Firefox'
|
||||
__firefox_installdir1 = '/usr/lib64/firefox/distribution'
|
||||
__firefox_installdir2 = '/etc/firefox/policies'
|
||||
__registry_branch = 'Software\\Policies\\Mozilla\\Firefox'
|
||||
__firefox_installdir = '/usr/lib64/firefox/distribution'
|
||||
__user_settings_dir = '.mozilla/firefox'
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
@ -50,139 +46,99 @@ class firefox_applier(applier_frontend):
|
||||
self._is_machine_name = is_machine_name(self.username)
|
||||
self.policies = dict()
|
||||
self.policies_json = dict({ 'policies': self.policies })
|
||||
firefox_filter = '{}%'.format(self.__registry_branch)
|
||||
self.firefox_keys = self.storage.filter_hklm_entries(firefox_filter)
|
||||
self.policies_gen = dict()
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def get_boolean(self,data):
|
||||
if data in ['0', 'false', None, 'none', 0]:
|
||||
return False
|
||||
if data in ['1', 'true', 1]:
|
||||
def get_profiles(self):
|
||||
'''
|
||||
Get directory names of Firefox profiles for specified username.
|
||||
'''
|
||||
profiles_ini = os.path.join(util.get_homedir(self.username), self.__user_settings_dir, 'profiles.ini')
|
||||
config = configparser.ConfigParser()
|
||||
config.read(profiles_ini)
|
||||
|
||||
profile_paths = list()
|
||||
for section in config.keys():
|
||||
if section.startswith('Profile'):
|
||||
profile_paths.append(config[section]['Path'])
|
||||
|
||||
return profile_paths
|
||||
|
||||
def get_hklm_string_entry(self, hive_subkey):
|
||||
'''
|
||||
Get HKEY_LOCAL_MACHINE hive subkey of
|
||||
'Software\Policies\Mozilla\Firefox'.
|
||||
'''
|
||||
query_str = '{}\\{}'.format(self.__registry_branch, hive_subkey)
|
||||
return self.storage.get_hklm_entry(query_str)
|
||||
|
||||
def get_hklm_string_entry_default(self, hive_subkey, default):
|
||||
'''
|
||||
Get Firefox's subkey or return the default value.
|
||||
'''
|
||||
defval = str(default)
|
||||
response = self.get_hklm_string_entry(hive_subkey)
|
||||
if response:
|
||||
return response.data
|
||||
return defval
|
||||
|
||||
def set_policy(self, name, obj):
|
||||
'''
|
||||
Add entry to policy set.
|
||||
'''
|
||||
if obj:
|
||||
self.policies[name] = obj
|
||||
logging.info(slogm('Firefox policy \'{}\' set to {}'.format(name, obj)))
|
||||
|
||||
def get_home_page(self):
|
||||
'''
|
||||
Query the Homepage property from the storage.
|
||||
'''
|
||||
homepage = dict({
|
||||
'URL': 'about:blank',
|
||||
'Locked': False,
|
||||
'StartPage': 'homepage'
|
||||
})
|
||||
response = self.get_hklm_string_entry('Homepage\\URL')
|
||||
if response:
|
||||
homepage['URL'] = response.data
|
||||
return homepage
|
||||
return None
|
||||
|
||||
def get_block_about_config(self):
|
||||
'''
|
||||
Query BlockAboutConfig boolean property from the storage.
|
||||
'''
|
||||
response = self.get_hklm_string_entry('BlockAboutConfig')
|
||||
if response:
|
||||
if response.data.lower() in ['0', 'false', False, None, 'None']:
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_parts(self, hivekeyname):
|
||||
'''
|
||||
Parse registry path string and leave key parameters
|
||||
'''
|
||||
parts = hivekeyname.replace(self.__registry_branch, '').split('/')
|
||||
return parts
|
||||
|
||||
def create_dict(self, firefox_keys):
|
||||
'''
|
||||
Collect dictionaries from registry keys into a general dictionary
|
||||
'''
|
||||
excp = ['SOCKSVersion']
|
||||
counts = dict()
|
||||
for it_data in firefox_keys:
|
||||
branch = counts
|
||||
try:
|
||||
if type(it_data.data) is bytes:
|
||||
it_data.data = it_data.data.decode(encoding='utf-16').replace('\x00','')
|
||||
json_data = try_dict_to_literal_eval(it_data.data)
|
||||
if json_data:
|
||||
it_data.data = json_data
|
||||
it_data.type = 7
|
||||
else:
|
||||
if it_data.type == 1:
|
||||
it_data.data = clean_data_firefox(it_data.data)
|
||||
#Cases when it is necessary to create nested dictionaries
|
||||
if it_data.valuename != it_data.data:
|
||||
parts = self.get_parts(it_data.hive_key)
|
||||
#creating a nested dictionary from elements
|
||||
for part in parts[:-1]:
|
||||
branch = branch.setdefault(part, {})
|
||||
#dictionary key value initialization
|
||||
if it_data.type == 4:
|
||||
if it_data.valuename in excp:
|
||||
branch[parts[-1]] = int(it_data.data)
|
||||
else:
|
||||
branch[parts[-1]] = self.get_boolean(it_data.data)
|
||||
elif it_data.type == 7:
|
||||
branch[parts[-1]] = it_data.data
|
||||
else:
|
||||
branch[parts[-1]] = str(it_data.data).replace('\\', '/')
|
||||
#Cases when it is necessary to create lists in a dictionary
|
||||
else:
|
||||
parts = self.get_parts(it_data.keyname)
|
||||
for part in parts[:-1]:
|
||||
branch = branch.setdefault(part, {})
|
||||
if branch.get(parts[-1]) is None:
|
||||
branch[parts[-1]] = list()
|
||||
if it_data.type == 4:
|
||||
branch[parts[-1]].append(self.get_boolean(it_data.data))
|
||||
else:
|
||||
if os.path.isdir(str(it_data.data).replace('\\', '/')):
|
||||
branch[parts[-1]].append(str(it_data.data).replace('\\', '/'))
|
||||
else:
|
||||
branch[parts[-1]].append(str(it_data.data))
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['Exception'] = exc
|
||||
logdata['keyname'] = it_data.keyname
|
||||
log('W14', logdata)
|
||||
|
||||
self.policies_json = {'policies': dict_item_to_list(counts)}
|
||||
return None
|
||||
|
||||
def machine_apply(self):
|
||||
'''
|
||||
Write policies.json to Firefox installdir.
|
||||
'''
|
||||
self.create_dict(self.firefox_keys)
|
||||
destfile = os.path.join(self.__firefox_installdir1, 'policies.json')
|
||||
self.set_policy('Homepage', self.get_home_page())
|
||||
self.set_policy('BlockAboutConfig', self.get_block_about_config())
|
||||
|
||||
os.makedirs(self.__firefox_installdir1, exist_ok=True)
|
||||
destfile = os.path.join(self.__firefox_installdir, 'policies.json')
|
||||
|
||||
os.makedirs(self.__firefox_installdir, exist_ok=True)
|
||||
with open(destfile, 'w') as f:
|
||||
json.dump(self.policies_json, f)
|
||||
logdata = dict()
|
||||
logdata['destfile'] = destfile
|
||||
log('D91', logdata)
|
||||
logging.debug(slogm('Wrote Firefox preferences to {}'.format(destfile)))
|
||||
|
||||
destfile = os.path.join(self.__firefox_installdir2, 'policies.json')
|
||||
os.makedirs(self.__firefox_installdir2, exist_ok=True)
|
||||
with open(destfile, 'w') as f:
|
||||
json.dump(self.policies_json, f)
|
||||
logdata = dict()
|
||||
logdata['destfile'] = destfile
|
||||
log('D91', logdata)
|
||||
def user_apply(self):
|
||||
profiles = self.get_profiles()
|
||||
|
||||
profiledir = os.path.join(util.get_homedir(self.username), self.__user_settings_dir)
|
||||
for profile in profiles:
|
||||
logging.debug(slogm('Found Firefox profile in {}/{}'.format(profiledir, profile)))
|
||||
|
||||
    def apply(self):
        if self.__module_enabled:
            log('D93')
            self.machine_apply()
        else:
            log('D94')
        self.machine_apply()
        #if not self._is_machine_name:
        #    logging.debug('Running user applier for Firefox')
        #    self.user_apply()


def key_dict_is_digit(dictionary:dict) -> bool:
    '''
    Checking if a dictionary key is a digit
    '''
    if not isinstance(dictionary, dict):
        return False
    for dig in dictionary.keys():
        if dig.isdigit():
            return True
    return False


def dict_item_to_list(dictionary:dict) -> dict:
    '''
    Replacing dictionaries with numeric keys with a List
    '''
    if '' in dictionary:
        dictionary = dictionary.pop('')

    for key, val in dictionary.items():
        if type(val) == dict:
            if key_dict_is_digit(val):
                dictionary[key] = [*val.values()]
            else:
                dict_item_to_list(dictionary[key])
    return dictionary


def clean_data_firefox(data):
    return data.replace("'", '\"')
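For illustration, a minimal self-contained sketch of the transformation that key_dict_is_digit and dict_item_to_list perform on the dictionary built by create_dict: registry subkeys named with digit strings collapse into JSON arrays before the result is written to policies.json. The sample policy data below is made up.

def _numeric_dicts_to_lists(d):
    # Mirrors dict_item_to_list(): a nested dict whose keys are digit strings
    # ('1', '2', ...) is replaced by a plain list of its values.
    for key, val in d.items():
        if isinstance(val, dict):
            if any(k.isdigit() for k in val):
                d[key] = list(val.values())
            else:
                _numeric_dicts_to_lists(val)
    return d

sample = {'ExamplePolicy': {'Items': {'1': 'first', '2': 'second'}}}
print(_numeric_dicts_to_lists(sample))
# {'ExamplePolicy': {'Items': ['first', 'second']}}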
@@ -1,65 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


import logging
import subprocess

from util.logging import slogm, log
from .applier_frontend import (
      applier_frontend
    , check_enabled
)
from .appliers.firewall_rule import FirewallRule


class firewall_applier(applier_frontend):
    __module_name = 'FirewallApplier'
    __module_experimental = True
    __module_enabled = False
    __firewall_branch = 'SOFTWARE\\Policies\\Microsoft\\WindowsFirewall\\FirewallRules'
    __firewall_switch = 'SOFTWARE\\Policies\\Microsoft\\WindowsFirewall\\DomainProfile\\EnableFirewall'
    __firewall_reset_cmd = ['/usr/bin/alterator-net-iptables', 'reset']

    def __init__(self, storage):
        self.storage = storage
        self.firewall_settings = self.storage.filter_hklm_entries('{}%'.format(self.__firewall_branch))
        self.firewall_enabled = self.storage.get_hklm_entry(self.__firewall_switch)
        self.__module_enabled = check_enabled(
              self.storage
            , self.__module_name
            , self.__module_experimental
        )

    def run(self):
        for setting in self.firewall_settings:
            rule = FirewallRule(setting.data)
            rule.apply()

    def apply(self):
        if self.__module_enabled:
            log('D117')
            if '1' == self.firewall_enabled:
                log('D118')
                self.run()
            else:
                log('D119')
                proc = subprocess.Popen(self.__firewall_reset_cmd)
                proc.wait()
        else:
            log('D120')
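The FirewallRules values consumed above normally arrive as pipe-delimited 'v2.x|Key=Value|...' strings, the usual Windows layout for this registry branch. Below is a rough sketch of turning one such value into a dict; the real parsing lives in .appliers.firewall_rule.FirewallRule, whose internals are not shown in this diff, and the sample value is invented.

sample = 'v2.25|Action=Allow|Active=TRUE|Dir=In|Protocol=6|LPort=443|Name=Example rule|'

def parse_firewall_rule(data):
    # Skip the leading version tag, then split the remaining Key=Value pairs.
    fields = {}
    for chunk in data.strip('|').split('|')[1:]:
        key, _, value = chunk.partition('=')
        fields[key] = value
    return fields

print(parse_firewall_rule(sample))
# {'Action': 'Allow', 'Active': 'TRUE', 'Dir': 'In', 'Protocol': '6', 'LPort': '443', 'Name': 'Example rule'}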
@ -1,92 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from .appliers.folder import Folder
|
||||
from util.logging import log
|
||||
from util.windows import expand_windows_var
|
||||
import re
|
||||
|
||||
class folder_applier(applier_frontend):
|
||||
__module_name = 'FoldersApplier'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
|
||||
def __init__(self, storage, sid):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.folders = self.storage.get_folders(self.sid)
|
||||
self.__module_enabled = check_enabled(self.storage, self.__module_name, self.__module_experimental)
|
||||
|
||||
def apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D107')
|
||||
for directory_obj in self.folders:
|
||||
check = expand_windows_var(directory_obj.path).replace('\\', '/')
|
||||
win_var = re.findall(r'%.+?%', check)
|
||||
drive = re.findall(r'^[a-z A-Z]\:',check)
|
||||
if drive or win_var:
|
||||
log('D109', {"path": directory_obj.path})
|
||||
continue
|
||||
fld = Folder(directory_obj)
|
||||
fld.act()
|
||||
else:
|
||||
log('D108')
|
||||
|
||||
class folder_applier_user(applier_frontend):
|
||||
__module_name = 'FoldersApplierUser'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
self.folders = self.storage.get_folders(self.sid)
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def run(self):
|
||||
for directory_obj in self.folders:
|
||||
check = expand_windows_var(directory_obj.path, self.username).replace('\\', '/')
|
||||
win_var = re.findall(r'%.+?%', check)
|
||||
drive = re.findall(r'^[a-z A-Z]\:',check)
|
||||
if drive or win_var:
|
||||
log('D110', {"path": directory_obj.path})
|
||||
continue
|
||||
fld = Folder(directory_obj, self.username)
|
||||
fld.act()
|
||||
|
||||
def admin_context_apply(self):
|
||||
pass
|
||||
|
||||
def user_context_apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D111')
|
||||
self.run()
|
||||
else:
|
||||
log('D112')
|
||||
|
@ -17,21 +17,14 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from storage import registry_factory
|
||||
from storage.fs_file_cache import fs_file_cache
|
||||
|
||||
from .control_applier import control_applier
|
||||
from .polkit_applier import (
|
||||
polkit_applier
|
||||
, polkit_applier_user
|
||||
)
|
||||
from .polkit_applier import polkit_applier
|
||||
from .systemd_applier import systemd_applier
|
||||
from .firefox_applier import firefox_applier
|
||||
from .chromium_applier import chromium_applier
|
||||
from .cups_applier import cups_applier
|
||||
from .package_applier import (
|
||||
package_applier
|
||||
, package_applier_user
|
||||
)
|
||||
from .package_applier import package_applier
|
||||
from .shortcut_applier import (
|
||||
shortcut_applier,
|
||||
shortcut_applier_user
|
||||
@ -40,51 +33,21 @@ from .gsettings_applier import (
|
||||
gsettings_applier,
|
||||
gsettings_applier_user
|
||||
)
|
||||
from .firewall_applier import firewall_applier
|
||||
from .folder_applier import (
|
||||
folder_applier
|
||||
, folder_applier_user
|
||||
)
|
||||
from .cifs_applier import (
|
||||
cifs_applier_user
|
||||
, cifs_applier)
|
||||
from .ntp_applier import ntp_applier
|
||||
from .envvar_applier import (
|
||||
envvar_applier
|
||||
, envvar_applier_user
|
||||
)
|
||||
from .scripts_applier import (
|
||||
scripts_applier
|
||||
, scripts_applier_user
|
||||
)
|
||||
|
||||
from .file_applier import (
|
||||
file_applier
|
||||
, file_applier_user
|
||||
)
|
||||
|
||||
from .ini_applier import (
|
||||
ini_applier
|
||||
, ini_applier_user
|
||||
)
|
||||
|
||||
from .kde_applier import (
|
||||
kde_applier
|
||||
, kde_applier_user
|
||||
)
|
||||
|
||||
from .networkshare_applier import networkshare_applier
|
||||
from .yandex_browser_applier import yandex_browser_applier
|
||||
|
||||
from util.sid import get_sid
|
||||
from util.windows import get_sid
|
||||
from util.users import (
|
||||
is_root,
|
||||
get_process_user,
|
||||
username_match_uid,
|
||||
with_privileges
|
||||
)
|
||||
from util.logging import slogm
|
||||
from util.paths import (
|
||||
frontend_module_dir
|
||||
)
|
||||
from util.logging import log
|
||||
from util.system import with_privileges
|
||||
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
def determine_username(username=None):
|
||||
'''
|
||||
@ -97,30 +60,16 @@ def determine_username(username=None):
|
||||
# of process owner.
|
||||
if not username:
|
||||
name = get_process_user()
|
||||
logdata = dict({'username': name})
|
||||
log('D2', logdata)
|
||||
logging.debug(slogm('Username is not specified - will use username of current process'))
|
||||
|
||||
if not username_match_uid(name):
|
||||
if not is_root():
|
||||
raise Exception('Current process UID does not match specified username')
|
||||
|
||||
logdata = dict({'username': name})
|
||||
log('D15', logdata)
|
||||
logging.debug(slogm('Username for frontend is set to {}'.format(name)))
|
||||
|
||||
return name
|
||||
|
||||
def apply_user_context(user_appliers):
|
||||
for applier_name, applier_object in user_appliers.items():
|
||||
log('D55', {'name': applier_name})
|
||||
|
||||
try:
|
||||
applier_object.user_context_apply()
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['applier'] = applier_name
|
||||
logdata['exception'] = str(exc)
|
||||
log('E20', logdata)
|
||||
|
||||
class frontend_manager:
|
||||
'''
|
||||
The frontend_manager class decides when and how to run appliers
|
||||
@ -128,116 +77,75 @@ class frontend_manager:
|
||||
'''
|
||||
|
||||
def __init__(self, username, is_machine):
|
||||
frontend_module_files = frontend_module_dir().glob('**/*')
|
||||
self.frontend_module_binaries = list()
|
||||
for exe in frontend_module_files:
|
||||
if (exe.is_file() and os.access(exe.resolve(), os.X_OK)):
|
||||
self.frontend_module_binaries.append(exe)
|
||||
|
||||
self.storage = registry_factory('registry')
|
||||
self.username = determine_username(username)
|
||||
self.storage = registry_factory('dconf', username=self.username)
|
||||
self.is_machine = is_machine
|
||||
self.process_uname = get_process_user()
|
||||
self.sid = get_sid(self.storage.get_info('domain'), self.username, is_machine)
|
||||
self.file_cache = fs_file_cache('file_cache', self.username)
|
||||
|
||||
self.machine_appliers = dict()
|
||||
self.user_appliers = dict()
|
||||
if is_machine:
|
||||
self._init_machine_appliers()
|
||||
else:
|
||||
self._init_user_appliers()
|
||||
self.machine_appliers = dict({
|
||||
'control': control_applier(self.storage),
|
||||
'polkit': polkit_applier(self.storage),
|
||||
'systemd': systemd_applier(self.storage),
|
||||
'firefox': firefox_applier(self.storage, self.sid, self.username),
|
||||
'chromium': chromium_applier(self.storage, self.sid, self.username),
|
||||
'shortcuts': shortcut_applier(self.storage),
|
||||
'gsettings': gsettings_applier(self.storage),
|
||||
'cups': cups_applier(self.storage),
|
||||
'package': package_applier(self.storage)
|
||||
})
|
||||
|
||||
def _init_machine_appliers(self):
|
||||
self.machine_appliers['control'] = control_applier(self.storage)
|
||||
self.machine_appliers['polkit'] = polkit_applier(self.storage)
|
||||
self.machine_appliers['systemd'] = systemd_applier(self.storage)
|
||||
self.machine_appliers['firefox'] = firefox_applier(self.storage, self.sid, self.username)
|
||||
self.machine_appliers['chromium'] = chromium_applier(self.storage, self.sid, self.username)
|
||||
self.machine_appliers['yandex_browser'] = yandex_browser_applier(self.storage, self.sid, self.username)
|
||||
self.machine_appliers['shortcuts'] = shortcut_applier(self.storage)
|
||||
self.machine_appliers['gsettings'] = gsettings_applier(self.storage, self.file_cache)
|
||||
try:
|
||||
self.machine_appliers['cifs'] = cifs_applier(self.storage, self.sid)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['applier_name'] = 'cifs'
|
||||
logdata['msg'] = str(exc)
|
||||
log('E24', logdata)
|
||||
self.machine_appliers['cups'] = cups_applier(self.storage)
|
||||
self.machine_appliers['firewall'] = firewall_applier(self.storage)
|
||||
self.machine_appliers['folders'] = folder_applier(self.storage, self.sid)
|
||||
self.machine_appliers['package'] = package_applier(self.storage)
|
||||
self.machine_appliers['ntp'] = ntp_applier(self.storage)
|
||||
self.machine_appliers['envvar'] = envvar_applier(self.storage, self.sid)
|
||||
self.machine_appliers['networkshare'] = networkshare_applier(self.storage, self.sid)
|
||||
self.machine_appliers['scripts'] = scripts_applier(self.storage, self.sid)
|
||||
self.machine_appliers['files'] = file_applier(self.storage, self.file_cache, self.sid)
|
||||
self.machine_appliers['ini'] = ini_applier(self.storage, self.sid)
|
||||
self.machine_appliers['kde'] = kde_applier(self.storage)
|
||||
|
||||
def _init_user_appliers(self):
|
||||
# User appliers are expected to work with user-writable
|
||||
# files and settings, mostly in $HOME.
|
||||
self.user_appliers['shortcuts'] = shortcut_applier_user(self.storage, self.sid, self.username)
|
||||
self.user_appliers['folders'] = folder_applier_user(self.storage, self.sid, self.username)
|
||||
self.user_appliers['gsettings'] = gsettings_applier_user(self.storage, self.file_cache, self.sid, self.username)
|
||||
try:
|
||||
self.user_appliers['cifs'] = cifs_applier_user(self.storage, self.sid, self.username)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['applier_name'] = 'cifs'
|
||||
logdata['msg'] = str(exc)
|
||||
log('E25', logdata)
|
||||
self.user_appliers['package'] = package_applier_user(self.storage, self.sid, self.username)
|
||||
self.user_appliers['polkit'] = polkit_applier_user(self.storage, self.sid, self.username)
|
||||
self.user_appliers['envvar'] = envvar_applier_user(self.storage, self.sid, self.username)
|
||||
self.user_appliers['networkshare'] = networkshare_applier(self.storage, self.sid, self.username)
|
||||
self.user_appliers['scripts'] = scripts_applier_user(self.storage, self.sid, self.username)
|
||||
self.user_appliers['files'] = file_applier_user(self.storage, self.file_cache, self.sid, self.username)
|
||||
self.user_appliers['ini'] = ini_applier_user(self.storage, self.sid, self.username)
|
||||
self.user_appliers['kde'] = kde_applier_user(self.storage, self.sid, self.username, self.file_cache)
|
||||
self.user_appliers = dict({
|
||||
'shortcuts': shortcut_applier_user(self.storage, self.sid, self.username),
|
||||
'gsettings': gsettings_applier_user(self.storage, self.sid, self.username)
|
||||
})
|
||||
|
||||
def machine_apply(self):
|
||||
'''
|
||||
Run global appliers with administrator privileges.
|
||||
'''
|
||||
if not is_root():
|
||||
log('E13')
|
||||
logging.error('Not sufficient privileges to run machine appliers')
|
||||
return
|
||||
log('D16')
|
||||
logging.debug(slogm('Applying computer part of settings'))
|
||||
|
||||
for applier_name, applier_object in self.machine_appliers.items():
|
||||
try:
|
||||
applier_object.apply()
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['applier_name'] = applier_name
|
||||
logdata['msg'] = str(exc)
|
||||
log('E24', logdata)
|
||||
for exe in self.frontend_module_binaries:
|
||||
subprocess.check_call([exe.resolve()])
|
||||
|
||||
self.machine_appliers['systemd'].apply()
|
||||
self.machine_appliers['control'].apply()
|
||||
self.machine_appliers['polkit'].apply()
|
||||
self.machine_appliers['firefox'].apply()
|
||||
self.machine_appliers['chromium'].apply()
|
||||
self.machine_appliers['shortcuts'].apply()
|
||||
self.machine_appliers['gsettings'].apply()
|
||||
self.machine_appliers['cups'].apply()
|
||||
#self.machine_appliers['package'].apply()
|
||||
|
||||
def user_apply(self):
|
||||
'''
|
||||
Run appliers for users.
|
||||
'''
|
||||
if is_root():
|
||||
for applier_name, applier_object in self.user_appliers.items():
|
||||
try:
|
||||
applier_object.admin_context_apply()
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['applier'] = applier_name
|
||||
logdata['exception'] = str(exc)
|
||||
log('E19', logdata)
|
||||
logging.debug(slogm('Running user appliers from administrator context'))
|
||||
self.user_appliers['shortcuts'].admin_context_apply()
|
||||
self.user_appliers['gsettings'].admin_context_apply()
|
||||
|
||||
try:
|
||||
with_privileges(self.username, lambda: apply_user_context(self.user_appliers))
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['username'] = self.username
|
||||
logdata['exception'] = str(exc)
|
||||
log('E30', logdata)
|
||||
logging.debug(slogm('Running user appliers for user context'))
|
||||
with_privileges(self.username, self.user_appliers['shortcuts'].user_context_apply)
|
||||
with_privileges(self.username, self.user_appliers['gsettings'].user_context_apply)
|
||||
else:
|
||||
for applier_name, applier_object in self.user_appliers.items():
|
||||
try:
|
||||
applier_object.user_context_apply()
|
||||
except Exception as exc:
|
||||
logdata = dict({'applier_name': applier_name, 'message': str(exc)})
|
||||
log('E11', logdata)
|
||||
logging.debug(slogm('Running user appliers from user context'))
|
||||
self.user_appliers['shortcuts'].user_context_apply()
|
||||
self.user_appliers['gsettings'].user_context_apply()
|
||||
|
||||
def apply_parameters(self):
|
||||
'''
|
||||
|
@ -1,7 +1,7 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2021 BaseALT Ltd.
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@ -16,281 +16,77 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from util.exceptions import NotUNCPathError
|
||||
import logging
|
||||
import os
|
||||
import pwd
|
||||
import subprocess
|
||||
|
||||
from gi.repository import (
|
||||
Gio
|
||||
, GLib
|
||||
)
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
, check_windows_mapping_enabled
|
||||
)
|
||||
from .applier_frontend import applier_frontend
|
||||
from .appliers.gsettings import (
|
||||
system_gsettings,
|
||||
user_gsettings
|
||||
system_gsetting,
|
||||
user_gsetting
|
||||
)
|
||||
from util.logging import slogm ,log
|
||||
|
||||
def uri_fetch(schema, path, value, cache):
|
||||
'''
|
||||
Function to fetch and cache uri
|
||||
'''
|
||||
retval = value
|
||||
logdata = dict()
|
||||
logdata['schema'] = schema
|
||||
logdata['path'] = path
|
||||
logdata['src'] = value
|
||||
try:
|
||||
retval = cache.get(value)
|
||||
logdata['dst'] = retval
|
||||
log('D90', logdata)
|
||||
except Exception as exc:
|
||||
pass
|
||||
|
||||
return retval
|
||||
from util.logging import slogm
|
||||
|
||||
class gsettings_applier(applier_frontend):
|
||||
__module_name = 'GSettingsApplier'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
__registry_branch = 'Software\\BaseALT\\Policies\\gsettings\\'
|
||||
__registry_locks_branch = 'Software\\BaseALT\\Policies\\GSettingsLocks\\'
|
||||
__wallpaper_entry = 'Software/BaseALT/Policies/gsettings/org.mate.background.picture-filename'
|
||||
__vino_authentication_methods_entry = 'Software/BaseALT/Policies/gsettings/org.gnome.Vino.authentication-methods'
|
||||
__registry_branch = 'Software\\BaseALT\\Policies\\gsettings'
|
||||
__global_schema = '/usr/share/glib-2.0/schemas'
|
||||
__override_priority_file = 'zzz_policy.gschema.override'
|
||||
__override_old_file = '0_policy.gschema.override'
|
||||
|
||||
|
||||
def __init__(self, storage, file_cache):
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
self.file_cache = file_cache
|
||||
gsettings_filter = '{}%'.format(self.__registry_branch)
|
||||
gsettings_locks_filter = '{}%'.format(self.__registry_locks_branch)
|
||||
self.gsettings_keys = self.storage.filter_hklm_entries(gsettings_filter)
|
||||
self.gsettings_locks = self.storage.filter_hklm_entries(gsettings_locks_filter)
|
||||
self.override_file = os.path.join(self.__global_schema, self.__override_priority_file)
|
||||
self.override_old_file = os.path.join(self.__global_schema, self.__override_old_file)
|
||||
self.gsettings = system_gsettings(self.override_file)
|
||||
self.locks = dict()
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def update_file_cache(self, data):
|
||||
try:
|
||||
self.file_cache.store(data)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['exception'] = str(exc)
|
||||
log('D145', logdata)
|
||||
|
||||
def uri_fetch_helper(self, schema, path, value):
|
||||
return uri_fetch(schema, path, value, self.file_cache)
|
||||
|
||||
def run(self):
|
||||
# Compatility cleanup of old settings
|
||||
if os.path.exists(self.override_old_file):
|
||||
os.remove(self.override_old_file)
|
||||
self.gsettings = list()
|
||||
self.override_file = os.path.join(self.__global_schema, '0_policy.gschema.override')
|
||||
|
||||
def apply(self):
|
||||
# Cleanup settings from previous run
|
||||
if os.path.exists(self.override_file):
|
||||
log('D82')
|
||||
logging.debug(slogm('Removing GSettings policy file from previous run'))
|
||||
os.remove(self.override_file)
|
||||
|
||||
# Get all configured gsettings locks
|
||||
for lock in self.gsettings_locks:
|
||||
valuename = lock.hive_key.rpartition('/')[2]
|
||||
self.locks[valuename] = int(lock.data)
|
||||
|
||||
# Calculate all configured gsettings
|
||||
for setting in self.gsettings_keys:
|
||||
helper = None
|
||||
valuename = setting.hive_key.rpartition('/')[2]
|
||||
valuename = setting.hive_key.rpartition('\\')[2]
|
||||
rp = valuename.rpartition('.')
|
||||
schema = rp[0]
|
||||
path = rp[2]
|
||||
data = setting.data
|
||||
lock = bool(self.locks[valuename]) if valuename in self.locks else None
|
||||
if setting.hive_key.lower() == self.__wallpaper_entry.lower():
|
||||
self.update_file_cache(setting.data)
|
||||
helper = self.uri_fetch_helper
|
||||
elif setting.hive_key.lower() == self.__vino_authentication_methods_entry.lower():
|
||||
data = [setting.data]
|
||||
self.gsettings.append(schema, path, data, lock, helper)
|
||||
self.gsettings.append(system_gsetting(schema, path, setting.data))
|
||||
|
||||
# Create GSettings policy with highest available priority
|
||||
self.gsettings.apply()
|
||||
for gsetting in self.gsettings:
|
||||
gsetting.apply()
|
||||
|
||||
# Recompile GSettings schemas with overrides
|
||||
try:
|
||||
proc = subprocess.run(args=['/usr/bin/glib-compile-schemas', self.__global_schema], capture_output=True, check=True)
|
||||
except Exception as exc:
|
||||
log('E48')
|
||||
|
||||
# Update desktop configuration system backend
|
||||
try:
|
||||
proc = subprocess.run(args=['/usr/bin/dconf', "update"], capture_output=True, check=True)
|
||||
except Exception as exc:
|
||||
log('E49')
|
||||
|
||||
    def apply(self):
        if self.__module_enabled:
            log('D80')
            self.run()
        else:
            log('D81')
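What run() produces on disk is a plain gschema override file plus a schema recompile; the sketch below reproduces that flow with made-up keys. The 'zzz_' prefix presumably makes the policy file sort after other overrides so its values take precedence; both commands must run as root.

import subprocess

schema_dir = '/usr/share/glib-2.0/schemas'
override = """[org.mate.background]
picture-filename='/usr/share/wallpapers/example.png'

[org.mate.screensaver]
lock-enabled=true
"""

# Write the override with the highest-sorting name, then recompile schemas
# and refresh the dconf system databases.
with open(schema_dir + '/zzz_policy.gschema.override', 'w') as f:
    f.write(override)
subprocess.run(['/usr/bin/glib-compile-schemas', schema_dir], check=True)
subprocess.run(['/usr/bin/dconf', 'update'], check=True)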
||||
|
||||
class GSettingsMapping:
|
||||
def __init__(self, hive_key, gsettings_schema, gsettings_key):
|
||||
self.hive_key = hive_key
|
||||
self.gsettings_schema = gsettings_schema
|
||||
self.gsettings_key = gsettings_key
|
||||
|
||||
try:
|
||||
self.schema_source = Gio.SettingsSchemaSource.get_default()
|
||||
self.schema = self.schema_source.lookup(self.gsettings_schema, True)
|
||||
self.gsettings_schema_key = self.schema.get_key(self.gsettings_key)
|
||||
self.gsettings_type = self.gsettings_schema_key.get_value_type()
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['hive_key'] = self.hive_key
|
||||
logdata['gsettings_schema'] = self.gsettings_schema
|
||||
logdata['gsettings_key'] = self.gsettings_key
|
||||
log('W6', logdata)
|
||||
|
||||
def preg2gsettings(self):
|
||||
'''
|
||||
Transform PReg key variant into GLib.Variant. This function
|
||||
performs mapping of PReg type system into GLib type system.
|
||||
'''
|
||||
pass
|
||||
|
||||
def gsettings2preg(self):
|
||||
'''
|
||||
Transform GLib.Variant key type into PReg key type.
|
||||
'''
|
||||
pass
|
||||
logging.debug(slogm('Error recompiling global GSettings schemas'))
|
||||
|
||||
class gsettings_applier_user(applier_frontend):
|
||||
__module_name = 'GSettingsApplierUser'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
__registry_branch = 'Software\\BaseALT\\Policies\\gsettings\\'
|
||||
__wallpaper_entry = 'Software/BaseALT/Policies/gsettings/org.mate.background.picture-filename'
|
||||
__vino_authentication_methods_entry = 'Software/BaseALT/Policies/gsettings/org.gnome.Vino.authentication-methods'
|
||||
__registry_branch = 'Software\\BaseALT\\Policies\\gsettings'
|
||||
|
||||
def __init__(self, storage, file_cache, sid, username):
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.file_cache = file_cache
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
gsettings_filter = '{}%'.format(self.__registry_branch)
|
||||
self.gsettings_keys = self.storage.filter_hkcu_entries(self.sid, gsettings_filter)
|
||||
self.gsettings = user_gsettings()
|
||||
self.__module_enabled = check_enabled(self.storage, self.__module_name, self.__module_experimental)
|
||||
self.__windows_mapping_enabled = check_windows_mapping_enabled(self.storage)
|
||||
self.gsettings = list()
|
||||
|
||||
self.__windows_settings = dict()
|
||||
self.windows_settings = list()
|
||||
mapping = [
|
||||
# Disable or enable screen saver
|
||||
GSettingsMapping(
|
||||
'Software/Policies/Microsoft/Windows/Control Panel/Desktop/ScreenSaveActive'
|
||||
, 'org.mate.screensaver'
|
||||
, 'idle-activation-enabled'
|
||||
)
|
||||
# Timeout in seconds for screen saver activation. The value of zero effectively disables screensaver start
|
||||
, GSettingsMapping(
|
||||
'Software/Policies/Microsoft/Windows/Control Panel/Desktop/ScreenSaveTimeOut'
|
||||
, 'org.mate.session'
|
||||
, 'idle-delay'
|
||||
)
|
||||
# Enable or disable password protection for screen saver
|
||||
, GSettingsMapping(
|
||||
'Software/Policies/Microsoft/Windows/Control Panel/Desktop/ScreenSaverIsSecure'
|
||||
, 'org.mate.screensaver'
|
||||
, 'lock-enabled'
|
||||
)
|
||||
# Specify image which will be used as a wallpaper
|
||||
, GSettingsMapping(
|
||||
'Software/Microsoft/Windows/CurrentVersion/Policies/System/Wallpaper'
|
||||
, 'org.mate.background'
|
||||
, 'picture-filename'
|
||||
)
|
||||
]
|
||||
self.windows_settings.extend(mapping)
|
||||
|
||||
for element in self.windows_settings:
|
||||
self.__windows_settings[element.hive_key] = element
|
||||
|
||||
|
||||
def windows_mapping_append(self):
|
||||
for setting_key in self.__windows_settings.keys():
|
||||
value = self.storage.get_hkcu_entry(self.sid, setting_key)
|
||||
if value:
|
||||
logdata = dict()
|
||||
logdata['setting_key'] = setting_key
|
||||
logdata['value.data'] = value.data
|
||||
log('D86', logdata)
|
||||
mapping = self.__windows_settings[setting_key]
|
||||
try:
|
||||
self.gsettings.append(mapping.gsettings_schema, mapping.gsettings_key, value.data)
|
||||
except Exception as exc:
|
||||
print(exc)
|
||||
|
||||
def uri_fetch_helper(self, schema, path, value):
|
||||
return uri_fetch(schema, path, value, self.file_cache)
|
||||
|
||||
def run(self):
|
||||
if self.__windows_mapping_enabled:
|
||||
log('D83')
|
||||
self.windows_mapping_append()
|
||||
else:
|
||||
log('D84')
|
||||
|
||||
# Calculate all configured gsettings
|
||||
def user_context_apply(self):
|
||||
for setting in self.gsettings_keys:
|
||||
valuename = setting.hive_key.rpartition('/')[2]
|
||||
valuename = setting.hive_key.rpartition('\\')[2]
|
||||
rp = valuename.rpartition('.')
|
||||
schema = rp[0]
|
||||
path = rp[2]
|
||||
data = setting.data
|
||||
helper = self.uri_fetch_helper if setting.hive_key.lower() == self.__wallpaper_entry.lower() else None
|
||||
if setting.hive_key.lower() == self.__vino_authentication_methods_entry.lower():
|
||||
data = [setting.data]
|
||||
self.gsettings.append(schema, path, data, helper)
|
||||
self.gsettings.append(user_gsetting(schema, path, setting.data))
|
||||
|
||||
# Create GSettings policy with highest available priority
|
||||
self.gsettings.apply()
|
||||
|
||||
def user_context_apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D87')
|
||||
self.run()
|
||||
else:
|
||||
log('D88')
|
||||
for gsetting in self.gsettings:
|
||||
gsetting.apply()
|
||||
|
||||
def admin_context_apply(self):
|
||||
# Cache files on remote locations
|
||||
try:
|
||||
entry = self.__wallpaper_entry
|
||||
filter_result = self.storage.get_hkcu_entry(self.sid, entry)
|
||||
if filter_result and filter_result.data:
|
||||
self.file_cache.store(filter_result.data)
|
||||
except NotUNCPathError:
|
||||
...
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['exception'] = str(exc)
|
||||
log('E50', logdata)
|
||||
|
||||
'''
|
||||
Not implemented because there is no point of doing so.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
@@ -1,78 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from pathlib import Path

from .appliers.ini_file import Ini_file
from .applier_frontend import (
      applier_frontend
    , check_enabled
)
from util.logging import log

class ini_applier(applier_frontend):
    __module_name = 'InifilesApplier'
    __module_experimental = True
    __module_enabled = False

    def __init__(self, storage, sid):
        self.storage = storage
        self.sid = sid
        self.inifiles_info = self.storage.get_ini(self.sid)
        self.__module_enabled = check_enabled(self.storage, self.__module_name, self.__module_experimental)

    def run(self):
        for inifile in self.inifiles_info:
            Ini_file(inifile)

    def apply(self):
        if self.__module_enabled:
            log('D171')
            self.run()
        else:
            log('D172')

class ini_applier_user(applier_frontend):
    __module_name = 'InifilesApplierUser'
    __module_experimental = True
    __module_enabled = False

    def __init__(self, storage, sid, username):
        self.sid = sid
        self.username = username
        self.storage = storage
        self.inifiles_info = self.storage.get_ini(self.sid)
        self.__module_enabled = check_enabled(
              self.storage
            , self.__module_name
            , self.__module_experimental
        )

    def run(self):
        for inifile in self.inifiles_info:
            Ini_file(inifile, self.username)

    def admin_context_apply(self):
        pass

    def user_context_apply(self):
        if self.__module_enabled:
            log('D173')
            self.run()
        else:
            log('D174')
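The Ini_file applier used above is defined in .appliers.ini_file and its exact behaviour is not part of this diff; as a rough standalone illustration, an INI policy of this kind boils down to an idempotent "set key in section" edit, for example with configparser (the path and keys below are hypothetical):

import configparser

def set_ini_value(path, section, key, value):
    config = configparser.ConfigParser()
    config.read(path)                      # a missing file just yields an empty config
    if not config.has_section(section):
        config.add_section(section)
    config.set(section, key, value)
    with open(path, 'w') as f:
        config.write(f)

# set_ini_value('/tmp/example.ini', 'Desktop', 'Wallpaper', '/usr/share/wallpapers/default.png')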
@ -1,292 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2024 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .applier_frontend import applier_frontend, check_enabled
|
||||
from util.logging import log
|
||||
from util.util import get_homedir
|
||||
from util.exceptions import NotUNCPathError
|
||||
import os
|
||||
import subprocess
|
||||
import re
|
||||
import dbus
|
||||
|
||||
class kde_applier(applier_frontend):
|
||||
__module_name = 'KdeApplier'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
__hklm_branch = 'Software\\BaseALT\\Policies\\KDE\\'
|
||||
__hklm_lock_branch = 'Software\\BaseALT\\Policies\\KDELocks\\'
|
||||
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
self.locks_dict = {}
|
||||
self.locks_data_dict = {}
|
||||
self.all_kde_settings = {}
|
||||
kde_filter = '{}%'.format(self.__hklm_branch)
|
||||
locks_filter = '{}%'.format(self.__hklm_lock_branch)
|
||||
self.locks_settings = self.storage.filter_hklm_entries(locks_filter)
|
||||
self.kde_settings = self.storage.filter_hklm_entries(kde_filter)
|
||||
self.all_kde_settings = {}
|
||||
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage,
|
||||
self.__module_name,
|
||||
self.__module_experimental
|
||||
)
|
||||
|
||||
def apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D198')
|
||||
create_dict(self.kde_settings, self.all_kde_settings, self.locks_settings, self.locks_dict)
|
||||
apply(self.all_kde_settings, self.locks_dict)
|
||||
else:
|
||||
log('D199')
|
||||
|
||||
class kde_applier_user(applier_frontend):
|
||||
__module_name = 'KdeApplierUser'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
__hkcu_branch = 'Software\\BaseALT\\Policies\\KDE\\'
|
||||
__hkcu_lock_branch = 'Software\\BaseALT\\Policies\\KDELocks\\'
|
||||
|
||||
def __init__(self, storage, sid=None, username=None, file_cache = None):
|
||||
self.storage = storage
|
||||
self.username = username
|
||||
self.sid = sid
|
||||
self.file_cache = file_cache
|
||||
self.locks_dict = {}
|
||||
self.locks_data_dict = {}
|
||||
self.all_kde_settings = {}
|
||||
kde_filter = '{}%'.format(self.__hkcu_branch)
|
||||
locks_filter = '{}%'.format(self.__hkcu_lock_branch)
|
||||
self.locks_settings = self.storage.filter_hkcu_entries(self.sid, locks_filter)
|
||||
self.kde_settings = self.storage.filter_hkcu_entries(self.sid, kde_filter)
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage,
|
||||
self.__module_name,
|
||||
self.__module_experimental
|
||||
)
|
||||
|
||||
def admin_context_apply(self):
|
||||
pass
|
||||
|
||||
def user_context_apply(self):
|
||||
'''
|
||||
Change settings applied in user context
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D200')
|
||||
create_dict(self.kde_settings, self.all_kde_settings, self.locks_settings, self.locks_dict, self.file_cache, self.username)
|
||||
apply(self.all_kde_settings, self.locks_dict, self.username)
|
||||
else:
|
||||
log('D201')
|
||||
|
||||
def create_dict(kde_settings, all_kde_settings, locks_settings, locks_dict, file_cache = None, username = None):
|
||||
for locks in locks_settings:
|
||||
locks_dict[locks.valuename] = locks.data
|
||||
for setting in kde_settings:
|
||||
try:
|
||||
file_name, section, value = setting.keyname.split("/")[-2], setting.keyname.split("/")[-1], setting.valuename
|
||||
data = setting.data
|
||||
if file_name == 'wallpaper':
|
||||
apply_for_wallpaper(data, file_cache, username)
|
||||
else:
|
||||
if file_name not in all_kde_settings:
|
||||
all_kde_settings[file_name] = {}
|
||||
if section not in all_kde_settings[file_name]:
|
||||
all_kde_settings[file_name][section] = {}
|
||||
all_kde_settings[file_name][section][value] = data
|
||||
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['file_name'] = file_name
|
||||
logdata['section'] = section
|
||||
logdata['value'] = value
|
||||
logdata['data'] = data
|
||||
logdata['exc'] = exc
|
||||
log('W16', logdata)
|
||||
|
||||
def apply(all_kde_settings, locks_dict, username = None):
|
||||
logdata = dict()
|
||||
if username is None:
|
||||
system_path_settings = '/etc/xdg/'
|
||||
system_files = [
|
||||
"baloofilerc",
|
||||
"kcminputrc",
|
||||
"kded_device_automounterrc",
|
||||
"kdeglobals",
|
||||
"ksplashrc",
|
||||
"kwinrc",
|
||||
"plasma-localerc",
|
||||
"plasmarc",
|
||||
"powermanagementprofilesrc"
|
||||
]
|
||||
for file in system_files:
|
||||
file_to_remove = f'{system_path_settings}{file}'
|
||||
if os.path.exists(file_to_remove):
|
||||
os.remove(file_to_remove)
|
||||
for file_name, sections in all_kde_settings.items():
|
||||
file_path = f'{system_path_settings}{file_name}'
|
||||
with open(file_path, 'w') as file:
|
||||
for section, keys in sections.items():
|
||||
section = section.replace(')(', '][')
|
||||
file.write(f'[{section}]\n')
|
||||
for key, value in keys.items():
|
||||
lock = f"{file_name}.{section}.{key}".replace('][', ')(')
|
||||
if lock in locks_dict and locks_dict[lock] == 1:
|
||||
file.write(f'{key}[$i]={value}\n')
|
||||
else:
|
||||
file.write(f'{key}={value}\n')
|
||||
file.write('\n')
|
||||
else:
|
||||
for file_name, sections in all_kde_settings.items():
|
||||
path = f'{get_homedir(username)}/.config/{file_name}'
|
||||
if not os.path.exists(path):
|
||||
open(path, 'a').close()
|
||||
else:
|
||||
pass
|
||||
for section, keys in sections.items():
|
||||
for key, value in keys.items():
|
||||
lock = f"{file_name}.{section}.{key}"
|
||||
if lock in locks_dict and locks_dict[lock] == 1:
|
||||
command = [
|
||||
'kwriteconfig5',
|
||||
'--file', file_name,
|
||||
'--group', section,
|
||||
'--key', key +'/$i/',
|
||||
'--type', 'string',
|
||||
value
|
||||
]
|
||||
else:
|
||||
command = [
|
||||
'kwriteconfig5',
|
||||
'--file', file_name,
|
||||
'--group', section,
|
||||
'--key', key,
|
||||
'--type', 'string',
|
||||
value
|
||||
]
|
||||
try:
|
||||
clear_locks_settings(username, file_name, key)
|
||||
subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
except:
|
||||
logdata['command'] = command
|
||||
log('W22', logdata)
|
||||
new_content = []
|
||||
file_path = f'{get_homedir(username)}/.config/{file_name}'
|
||||
try:
|
||||
with open(file_path, 'r') as file:
|
||||
for line in file:
|
||||
line = line.replace('/$i/', '[$i]').replace(')(', '][')
|
||||
new_content.append(line)
|
||||
with open(file_path, 'w') as file:
|
||||
file.writelines(new_content)
|
||||
logdata['file'] = file_name
|
||||
log('D202', logdata)
|
||||
except Exception as exc:
|
||||
logdata['exc'] = exc
|
||||
log('W19', logdata)
|
||||
|
||||
def clear_locks_settings(username, file_name, key):
|
||||
'''
|
||||
Method to remove old locked settings
|
||||
'''
|
||||
file_path = f'{get_homedir(username)}/.config/{file_name}'
|
||||
with open(file_path, 'r') as file:
|
||||
lines = file.readlines()
|
||||
with open(file_path, 'w') as file:
|
||||
for line in lines:
|
||||
if f'{key}[$i]=' not in line:
|
||||
file.write(line)
|
||||
for line in lines:
|
||||
if f'{key}[$i]=' in line:
|
||||
logdata = dict()
|
||||
logdata['line'] = line.strip()
|
||||
log('I10', logdata)
|
||||
|
||||
def apply_for_wallpaper(data, file_cache, username):
|
||||
'''
|
||||
Method to change wallpaper
|
||||
'''
|
||||
logdata = dict()
|
||||
path_to_wallpaper = f'{get_homedir(username)}/.config/plasma-org.kde.plasma.desktop-appletsrc'
|
||||
try:
|
||||
try:
|
||||
file_cache.store(data)
|
||||
data = file_cache.get(data)
|
||||
except NotUNCPathError:
|
||||
data = data
|
||||
os.environ["XDG_DATA_DIRS"] = "/usr/share/kf5:"
|
||||
#Variable for system detection of directories before files with .colors extension
|
||||
os.environ["DISPLAY"] = ":0"
|
||||
#Variable for command execution plasma-apply-colorscheme
|
||||
os.environ["XDG_RUNTIME_DIR"] = f"/run/user/{os.getuid()}"
|
||||
os.environ["DBUS_SESSION_BUS_ADDRESS"] = f"unix:path=/run/user/{os.getuid()}/bus"#plasma-apply-wallpaperimage
|
||||
os.environ["PATH"] = "/usr/lib/kf5/bin:"
|
||||
#environment variable for accessing binary files without hard links
|
||||
if os.path.isfile(path_to_wallpaper):
|
||||
id_desktop = get_id_desktop(path_to_wallpaper)
|
||||
command = [
|
||||
'kwriteconfig5',
|
||||
'--file', 'plasma-org.kde.plasma.desktop-appletsrc',
|
||||
'--group', 'Containments',
|
||||
'--group', id_desktop,
|
||||
'--group', 'Wallpaper',
|
||||
'--group', 'org.kde.image',
|
||||
'--group', 'General',
|
||||
'--key', 'Image',
|
||||
'--type', 'string',
|
||||
data
|
||||
]
|
||||
try:
|
||||
subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
except:
|
||||
logdata['command'] = command
|
||||
log('E68', logdata)
|
||||
try:
|
||||
session_bus = dbus.SessionBus()
|
||||
plasma_shell = session_bus.get_object('org.kde.plasmashell', '/PlasmaShell', introspect='org.kde.PlasmaShell')
|
||||
plasma_shell_iface = dbus.Interface(plasma_shell, 'org.kde.PlasmaShell')
|
||||
plasma_shell_iface.refreshCurrentShell()
|
||||
except:
|
||||
pass
|
||||
else:
|
||||
logdata['file'] = path_to_wallpaper
|
||||
log('W21', logdata)
|
||||
except OSError as exc:
|
||||
logdata['exc'] = exc
|
||||
log('W17', logdata)
|
||||
except Exception as exc:
|
||||
logdata['exc'] = exc
|
||||
log('E67', logdata)
|
||||
|
||||
def get_id_desktop(path_to_wallpaper):
    '''
    Method for getting desktop id. It is currently accepted that this number is one of the sections in the configuration file.
    '''
    pattern = r'\[Containments\]\[(\d+)\][^\[]*activityId=([^\s]+)'
    try:
        with open(path_to_wallpaper, 'r') as file:
            file_content = file.read()
            match = re.search(pattern, file_content)
            if match:
                return match.group(1)
            else:
                return None
    except:
        return None
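A minimal sketch of the kwriteconfig5 call pattern used throughout this module. KDE marks a key immutable by appending '[$i]' to its name; the applier above writes '/$i/' first and rewrites it to '[$i]' afterwards, apparently because the bracket form is awkward to pass through kwriteconfig5 directly. The file, group and key below are illustrative.

import subprocess

def write_kde_key(file_name, group, key, value, lock=False):
    key_arg = key + '/$i/' if lock else key   # '/$i/' is later rewritten to '[$i]' (immutable)
    subprocess.run(['kwriteconfig5',
                    '--file', file_name,
                    '--group', group,
                    '--key', key_arg,
                    '--type', 'string',
                    value], check=True)

# write_kde_key('kdeglobals', 'KDE', 'SingleClick', 'false', lock=True)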
@@ -1,58 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .appliers.netshare import Networkshare
from .applier_frontend import (
      applier_frontend
    , check_enabled
)
from util.logging import log

class networkshare_applier(applier_frontend):
    __module_name = 'NetworksharesApplier'
    __module_name_user = 'NetworksharesApplierUser'
    __module_experimental = True
    __module_enabled = False

    def __init__(self, storage, sid, username = None):
        self.storage = storage
        self.sid = sid
        self.username = username
        self.networkshare_info = self.storage.get_networkshare(self.sid)
        self.__module_enabled = check_enabled(self.storage, self.__module_name, self.__module_experimental)
        self.__module_enabled_user = check_enabled(self.storage, self.__module_name_user, self.__module_experimental)

    def run(self):
        for networkshare in self.networkshare_info:
            Networkshare(networkshare, self.username)

    def apply(self):
        if self.__module_enabled:
            log('D187')
            self.run()
        else:
            log('D181')
    def admin_context_apply(self):
        pass

    def user_context_apply(self):
        if self.__module_enabled_user:
            log('D188')
            self.run()
        else:
            log('D189')
@ -1,154 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
|
||||
import subprocess
|
||||
from enum import Enum
|
||||
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from util.logging import log
|
||||
|
||||
|
||||
class NTPServerType(Enum):
|
||||
NTP = 'NTP'
|
||||
|
||||
|
||||
class ntp_applier(applier_frontend):
|
||||
__module_name = 'NTPApplier'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
|
||||
__ntp_branch = 'Software\\Policies\\Microsoft\\W32time\\Parameters'
|
||||
__ntp_client_branch = 'Software\\Policies\\Microsoft\\W32time\\TimeProviders\\NtpClient'
|
||||
__ntp_server_branch = 'Software\\Policies\\Microsoft\\W32time\\TimeProviders\\NtpServer'
|
||||
|
||||
__ntp_key_address = 'NtpServer'
|
||||
__ntp_key_type = 'Type'
|
||||
__ntp_key_client_enabled = 'Enabled'
|
||||
__ntp_key_server_enabled = 'Enabled'
|
||||
|
||||
__chrony_config = '/etc/chrony.conf'
|
||||
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
|
||||
self.ntp_server_address_key = '{}\\{}'.format(self.__ntp_branch, self.__ntp_key_address)
|
||||
self.ntp_server_type = '{}\\{}'.format(self.__ntp_branch, self.__ntp_key_type)
|
||||
self.ntp_client_enabled = '{}\\{}'.format(self.__ntp_client_branch, self.__ntp_key_client_enabled)
|
||||
self.ntp_server_enabled = '{}\\{}'.format(self.__ntp_server_branch, self.__ntp_key_server_enabled)
|
||||
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def _chrony_as_client(self):
|
||||
command = ['/usr/sbin/control', 'chrony', 'client']
|
||||
proc = subprocess.Popen(command)
|
||||
proc.wait()
|
||||
|
||||
def _chrony_as_server(self):
|
||||
command = ['/usr/sbin/control', 'chrony', 'server']
|
||||
proc = subprocess.Popen(command)
|
||||
proc.wait()
|
||||
|
||||
def _start_chrony_client(self, server=None):
|
||||
srv = None
|
||||
if server:
|
||||
srv = server.data.rpartition(',')[0]
|
||||
logdata = dict()
|
||||
logdata['srv'] = srv
|
||||
log('D122', logdata)
|
||||
|
||||
start_command = ['/usr/bin/systemctl', 'start', 'chronyd']
|
||||
chrony_set_server = ['/usr/bin/chronyc', 'add', 'server', srv]
|
||||
chrony_disconnect_all = ['/usr/bin/chronyc', 'offline']
|
||||
chrony_connect = ['/usr/bin/chronyc', 'online', srv]
|
||||
|
||||
log('D123')
|
||||
|
||||
proc = subprocess.Popen(start_command)
|
||||
proc.wait()
|
||||
|
||||
if srv:
|
||||
logdata = dict()
|
||||
logdata['srv'] = srv
|
||||
log('D124', logdata)
|
||||
|
||||
proc = subprocess.Popen(chrony_disconnect_all)
|
||||
proc.wait()
|
||||
|
||||
proc = subprocess.Popen(chrony_set_server)
|
||||
proc.wait()
|
||||
|
||||
proc = subprocess.Popen(chrony_connect)
|
||||
proc.wait()
|
||||
|
||||
def _stop_chrony_client(self):
|
||||
stop_command = ['/usr/bin/systemctl', 'stop', 'chronyd']
|
||||
log('D125')
|
||||
proc = subprocess.Popen(stop_command)
|
||||
proc.wait()
|
||||
|
||||
def run(self):
|
||||
server_type = self.storage.get_hklm_entry(self.ntp_server_type)
|
||||
server_address = self.storage.get_hklm_entry(self.ntp_server_address_key)
|
||||
ntp_server_enabled = self.storage.get_hklm_entry(self.ntp_server_enabled)
|
||||
ntp_client_enabled = self.storage.get_hklm_entry(self.ntp_client_enabled)
|
||||
|
||||
if server_type and server_type.data:
|
||||
if NTPServerType.NTP.value != server_type.data:
|
||||
logdata = dict()
|
||||
logdata['server_type'] = server_type
|
||||
log('W10', logdata)
|
||||
else:
|
||||
log('D126')
|
||||
if ntp_server_enabled:
|
||||
if '1' == ntp_server_enabled.data and server_address:
|
||||
log('D127')
|
||||
self._start_chrony_client(server_address)
|
||||
self._chrony_as_server()
|
||||
elif '0' == ntp_server_enabled.data:
|
||||
log('D128')
|
||||
self._chrony_as_client()
|
||||
else:
|
||||
log('D129')
|
||||
|
||||
elif ntp_client_enabled:
|
||||
if '1' == ntp_client_enabled.data:
|
||||
log('D130')
|
||||
self._start_chrony_client()
|
||||
elif '0' == ntp_client_enabled.data:
|
||||
log('D131')
|
||||
self._stop_chrony_client()
|
||||
else:
|
||||
log('D132')
|
||||
|
||||
def apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D121')
|
||||
self.run()
|
||||
else:
|
||||
log('D133')
|
||||
|
@ -17,108 +17,20 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import logging
|
||||
import subprocess
|
||||
from util.logging import slogm, log
|
||||
from util.rpm import (
|
||||
update
|
||||
, install_rpm
|
||||
, remove_rpm
|
||||
)
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from .applier_frontend import applier_frontend
|
||||
from .appliers.rpm import rpm
|
||||
|
||||
class package_applier(applier_frontend):
|
||||
__module_name = 'PackagesApplier'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
__install_key_name = 'Install'
|
||||
__remove_key_name = 'Remove'
|
||||
__sync_key_name = 'Sync'
|
||||
__hklm_branch = 'Software\\BaseALT\\Policies\\Packages'
|
||||
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
|
||||
install_branch = '{}\\{}%'.format(self.__hklm_branch, self.__install_key_name)
|
||||
remove_branch = '{}\\{}%'.format(self.__hklm_branch, self.__remove_key_name)
|
||||
sync_branch = '{}\\{}%'.format(self.__hklm_branch, self.__sync_key_name)
|
||||
self.fulcmd = list()
|
||||
self.fulcmd.append('/usr/libexec/gpupdate/pkcon_runner')
|
||||
self.fulcmd.append('--loglevel')
|
||||
logger = logging.getLogger()
|
||||
self.fulcmd.append(str(logger.level))
|
||||
self.install_packages_setting = self.storage.filter_hklm_entries(install_branch)
|
||||
self.remove_packages_setting = self.storage.filter_hklm_entries(remove_branch)
|
||||
self.sync_packages_setting = self.storage.filter_hklm_entries(sync_branch)
|
||||
self.flagSync = True
|
||||
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
def run(self):
|
||||
for flag in self.sync_packages_setting:
|
||||
self.flagSync = bool(flag.data)
|
||||
|
||||
if 0 < self.install_packages_setting.count() or 0 < self.remove_packages_setting.count():
|
||||
if self.flagSync:
|
||||
try:
|
||||
subprocess.check_call(self.fulcmd)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['msg'] = str(exc)
|
||||
log('E55', logdata)
|
||||
else:
|
||||
try:
|
||||
subprocess.Popen(self.fulcmd,close_fds=False)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['msg'] = str(exc)
|
||||
log('E61', logdata)
|
||||
|
||||
def apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D138')
|
||||
self.run()
|
||||
else:
|
||||
log('D139')
|
||||
|
||||
pass
|
||||
|
||||
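The run() method above switches between a blocking call and a detached process depending on the Sync flag. A reduced sketch of that dispatch pattern follows; the command list is a hypothetical example of what fulcmd may contain, not a guaranteed invocation.

import subprocess

def run_helper(cmd, sync):
    if sync:
        subprocess.check_call(cmd)               # block: policy application waits for the transaction
    else:
        subprocess.Popen(cmd, close_fds=False)   # fire and forget: do not delay login

# run_helper(['/usr/libexec/gpupdate/pkcon_runner', '--loglevel', '4'], sync=True)
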
class package_applier_user(applier_frontend):
|
||||
__module_name = 'PackagesApplierUser'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
__install_key_name = 'Install'
|
||||
__remove_key_name = 'Remove'
|
||||
__sync_key_name = 'Sync'
|
||||
__hkcu_branch = 'Software\\BaseALT\\Policies\\Packages'
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
self.fulcmd = list()
|
||||
self.fulcmd.append('/usr/libexec/gpupdate/pkcon_runner')
|
||||
self.fulcmd.append('--user')
|
||||
self.fulcmd.append(self.username)
|
||||
self.fulcmd.append('--loglevel')
|
||||
logger = logging.getLogger()
|
||||
self.fulcmd.append(str(logger.level))
|
||||
|
||||
install_branch = '{}\\{}%'.format(self.__hkcu_branch, self.__install_key_name)
|
||||
remove_branch = '{}\\{}%'.format(self.__hkcu_branch, self.__remove_key_name)
|
||||
sync_branch = '{}\\{}%'.format(self.__hkcu_branch, self.__sync_key_name)
|
||||
|
||||
self.install_packages_setting = self.storage.filter_hkcu_entries(self.sid, install_branch)
|
||||
self.remove_packages_setting = self.storage.filter_hkcu_entries(self.sid, remove_branch)
|
||||
self.sync_packages_setting = self.storage.filter_hkcu_entries(self.sid, sync_branch)
|
||||
self.flagSync = False
|
||||
|
||||
self.__module_enabled = check_enabled(self.storage, self.__module_name, self.__module_experimental)
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def user_context_apply(self):
|
||||
'''
|
||||
@ -126,35 +38,10 @@ class package_applier_user(applier_frontend):
|
||||
'''
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
for flag in self.sync_packages_setting:
|
||||
if flag.data:
|
||||
self.flagSync = bool(int(flag.data))
|
||||
|
||||
if 0 < self.install_packages_setting.count() or 0 < self.remove_packages_setting.count():
|
||||
if self.flagSync:
|
||||
try:
|
||||
subprocess.check_call(self.fulcmd)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['msg'] = str(exc)
|
||||
log('E60', logdata)
|
||||
else:
|
||||
try:
|
||||
subprocess.Popen(self.fulcmd,close_fds=False)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['msg'] = str(exc)
|
||||
log('E62', logdata)
|
||||
|
||||
def admin_context_apply(self):
|
||||
'''
|
||||
Install software assigned to the specified username regardless of
which computer they use to log into the system.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D140')
|
||||
self.run()
|
||||
else:
|
||||
log('D141')
|
||||
pass
|
||||
|
||||
|
@ -16,165 +16,36 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
, check_windows_mapping_enabled
|
||||
)
|
||||
from .applier_frontend import applier_frontend
|
||||
from .appliers.polkit import polkit
|
||||
from util.logging import log
|
||||
from util.logging import slogm
|
||||
|
||||
import logging
|
||||
|
||||
class polkit_applier(applier_frontend):
|
||||
__module_name = 'PolkitApplier'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
__deny_all_win = 'Software\\Policies\\Microsoft\\Windows\\RemovableStorageDevices\\Deny_All'
|
||||
__registry_branch = 'Software\\BaseALT\\Policies\\Polkit\\'
|
||||
__registry_locks_branch = 'Software\\BaseALT\\Policies\\PolkitLocks\\'
|
||||
__deny_all = 'Software\\Policies\\Microsoft\\Windows\\RemovableStorageDevices\\Deny_All'
|
||||
__polkit_map = {
|
||||
__deny_all_win: ['49-gpoa_disk_permissions', { 'Deny_All': 0 }],
|
||||
__registry_branch : ['49-alt_group_policy_permissions', {}],
|
||||
__registry_locks_branch : ['47-alt_group_policy_permissions', {}]
|
||||
__deny_all: ['99-gpoa_disk_permissions', { 'Deny_All': 0 }]
|
||||
}
|
||||
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
deny_all_win = None
|
||||
if check_windows_mapping_enabled(self.storage):
|
||||
deny_all_win = storage.filter_hklm_entries(self.__deny_all_win).first()
|
||||
deny_all = storage.filter_hklm_entries(self.__deny_all).first()
|
||||
# Deny_All hook: initialize defaults
|
||||
polkit_filter = '{}%'.format(self.__registry_branch)
|
||||
polkit_locks_filter = '{}%'.format(self.__registry_locks_branch)
|
||||
self.polkit_keys = self.storage.filter_hklm_entries(polkit_filter)
|
||||
self.polkit_locks = self.storage.filter_hklm_entries(polkit_locks_filter)
|
||||
template_file = self.__polkit_map[self.__deny_all_win][0]
|
||||
template_vars = self.__polkit_map[self.__deny_all_win][1]
|
||||
template_file_all = self.__polkit_map[self.__registry_branch][0]
|
||||
template_vars_all = self.__polkit_map[self.__registry_branch][1]
|
||||
template_file_all_lock = self.__polkit_map[self.__registry_locks_branch][0]
|
||||
template_vars_all_lock = self.__polkit_map[self.__registry_locks_branch][1]
|
||||
locks = list()
|
||||
for lock in self.polkit_locks:
|
||||
if bool(int(lock.data)):
|
||||
locks.append(lock.valuename)
|
||||
|
||||
dict_lists_rules = {'No': [[], []],
|
||||
'Yes': [[], []],
|
||||
'Auth_self' : [[], []],
|
||||
'Auth_admin': [[], []],
|
||||
'Auth_self_keep': [[], []],
|
||||
'Auth_admin_keep': [[], []]}
|
||||
|
||||
check_and_add_to_list = (lambda it, act: dict_lists_rules[act][0].append(it.valuename)
|
||||
if it.valuename not in locks
|
||||
else dict_lists_rules[act][1].append(it.valuename))
|
||||
|
||||
for it_data in self.polkit_keys:
|
||||
check_and_add_to_list(it_data, it_data.data)
|
||||
|
||||
for key, item in dict_lists_rules.items():
|
||||
self.__polkit_map[self.__registry_branch][1][key] = item[0]
|
||||
self.__polkit_map[self.__registry_locks_branch][1][key] = item[1]
|
||||
|
||||
if deny_all_win:
|
||||
logdata = dict()
|
||||
logdata['Deny_All_win'] = deny_all_win.data
|
||||
log('D69', logdata)
|
||||
self.__polkit_map[self.__deny_all_win][1]['Deny_All'] = deny_all_win.data
|
||||
template_file = self.__polkit_map[self.__deny_all][0]
|
||||
template_vars = self.__polkit_map[self.__deny_all][1]
|
||||
if deny_all:
|
||||
logging.debug(slogm('Deny_All setting found: {}'.format(deny_all.data)))
|
||||
self.__polkit_map[self.__deny_all][1]['Deny_All'] = deny_all.data
|
||||
else:
|
||||
log('D71')
|
||||
logging.debug(slogm('Deny_All setting not found'))
|
||||
self.policies = []
|
||||
self.policies.append(polkit(template_file, template_vars))
|
||||
self.policies.append(polkit(template_file_all, template_vars_all))
|
||||
self.policies.append(polkit(template_file_all_lock, template_vars_all_lock))
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def apply(self):
|
||||
'''
|
||||
Trigger control facility invocation.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D73')
|
||||
for policy in self.policies:
|
||||
policy.generate()
|
||||
else:
|
||||
log('D75')
|
||||
|
||||
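The lambda in polkit_applier.__init__ above sorts every registry value into either the "free" or the "locked" list of its polkit action. A simplified sketch of that bucketing, with invented helper names and assuming entries expose the same valuename/data attributes as the storage rows above:

ACTIONS = ('No', 'Yes', 'Auth_self', 'Auth_admin', 'Auth_self_keep', 'Auth_admin_keep')

def bucket_rules(entries, locks):
    rules = {action: ([], []) for action in ACTIONS}
    for entry in entries:
        free, locked = rules[entry.data]          # entry.data names the polkit action
        (locked if entry.valuename in locks else free).append(entry.valuename)
    return rules
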
class polkit_applier_user(applier_frontend):
|
||||
__module_name = 'PolkitApplierUser'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
__deny_all_win = 'Software\\Policies\\Microsoft\\Windows\\RemovableStorageDevices\\Deny_All'
|
||||
__registry_branch = 'Software\\BaseALT\\Policies\\Polkit\\'
|
||||
__polkit_map = {
|
||||
__deny_all_win: ['48-gpoa_disk_permissions_user', { 'Deny_All': 0, 'User': '' }],
|
||||
__registry_branch : ['48-alt_group_policy_permissions_user', {'User': ''}]
|
||||
}
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
deny_all_win = None
|
||||
if check_windows_mapping_enabled(self.storage):
|
||||
deny_all_win = storage.filter_hkcu_entries(self.sid, self.__deny_all_win).first()
|
||||
polkit_filter = '{}%'.format(self.__registry_branch)
|
||||
self.polkit_keys = self.storage.filter_hkcu_entries(self.sid, polkit_filter)
|
||||
# Deny_All hook: initialize defaults
|
||||
template_file = self.__polkit_map[self.__deny_all_win][0]
|
||||
template_vars = self.__polkit_map[self.__deny_all_win][1]
|
||||
template_file_all = self.__polkit_map[self.__registry_branch][0]
|
||||
template_vars_all = self.__polkit_map[self.__registry_branch][1]
|
||||
|
||||
dict_lists_rules = {'No': [],
|
||||
'Yes': [],
|
||||
'Auth_self': [],
|
||||
'Auth_admin': [],
|
||||
'Auth_self_keep': [],
|
||||
'Auth_admin_keep': []}
|
||||
|
||||
for it_data in self.polkit_keys:
|
||||
dict_lists_rules[it_data.data].append(it_data.valuename)
|
||||
|
||||
self.__polkit_map[self.__registry_branch][1]['User'] = self.username
|
||||
|
||||
for key, item in dict_lists_rules.items():
|
||||
self.__polkit_map[self.__registry_branch][1][key] = item
|
||||
|
||||
if deny_all_win:
|
||||
logdata = dict()
|
||||
logdata['user'] = self.username
|
||||
logdata['Deny_All_win'] = deny_all_win.data
|
||||
log('D70', logdata)
|
||||
self.__polkit_map[self.__deny_all_win][1]['Deny_All'] = deny_all_win.data
|
||||
self.__polkit_map[self.__deny_all_win][1]['User'] = self.username
|
||||
else:
|
||||
log('D72')
|
||||
self.policies = []
|
||||
self.policies.append(polkit(template_file, template_vars, self.username))
|
||||
self.policies.append(polkit(template_file_all, template_vars_all, self.username))
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def user_context_apply(self):
|
||||
pass
|
||||
|
||||
def admin_context_apply(self):
|
||||
'''
|
||||
Trigger control facility invocation.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D74')
|
||||
for policy in self.policies:
|
||||
policy.generate()
|
||||
else:
|
||||
log('D76')
|
||||
|
||||
for policy in self.policies:
|
||||
policy.generate()
|
||||
|
||||
|
@ -1,157 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2022 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
from util.logging import log
|
||||
from .appliers.folder import remove_dir_tree
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
|
||||
class scripts_applier(applier_frontend):
|
||||
__module_name = 'ScriptsApplier'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
__cache_scripts = '/var/cache/gpupdate_scripts_cache/machine/'
|
||||
|
||||
def __init__(self, storage, sid):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.startup_scripts = self.storage.get_scripts(self.sid, 'STARTUP')
|
||||
self.shutdown_scripts = self.storage.get_scripts(self.sid, 'SHUTDOWN')
|
||||
self.folder_path = Path(self.__cache_scripts)
|
||||
self.__module_enabled = check_enabled(self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def cleaning_cache(self):
|
||||
log('D160')
|
||||
try:
|
||||
remove_dir_tree(self.folder_path, True, True, True,)
|
||||
except FileNotFoundError as exc:
|
||||
log('D154')
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['exc'] = exc
|
||||
log('E64', logdata)
|
||||
|
||||
def filling_cache(self):
|
||||
'''
|
||||
Create or update the cache directories for scripts and copy the scripts into them
|
||||
'''
|
||||
self.folder_path.mkdir(parents=True, exist_ok=True)
|
||||
for ts in self.startup_scripts:
|
||||
script_path = os.path.join(self.__cache_scripts, 'STARTUP')
|
||||
install_script(ts, script_path, '700')
|
||||
for ts in self.shutdown_scripts:
|
||||
script_path = os.path.join(self.__cache_scripts, 'SHUTDOWN')
|
||||
install_script(ts, script_path, '700')
|
||||
|
||||
def run(self):
|
||||
self.filling_cache()
|
||||
|
||||
def apply(self):
|
||||
self.cleaning_cache()
|
||||
if self.__module_enabled:
|
||||
log('D156')
|
||||
self.run()
|
||||
else:
|
||||
log('D157')
|
||||
|
||||
class scripts_applier_user(applier_frontend):
|
||||
__module_name = 'ScriptsApplierUser'
|
||||
__module_experimental = True
|
||||
__module_enabled = False
|
||||
__cache_scripts = '/var/cache/gpupdate_scripts_cache/users/'
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.logon_scripts = self.storage.get_scripts(self.sid, 'LOGON')
|
||||
self.logoff_scripts = self.storage.get_scripts(self.sid, 'LOGOFF')
|
||||
self.username = username
|
||||
self.folder_path = Path(self.__cache_scripts + self.username)
|
||||
self.__module_enabled = check_enabled(self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
self.filling_cache()
|
||||
|
||||
def cleaning_cache(self):
|
||||
log('D161')
|
||||
try:
|
||||
remove_dir_tree(self.folder_path, True, True, True,)
|
||||
except FileNotFoundError as exc:
|
||||
log('D155')
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['exc'] = exc
|
||||
log('E65', logdata)
|
||||
|
||||
def filling_cache(self):
|
||||
'''
|
||||
Create or update the cache directories for scripts and copy the scripts into them
|
||||
'''
|
||||
self.folder_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
for ts in self.logon_scripts:
|
||||
script_path = os.path.join(self.__cache_scripts, self.username, 'LOGON')
|
||||
install_script(ts, script_path, '755')
|
||||
for ts in self.logoff_scripts:
|
||||
script_path = os.path.join(self.__cache_scripts, self.username, 'LOGOFF')
|
||||
install_script(ts, script_path, '755')
|
||||
|
||||
def user_context_apply(self):
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
self.filling_cache()
|
||||
|
||||
def admin_context_apply(self):
|
||||
self.cleaning_cache()
|
||||
if self.__module_enabled:
|
||||
log('D158')
|
||||
self.run()
|
||||
else:
|
||||
log('D159')
|
||||
|
||||
def install_script(storage_script_entry, script_dir, access_permissions):
|
||||
'''
|
||||
Copy a script into its cache directory, creating the directory when
needed; if the script has arguments, store them next to it in a
companion '.arg' directory.
|
||||
'''
|
||||
dir_cr = Path(script_dir)
|
||||
dir_cr.mkdir(parents=True, exist_ok=True)
|
||||
script_name = str(int(storage_script_entry.number)).zfill(5) + '_' + os.path.basename(storage_script_entry.path)
|
||||
script_file = os.path.join(script_dir, script_name)
|
||||
shutil.copyfile(storage_script_entry.path, script_file)
|
||||
|
||||
os.chmod(script_file, int(access_permissions, base = 8))
|
||||
if storage_script_entry.args:
|
||||
dir_path = script_dir + '/' + script_name + '.arg'
|
||||
dir_arg = Path(dir_path)
|
||||
dir_arg.mkdir(parents=True, exist_ok=True)
|
||||
file_arg = open(dir_path + '/arg', 'w')
|
||||
file_arg.write(storage_script_entry.args)
|
||||
file_arg.close()
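A hypothetical illustration of the cache layout install_script() produces: an entry with number=7, path='/srv/policy/logon.sh' and args='--quiet' stored for user 'jdoe' would land at /var/cache/gpupdate_scripts_cache/users/jdoe/LOGON/00007_logon.sh, with the arguments in 00007_logon.sh.arg/arg next to it. The ordering prefix comes from zero-padding the script number:

import os

def cached_name(number, source_path):
    # zero-padded number keeps the scripts sorted in execution order
    return str(int(number)).zfill(5) + '_' + os.path.basename(source_path)

assert cached_name(7, '/srv/policy/logon.sh') == '00007_logon.sh'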
|
@ -19,46 +19,35 @@
|
||||
import logging
|
||||
import subprocess
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from .applier_frontend import applier_frontend
|
||||
from gpt.shortcuts import json2sc
|
||||
from util.windows import expand_windows_var
|
||||
from util.logging import slogm, log
|
||||
from util.logging import slogm
|
||||
from util.util import (
|
||||
get_homedir,
|
||||
homedir_exists
|
||||
)
|
||||
|
||||
def storage_get_shortcuts(storage, sid, username=None):
|
||||
def storage_get_shortcuts(storage, sid):
|
||||
'''
|
||||
Query storage for shortcuts' rows for specified SID.
|
||||
'''
|
||||
shortcut_objs = storage.get_shortcuts(sid)
|
||||
shortcuts = list()
|
||||
|
||||
for sc in shortcut_objs:
|
||||
if username:
|
||||
sc.set_expanded_path(expand_windows_var(sc.path, username))
|
||||
for sc_obj in shortcut_objs:
|
||||
sc = json2sc(sc_obj.shortcut)
|
||||
shortcuts.append(sc)
|
||||
|
||||
return shortcuts
|
||||
|
||||
def apply_shortcut(shortcut, username=None):
|
||||
def write_shortcut(shortcut, username=None):
|
||||
'''
|
||||
Apply the single shortcut file to the disk.
|
||||
Write the single shortcut file to the disk.
|
||||
|
||||
:username: None means working with machine variables and paths
|
||||
'''
|
||||
dest_abspath = shortcut.dest
|
||||
if not dest_abspath.startswith('/') and not dest_abspath.startswith('%'):
|
||||
dest_abspath = '%HOME%/' + dest_abspath
|
||||
logdata = dict()
|
||||
logdata['shortcut'] = dest_abspath
|
||||
logdata['for'] = username
|
||||
log('D105', logdata)
|
||||
dest_abspath = expand_windows_var(dest_abspath, username).replace('\\', '/') + '.desktop'
|
||||
dest_abspath = expand_windows_var(shortcut.dest, username).replace('\\', '/') + '.desktop'
|
||||
|
||||
# Check that we're working for user, not on global system level
|
||||
if username:
|
||||
@ -67,106 +56,52 @@ def apply_shortcut(shortcut, username=None):
|
||||
if dest_abspath.startswith(get_homedir(username)):
|
||||
# Don't try to operate on non-existent directory
|
||||
if not homedir_exists(username):
|
||||
logdata = dict()
|
||||
logdata['user'] = username
|
||||
logdata['dest_abspath'] = dest_abspath
|
||||
log('W7', logdata)
|
||||
logging.warning(slogm('No home directory exists for user {}: will not create link {}'.format(username, dest_abspath)))
|
||||
return None
|
||||
else:
|
||||
logdata = dict()
|
||||
logdata['user'] = username
|
||||
logdata['bad path'] = dest_abspath
|
||||
log('W8', logdata)
|
||||
return None
|
||||
|
||||
if '%' in dest_abspath:
|
||||
logdata = dict()
|
||||
logdata['dest_abspath'] = dest_abspath
|
||||
log('E53', logdata)
|
||||
return None
|
||||
|
||||
if not dest_abspath.startswith('/'):
|
||||
logdata = dict()
|
||||
logdata['dest_abspath'] = dest_abspath
|
||||
log('E54', logdata)
|
||||
return None
|
||||
logdata = dict()
|
||||
logdata['file'] = dest_abspath
|
||||
logdata['with_action'] = shortcut.action
|
||||
log('D106', logdata)
|
||||
shortcut.apply_desktop(dest_abspath)
|
||||
logging.debug(slogm('Writing shortcut file to {}'.format(dest_abspath)))
|
||||
shortcut.write_desktop(dest_abspath)
|
||||
|
||||
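The destination handling in write_shortcut() above prefixes relative paths with %HOME%, expands the Windows-style variables, flips backslashes and appends the .desktop suffix. A simplified sketch, where the replace() call stands in for expand_windows_var() and all values are made up:

def desktop_path(dest, homedir):
    if not dest.startswith('/') and not dest.startswith('%'):
        dest = '%HOME%/' + dest
    dest = dest.replace('%HOME%', homedir)        # stand-in for expand_windows_var()
    return dest.replace('\\', '/') + '.desktop'

# desktop_path('Desktop\\Editor', '/home/jdoe') -> '/home/jdoe/Desktop/Editor.desktop'
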
class shortcut_applier(applier_frontend):
|
||||
__module_name = 'ShortcutsApplier'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def run(self):
|
||||
def apply(self):
|
||||
shortcuts = storage_get_shortcuts(self.storage, self.storage.get_info('machine_sid'))
|
||||
if shortcuts:
|
||||
for sc in shortcuts:
|
||||
apply_shortcut(sc)
|
||||
if len(shortcuts) > 0:
|
||||
# According to ArchWiki - this thing is needed to rebuild MIME
|
||||
# type cache in order file bindings to work. This rebuilds
|
||||
# databases located in /usr/share/applications and
|
||||
# /usr/local/share/applications
|
||||
subprocess.check_call(['/usr/bin/update-desktop-database'])
|
||||
write_shortcut(sc)
|
||||
else:
|
||||
logdata = dict()
|
||||
logdata['machine_sid'] = self.storage.get_info('machine_sid')
|
||||
log('D100', logdata)
|
||||
|
||||
def apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D98')
|
||||
self.run()
|
||||
else:
|
||||
log('D99')
|
||||
logging.debug(slogm('No shortcuts to process for {}'.format(self.storage.get_info('machine_sid'))))
|
||||
# According to ArchWiki - this thing is needed to rebuild MIME
|
||||
# type cache in order file bindings to work. This rebuilds
|
||||
# databases located in /usr/share/applications and
|
||||
# /usr/local/share/applications
|
||||
subprocess.check_call(['/usr/bin/update-desktop-database'])
|
||||
|
||||
class shortcut_applier_user(applier_frontend):
|
||||
__module_name = 'ShortcutsApplierUser'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
|
||||
def run(self, in_usercontext):
|
||||
shortcuts = storage_get_shortcuts(self.storage, self.sid, self.username)
|
||||
def user_context_apply(self):
|
||||
shortcuts = storage_get_shortcuts(self.storage, self.sid)
|
||||
|
||||
if shortcuts:
|
||||
for sc in shortcuts:
|
||||
if in_usercontext and sc.is_usercontext():
|
||||
apply_shortcut(sc, self.username)
|
||||
if not in_usercontext and not sc.is_usercontext():
|
||||
apply_shortcut(sc, self.username)
|
||||
if sc.is_usercontext():
|
||||
write_shortcut(sc, self.username)
|
||||
else:
|
||||
logdata = dict()
|
||||
logdata['sid'] = self.sid
|
||||
log('D100', logdata)
|
||||
|
||||
def user_context_apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D101')
|
||||
self.run(True)
|
||||
else:
|
||||
log('D102')
|
||||
logging.debug(slogm('No shortcuts to process for {}'.format(self.sid)))
|
||||
|
||||
def admin_context_apply(self):
|
||||
if self.__module_enabled:
|
||||
log('D103')
|
||||
self.run(False)
|
||||
else:
|
||||
log('D104')
|
||||
shortcuts = storage_get_shortcuts(self.storage, self.sid)
|
||||
|
||||
if shortcuts:
|
||||
for sc in shortcuts:
|
||||
if not sc.is_usercontext():
|
||||
write_shortcut(sc, self.username)
|
||||
else:
|
||||
logging.debug(slogm('No shortcuts to process for {}'.format(self.sid)))
|
||||
|
||||
|
@ -16,67 +16,39 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
from .applier_frontend import applier_frontend
|
||||
from .appliers.systemd import systemd_unit
|
||||
from util.logging import slogm, log
|
||||
from util.logging import slogm
|
||||
|
||||
import logging
|
||||
|
||||
class systemd_applier(applier_frontend):
|
||||
__module_name = 'SystemdApplier'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
__registry_branch = 'Software/BaseALT/Policies/SystemdUnits'
|
||||
__registry_branch = 'Software\\BaseALT\\Policies\\SystemdUnits'
|
||||
|
||||
def __init__(self, storage):
|
||||
self.storage = storage
|
||||
self.systemd_unit_settings = self.storage.filter_hklm_entries(self.__registry_branch)
|
||||
self.systemd_unit_settings = self.storage.filter_hklm_entries('Software\\BaseALT\\Policies\\SystemdUnits%')
|
||||
self.units = []
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def run(self):
|
||||
for setting in self.systemd_unit_settings:
|
||||
valuename = setting.hive_key.rpartition('/')[2]
|
||||
try:
|
||||
self.units.append(systemd_unit(valuename, int(setting.data)))
|
||||
logdata = dict()
|
||||
logdata['unit'] = format(valuename)
|
||||
log('I4', logdata)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['unit'] = format(valuename)
|
||||
logdata['exc'] = exc
|
||||
log('I5', logdata)
|
||||
for unit in self.units:
|
||||
try:
|
||||
unit.apply()
|
||||
except:
|
||||
logdata = dict()
|
||||
logdata['unit'] = unit.unit_name
|
||||
log('E45', logdata)
|
||||
|
||||
def apply(self):
|
||||
'''
|
||||
Trigger control facility invocation.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D78')
|
||||
self.run()
|
||||
else:
|
||||
log('D79')
|
||||
for setting in self.systemd_unit_settings:
|
||||
valuename = setting.hive_key.rpartition('\\')[2]
|
||||
try:
|
||||
self.units.append(systemd_unit(valuename, int(setting.data)))
|
||||
logging.info(slogm('Working with systemd unit {}'.format(valuename)))
|
||||
except Exception as exc:
|
||||
logging.info(slogm('Unable to work with systemd unit {}: {}'.format(valuename, exc)))
|
||||
for unit in self.units:
|
||||
try:
|
||||
unit.apply()
|
||||
except:
|
||||
logging.error(slogm('Failed applying unit {}'.format(unit.unit_name)))
|
||||
|
||||
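Both sides of the diff above recover the unit name from the tail of the registry hive key and treat the value data as an on/off integer. An illustration with a hypothetical key and value:

hive_key = 'Software\\BaseALT\\Policies\\SystemdUnits\\sshd.service'
unit_name = hive_key.rpartition('\\')[2]   # -> 'sshd.service'
state = bool(int('1'))                     # True: the unit should be enabled and started
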
class systemd_applier_user(applier_frontend):
|
||||
__module_name = 'SystemdApplierUser'
|
||||
__module_experimental = False
|
||||
__module_enabled = True
|
||||
__registry_branch = 'Software/BaseALT/Policies/SystemdUnits'
|
||||
__registry_branch = 'Software\\BaseALT\\Policies\\SystemdUnits'
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
|
@ -1,198 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2022 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .applier_frontend import (
|
||||
applier_frontend
|
||||
, check_enabled
|
||||
)
|
||||
|
||||
import json
|
||||
import os
|
||||
from util.logging import log
|
||||
from util.util import is_machine_name, string_to_literal_eval
|
||||
|
||||
class yandex_browser_applier(applier_frontend):
|
||||
__module_name = 'YandexBrowserApplier'
|
||||
__module_enabled = True
|
||||
__module_experimental = False
|
||||
__registry_branch = 'Software/Policies/YandexBrowser'
|
||||
__managed_policies_path = '/etc/opt/yandex/browser/policies/managed'
|
||||
__recommended_policies_path = '/etc/opt/yandex/browser/policies/recommended'
|
||||
|
||||
def __init__(self, storage, sid, username):
|
||||
self.storage = storage
|
||||
self.sid = sid
|
||||
self.username = username
|
||||
self._is_machine_name = is_machine_name(self.username)
|
||||
yandex_filter = '{}%'.format(self.__registry_branch)
|
||||
self.yandex_keys = self.storage.filter_hklm_entries(yandex_filter)
|
||||
|
||||
self.policies_json = dict()
|
||||
|
||||
self.__module_enabled = check_enabled(
|
||||
self.storage
|
||||
, self.__module_name
|
||||
, self.__module_experimental
|
||||
)
|
||||
|
||||
def machine_apply(self):
|
||||
'''
|
||||
Apply machine settings.
|
||||
'''
|
||||
|
||||
destfile = os.path.join(self.__managed_policies_path, 'policies.json')
|
||||
|
||||
try:
|
||||
recommended__json = self.policies_json.pop('Recommended')
|
||||
except:
|
||||
recommended__json = {}
|
||||
|
||||
#Replacing all nested dictionaries with a list
|
||||
dict_item_to_list = (
|
||||
lambda target_dict :
|
||||
{key:[*val.values()] if type(val) == dict else string_to_literal_eval(val) for key,val in target_dict.items()}
|
||||
)
|
||||
os.makedirs(self.__managed_policies_path, exist_ok=True)
|
||||
with open(destfile, 'w') as f:
|
||||
json.dump(dict_item_to_list(self.policies_json), f)
|
||||
logdata = dict()
|
||||
logdata['destfile'] = destfile
|
||||
log('D185', logdata)
|
||||
|
||||
destfilerec = os.path.join(self.__recommended_policies_path, 'policies.json')
|
||||
os.makedirs(self.__recommended_policies_path, exist_ok=True)
|
||||
with open(destfilerec, 'w') as f:
|
||||
json.dump(dict_item_to_list(recommended__json), f)
|
||||
logdata = dict()
|
||||
logdata['destfilerec'] = destfilerec
|
||||
log('D185', logdata)
|
||||
|
||||
|
||||
def apply(self):
|
||||
'''
|
||||
All of the actual work is done here.
|
||||
'''
|
||||
if self.__module_enabled:
|
||||
log('D183')
|
||||
self.create_dict(self.yandex_keys)
|
||||
self.machine_apply()
|
||||
else:
|
||||
log('D184')
|
||||
|
||||
def get_valuename_typeint(self):
|
||||
'''
|
||||
List of keys resulting from parsing chrome.admx with parsing_chrom_admx_intvalues.py
|
||||
'''
|
||||
valuename_typeint = (['DefaultPageSaveSettings',
|
||||
'DefaultUploadSetting',
|
||||
'YandexAutoLaunchMode',
|
||||
'DefaultClipboardSetting',
|
||||
'DefaultFileSystemReadGuardSetting',
|
||||
'DefaultFileSystemWriteGuardSetting',
|
||||
'DefaultImagesSetting',
|
||||
'DefaultJavaScriptJitSetting',
|
||||
'DefaultJavaScriptSetting',
|
||||
'DefaultLocalFontsSetting',
|
||||
'DefaultPopupsSetting',
|
||||
'DefaultSensorsSetting',
|
||||
'DefaultSerialGuardSetting',
|
||||
'DefaultWebBluetoothGuardSetting',
|
||||
'DefaultWebHidGuardSetting',
|
||||
'DefaultWebUsbGuardSetting',
|
||||
'DefaultWindowManagementSetting',
|
||||
'SafeSitesFilterBehavior',
|
||||
'YandexUserFeedbackMode',
|
||||
'TurboSettings',
|
||||
'SidePanelMode',
|
||||
'RestoreOnStartup',
|
||||
'RestoreOnStartup_recommended',
|
||||
'BrowserSwitcherParsingMode',
|
||||
'DefaultNotificationsSetting',
|
||||
'YandexPowerSavingMode',
|
||||
'ChromeVariations',
|
||||
'DeveloperToolsAvailability',
|
||||
'DownloadRestrictions',
|
||||
'NetworkPredictionOptions',
|
||||
'DownloadRestrictions_recommended',
|
||||
'NetworkPredictionOptions_recommended',
|
||||
'DefaultCookiesSetting',
|
||||
'DefaultGeolocationSetting',
|
||||
'IncognitoModeAvailability',
|
||||
'DefaultPrintingSettings',
|
||||
'DefaultPluginsSetting',
|
||||
'DefaultInsecureContentSetting',
|
||||
'PasswordProtectionWarningTrigger',
|
||||
'SafeBrowsingProtectionLevel',
|
||||
'SafeBrowsingProtectionLevel_recommended',
|
||||
'DiskCacheSize'])
|
||||
return valuename_typeint
|
||||
|
||||
|
||||
def get_boolean(self,data):
|
||||
if data in ['0', 'false', None, 'none', 0]:
|
||||
return False
|
||||
if data in ['1', 'true', 1]:
|
||||
return True
|
||||
def get_parts(self, hivekeyname):
|
||||
'''
|
||||
Strip the registry branch prefix from the hive key and split the remainder into its path components
|
||||
'''
|
||||
parts = hivekeyname.replace(self.__registry_branch, '').split('/')
|
||||
return parts
|
||||
|
||||
|
||||
def create_dict(self, yandex_keys):
|
||||
'''
|
||||
Collect dictionaries from registry keys into a general dictionary
|
||||
'''
|
||||
counts = dict()
|
||||
#getting the list of keys to read as an integer
|
||||
valuename_typeint = self.get_valuename_typeint()
|
||||
for it_data in yandex_keys:
|
||||
branch = counts
|
||||
try:
|
||||
if type(it_data.data) is bytes:
|
||||
it_data.data = it_data.data.decode(encoding='utf-16').replace('\x00','')
|
||||
parts = self.get_parts(it_data.hive_key)
|
||||
#creating a nested dictionary from elements
|
||||
for part in parts[:-1]:
|
||||
branch = branch.setdefault(part, {})
|
||||
#dictionary key value initialization
|
||||
if it_data.type == 4:
|
||||
if it_data.valuename in valuename_typeint:
|
||||
branch[parts[-1]] = int(it_data.data)
|
||||
else:
|
||||
branch[parts[-1]] = self.get_boolean(it_data.data)
|
||||
else:
|
||||
if it_data.data[0] == '[' and it_data.data[-1] == ']':
|
||||
try:
|
||||
branch[parts[-1]] = json.loads(str(it_data.data))
|
||||
except:
|
||||
branch[parts[-1]] = str(it_data.data).replace('\\', '/')
|
||||
else:
|
||||
branch[parts[-1]] = str(it_data.data).replace('\\', '/')
|
||||
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['Exception'] = exc
|
||||
logdata['keyname'] = it_data.keyname
|
||||
log('D178', logdata)
|
||||
try:
|
||||
self.policies_json = counts['']
|
||||
except:
|
||||
self.policies_json = {}
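create_dict() above folds flat hive keys into the nested dictionary that machine_apply() later dumps to policies.json. A simplified sketch of that folding, operating on (path, value) pairs with the YandexBrowser branch prefix already stripped; the key and value are invented:

def fold(items):
    root = {}
    for path, value in items:
        parts = path.split('/')
        branch = root
        for part in parts[1:-1]:
            branch = branch.setdefault(part, {})   # create intermediate levels on demand
        branch[parts[-1]] = value
    return root

# fold([('/Recommended/HomepageLocation', 'https://www.altlinux.org')])
# -> {'Recommended': {'HomepageLocation': 'https://www.altlinux.org'}}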
gpoa/gpoa
@ -18,17 +18,16 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import gettext
|
||||
import locale
|
||||
|
||||
from backend import backend_factory, save_dconf
|
||||
from backend import backend_factory
|
||||
from frontend.frontend_manager import frontend_manager, determine_username
|
||||
from plugin import plugin_manager
|
||||
from messages import message_with_code
|
||||
|
||||
from util.util import get_machine_name
|
||||
from util.kerberos import machine_kinit
|
||||
from util.users import (
|
||||
is_root,
|
||||
get_process_user
|
||||
@ -36,8 +35,7 @@ from util.users import (
|
||||
from util.arguments import (
|
||||
set_loglevel
|
||||
)
|
||||
from util.logging import log
|
||||
from util.exceptions import geterr
|
||||
from util.logging import slogm
|
||||
from util.signals import signal_handler
|
||||
|
||||
def parse_arguments():
|
||||
@ -58,9 +56,6 @@ def parse_arguments():
|
||||
arguments.add_argument('--noplugins',
|
||||
action='store_true',
|
||||
help='Don\'t start plugins')
|
||||
arguments.add_argument('--list-backends',
|
||||
action='store_true',
|
||||
help='Show list of available backends')
|
||||
arguments.add_argument('--loglevel',
|
||||
type=int,
|
||||
default=4,
|
||||
@ -68,60 +63,34 @@ def parse_arguments():
|
||||
return arguments.parse_args()
|
||||
|
||||
class gpoa_controller:
|
||||
__kinit_successful = False
|
||||
__args = None
|
||||
|
||||
def __init__(self):
|
||||
self.__args = parse_arguments()
|
||||
self.is_machine = False
|
||||
self.noupdate = self.__args.noupdate
|
||||
set_loglevel(self.__args.loglevel)
|
||||
|
||||
locale.bindtextdomain('gpoa', '/usr/lib/python3/site-packages/gpoa/locale')
|
||||
gettext.bindtextdomain('gpoa', '/usr/lib/python3/site-packages/gpoa/locale')
|
||||
gettext.textdomain('gpoa')
|
||||
|
||||
if not self.__args.user:
|
||||
self.username = get_machine_name()
|
||||
user = get_machine_name()
|
||||
self.is_machine = True
|
||||
else:
|
||||
self.username = self.__args.user
|
||||
set_loglevel(self.__args.loglevel)
|
||||
self.__kinit_successful = machine_kinit()
|
||||
|
||||
uname = get_process_user()
|
||||
uid = os.getuid()
|
||||
logdata = dict()
|
||||
logdata['username'] = self.username
|
||||
logdata['is_machine'] = self.is_machine
|
||||
logdata['process_username'] = uname
|
||||
logdata['process_uid'] = uid
|
||||
|
||||
if self.is_machine:
|
||||
log('D61', logdata)
|
||||
else:
|
||||
log('D1', logdata)
|
||||
self.username = determine_username(self.username)
|
||||
logging.debug(slogm('The process was started for user {} with UID {}'.format(uname, uid), uid=uid))
|
||||
|
||||
if not is_root():
|
||||
self.noupdate = True
|
||||
|
||||
if self.is_machine:
|
||||
msgtext = message_with_code('E34')
|
||||
log('E34', {'username': self.username})
|
||||
raise Exception(msgtext)
|
||||
|
||||
log('D59', {'username': self.username})
|
||||
self.username = uname
|
||||
else:
|
||||
log('D60', {'username': self.username})
|
||||
self.username = determine_username(self.__args.user)
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
GPOA controller entry point
|
||||
'''
|
||||
if self.__args.list_backends:
|
||||
print('local')
|
||||
print('samba')
|
||||
return
|
||||
self.start_plugins()
|
||||
self.start_backend()
|
||||
self.start_frontend()
|
||||
|
||||
def start_backend(self):
|
||||
'''
|
||||
@ -132,34 +101,11 @@ class gpoa_controller:
|
||||
if self.__args.nodomain:
|
||||
nodomain = True
|
||||
|
||||
if not self.noupdate:
|
||||
if not self.__args.noupdate:
|
||||
if is_root():
|
||||
back = None
|
||||
try:
|
||||
back = backend_factory(dc, self.username, self.is_machine, nodomain)
|
||||
except Exception as exc:
|
||||
logdata = dict({'msg': str(exc)})
|
||||
einfo = geterr()
|
||||
print(einfo)
|
||||
print(type(einfo))
|
||||
#logdata.update(einfo)
|
||||
log('E12', logdata)
|
||||
back = backend_factory(dc, self.username, self.is_machine, nodomain)
|
||||
if back:
|
||||
try:
|
||||
back.retrieve_and_store()
|
||||
# Start frontend only on successful backend finish
|
||||
self.start_frontend()
|
||||
except Exception as exc:
|
||||
logdata = dict({'message': str(exc)})
|
||||
# In case we're handling "E3" - it means that
|
||||
# this is a very specific exception that was
|
||||
# not handled properly on lower levels of
|
||||
# code so we're also printing file name and
|
||||
# other information.
|
||||
einfo = geterr()
|
||||
logdata.update(einfo)
|
||||
log('E3', logdata)
|
||||
save_dconf(self.username, self.is_machine)
|
||||
back.retrieve_and_store()
|
||||
|
||||
def start_frontend(self):
|
||||
'''
|
||||
@ -169,11 +115,7 @@ class gpoa_controller:
|
||||
appl = frontend_manager(self.username, self.is_machine)
|
||||
appl.apply_parameters()
|
||||
except Exception as exc:
|
||||
logdata = dict({'message': str(exc)})
|
||||
einfo = geterr()
|
||||
#print(einfo)
|
||||
logdata.update(einfo)
|
||||
log('E4', logdata)
|
||||
logging.error(slogm('Error occured while running applier: {}'.format(exc)))
|
||||
|
||||
def start_plugins(self):
|
||||
'''
|
||||
@ -188,7 +130,6 @@ def main():
|
||||
controller.run()
|
||||
|
||||
if __name__ == "__main__":
|
||||
default_handler = signal.getsignal(signal.SIGINT)
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
main()
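A generic sketch of the SIGINT pattern used at the entry point above: remember the default handler, install a custom one for the run, and restore it afterwards. The sleeping main() and the restore step are stand-ins added here for illustration, not part of gpoa itself.

import signal
import time

def main():
    time.sleep(5)                    # stand-in for the real controller run

def handler(signum, frame):
    raise SystemExit(130)            # conventional exit status for an interrupted run

if __name__ == '__main__':
    default_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, handler)
    try:
        main()
    finally:
        signal.signal(signal.SIGINT, default_handler)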
|
||||
|
||||
|
@ -22,13 +22,24 @@ from Crypto.Cipher import AES
|
||||
|
||||
from util.xml import get_xml_root
|
||||
|
||||
def read_drives(drives_file):
|
||||
drives = list()
|
||||
|
||||
for drive in get_xml_root(drives_file):
|
||||
drive_obj = drivemap()
|
||||
|
||||
props = drive.find('Properties')
|
||||
drive_obj.set_login(props.get('username'))
|
||||
drive_obj.set_pass(props.get('cpassword'))
|
||||
|
||||
drives.append(drive_obj)
|
||||
|
||||
return drives
|
||||
|
||||
def decrypt_pass(cpassword):
|
||||
'''
|
||||
AES key for cpassword decryption: http://msdn.microsoft.com/en-us/library/2c15cbf0-f086-4c74-8b70-1f2fa45dd4be%28v=PROT.13%29#endNote2
|
||||
'''
|
||||
if not cpassword:
|
||||
return cpassword
|
||||
|
||||
key = (
|
||||
b'\x4e\x99\x06\xe8'
|
||||
b'\xfc\xb6\x6c\xc9'
|
||||
@ -42,110 +53,23 @@ def decrypt_pass(cpassword):
|
||||
cpass_len = len(cpassword)
|
||||
padded_pass = (cpassword + "=" * ((4 - cpass_len % 4) % 4))
|
||||
password = b64decode(padded_pass)
|
||||
decrypter = AES.new(key, AES.MODE_CBC, '\x00' * 16)
|
||||
decrypter = AES(key, AES.MODE_CBC, '\x00' * 16)
|
||||
|
||||
# decrypt() returns byte array which is immutable and we need to
|
||||
# strip padding, then convert UTF-16LE to UTF-8
|
||||
binstr = decrypter.decrypt(password)
|
||||
by = list()
|
||||
for item in binstr:
|
||||
if item != 16:
|
||||
by.append(item)
|
||||
utf16str = bytes(by).decode('utf-16', 'ignore')
|
||||
utf8str = utf16str.encode('utf8')
|
||||
|
||||
return utf8str.decode()
|
||||
|
||||
def read_drives(drives_file):
|
||||
drives = list()
|
||||
|
||||
for drive in get_xml_root(drives_file):
|
||||
drive_obj = drivemap()
|
||||
|
||||
props = drive.find('Properties')
|
||||
drive_obj.set_login(props.get('username'))
|
||||
drive_obj.set_pass(decrypt_pass(props.get('cpassword')))
|
||||
drive_obj.set_dir(props.get('letter'))
|
||||
drive_obj.set_path(props.get('path'))
|
||||
drive_obj.set_action(props.get('action'))
|
||||
drive_obj.set_thisDrive(props.get('thisDrive'))
|
||||
drive_obj.set_allDrives(props.get('allDrives'))
|
||||
drive_obj.set_label(props.get('label'))
|
||||
drive_obj.set_persistent(props.get('persistent'))
|
||||
drive_obj.set_useLetter(props.get('useLetter'))
|
||||
|
||||
drives.append(drive_obj)
|
||||
|
||||
return drives
|
||||
|
||||
def merge_drives(storage, sid, drive_objects, policy_name):
|
||||
for drive in drive_objects:
|
||||
storage.add_drive(sid, drive, policy_name)
|
||||
|
||||
def json2drive(json_str):
|
||||
json_obj = json.loads(json_str)
|
||||
drive_obj = drivemap()
|
||||
|
||||
drive_obj.set_login(json_obj['login'])
|
||||
drive_obj.set_pass(json_obj['password'])
|
||||
drive_obj.set_dir(json_obj['dir'])
|
||||
drive_obj.set_path(json_obj['path'])
|
||||
|
||||
return drive_obj
|
||||
return decrypter.decrypt(password)
|
||||
|
||||
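A sketch of the cpassword handling above using PyCryptodome: restore the base64 padding, decrypt with AES-CBC and a zero IV, strip the PKCS#7 padding, and decode the UTF-16LE plaintext. The key argument is the 32-byte AES key from the MSDN reference cited in the docstring; the explicit padding strip replaces the byte-by-byte filtering shown above and is an assumption about the padding scheme.

from base64 import b64decode
from Crypto.Cipher import AES

def decrypt_cpassword(cpassword, key):
    padded = cpassword + '=' * ((4 - len(cpassword) % 4) % 4)   # restore base64 padding
    blob = b64decode(padded)
    plain = AES.new(key, AES.MODE_CBC, b'\x00' * 16).decrypt(blob)
    pad = plain[-1]
    if 1 <= pad <= AES.block_size:                              # drop PKCS#7 padding
        plain = plain[:-pad]
    return plain.decode('utf-16-le')
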
class drivemap:
|
||||
def __init__(self):
|
||||
self.login = None
|
||||
self.password = None
|
||||
self.dir = None
|
||||
self.path = None
|
||||
self.action = None
|
||||
self.thisDrive = None
|
||||
self.allDrives = None
|
||||
self.label = None
|
||||
self.persistent = None
|
||||
self.useLetter = None
|
||||
|
||||
def set_login(self, username):
|
||||
self.login = username
|
||||
if not username:
|
||||
self.login = ''
|
||||
|
||||
def set_pass(self, password):
|
||||
self.password = password
|
||||
if not password:
|
||||
self.password = ''
|
||||
|
||||
def set_dir(self, path):
|
||||
self.dir = path
|
||||
|
||||
def set_path(self, path):
|
||||
self.path = path
|
||||
|
||||
def set_action(self, action):
|
||||
self.action = action
|
||||
|
||||
def set_thisDrive(self, thisDrive):
|
||||
self.thisDrive = thisDrive
|
||||
|
||||
def set_allDrives(self, allDrives):
|
||||
self.allDrives = allDrives
|
||||
|
||||
def set_label(self, label):
|
||||
self.label = label
|
||||
|
||||
def set_persistent(self, persistent):
|
||||
self.persistent = persistent
|
||||
|
||||
def set_useLetter(self, useLetter):
|
||||
self.useLetter = useLetter
|
||||
|
||||
def to_json(self):
|
||||
drive = dict()
|
||||
drive['login'] = self.login
|
||||
drive['password'] = self.password
|
||||
drive['dir'] = self.dir
|
||||
drive['path'] = self.path
|
||||
|
||||
contents = dict()
|
||||
contents['drive'] = drive
|
||||
|
@ -18,48 +18,17 @@
|
||||
|
||||
from util.xml import get_xml_root
|
||||
|
||||
from enum import Enum
|
||||
|
||||
class FileAction(Enum):
|
||||
CREATE = 'C'
|
||||
REPLACE = 'R'
|
||||
UPDATE = 'U'
|
||||
DELETE = 'D'
|
||||
|
||||
|
||||
def action_letter2enum(letter):
|
||||
if letter in ['C', 'R', 'U', 'D']:
|
||||
if letter == 'C': return FileAction.CREATE
|
||||
if letter == 'R': return FileAction.REPLACE
|
||||
if letter == 'U': return FileAction.UPDATE
|
||||
if letter == 'D': return FileAction.DELETE
|
||||
|
||||
return FileAction.CREATE
|
||||
|
||||
def read_envvars(envvars_file):
|
||||
variables = list()
|
||||
|
||||
for var in get_xml_root(envvars_file):
|
||||
props = var.find('Properties')
|
||||
name = props.get('name')
|
||||
value = props.get('value')
|
||||
var_obj = envvar(name, value)
|
||||
var_obj.set_action(action_letter2enum(props.get('action', default='C')))
|
||||
var_obj = envvar()
|
||||
|
||||
variables.append(var_obj)
|
||||
|
||||
return variables
|
||||
|
||||
def merge_envvars(storage, sid, envvar_objects, policy_name):
|
||||
for envv in envvar_objects:
|
||||
storage.add_envvar(sid, envv, policy_name)
|
||||
|
||||
class envvar:
|
||||
def __init__(self, name, value):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.action = FileAction.CREATE
|
||||
|
||||
def set_action(self, action):
|
||||
self.action = action
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
@ -22,38 +22,13 @@ def read_files(filesxml):
|
||||
files = list()
|
||||
|
||||
for fil in get_xml_root(filesxml):
|
||||
props = fil.find('Properties')
|
||||
fil_obj = fileentry(props.get('fromPath'))
|
||||
fil_obj.set_action(props.get('action', default='C'))
|
||||
fil_obj.set_target_path(props.get('targetPath', default=None))
|
||||
fil_obj.set_read_only(props.get('readOnly', default=None))
|
||||
fil_obj.set_archive(props.get('archive', default=None))
|
||||
fil_obj.set_hidden(props.get('hidden', default=None))
|
||||
fil_obj.set_suppress(props.get('suppress', default=None))
|
||||
fil_obj.set_executable(props.get('executable', default=None))
|
||||
fil_obj = fileentry()
|
||||
|
||||
files.append(fil_obj)
|
||||
|
||||
return files
|
||||
|
||||
def merge_files(storage, sid, file_objects, policy_name):
|
||||
for fileobj in file_objects:
|
||||
storage.add_file(sid, fileobj, policy_name)
|
||||
|
||||
class fileentry:
|
||||
def __init__(self, fromPath):
|
||||
self.fromPath = fromPath
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def set_action(self, action):
|
||||
self.action = action
|
||||
def set_target_path(self, targetPath):
|
||||
self.targetPath = targetPath
|
||||
def set_read_only(self, readOnly):
|
||||
self.readOnly = readOnly
|
||||
def set_archive(self, archive):
|
||||
self.archive = archive
|
||||
def set_hidden(self, hidden):
|
||||
self.hidden = hidden
|
||||
def set_suppress(self, suppress):
|
||||
self.suppress = suppress
|
||||
def set_executable(self, executable):
|
||||
self.executable = executable
|
||||
|
@ -16,88 +16,19 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
from enum import Enum
|
||||
|
||||
|
||||
from util.xml import get_xml_root
|
||||
|
||||
|
||||
class FileAction(Enum):
|
||||
CREATE = 'C'
|
||||
REPLACE = 'R'
|
||||
UPDATE = 'U'
|
||||
DELETE = 'D'
|
||||
|
||||
|
||||
def action_letter2enum(letter):
|
||||
if letter in ['C', 'R', 'U', 'D']:
|
||||
if letter == 'C': return FileAction.CREATE
|
||||
if letter == 'R': return FileAction.REPLACE
|
||||
if letter == 'U': return FileAction.UPDATE
|
||||
if letter == 'D': return FileAction.DELETE
|
||||
|
||||
return FileAction.CREATE
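A hedged alternative to the chained comparisons above: looking the Enum up by value yields the same mapping and keeps CREATE as the fallback for unknown letters.

from enum import Enum

class FileAction(Enum):
    CREATE = 'C'
    REPLACE = 'R'
    UPDATE = 'U'
    DELETE = 'D'

def action_letter2enum(letter):
    try:
        return FileAction(letter)
    except ValueError:
        return FileAction.CREATE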
|
||||
|
||||
|
||||
def action_enum2letter(enumitem):
|
||||
return enumitem.value
|
||||
|
||||
|
||||
def folder_int2bool(val):
|
||||
value = val
|
||||
|
||||
if type(value) == str:
|
||||
value = int(value)
|
||||
|
||||
if value == 1:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def read_folders(folders_file):
|
||||
folders = list()
|
||||
|
||||
for fld in get_xml_root(folders_file):
|
||||
props = fld.find('Properties')
|
||||
fld_obj = folderentry(props.get('path'))
|
||||
fld_obj.set_action(action_letter2enum(props.get('action', default='C')))
|
||||
fld_obj.set_delete_folder(folder_int2bool(props.get('deleteFolder', default=1)))
|
||||
fld_obj.set_delete_sub_folders(folder_int2bool(props.get('deleteSubFolders', default=1)))
|
||||
fld_obj.set_delete_files(folder_int2bool(props.get('deleteFiles', default=1)))
|
||||
fld_obj.set_hidden_folder(folder_int2bool(props.get('hidden', default=0)))
|
||||
fld_obj = folderentry()
|
||||
|
||||
folders.append(fld_obj)
|
||||
|
||||
|
||||
return folders
|
||||
|
||||
def merge_folders(storage, sid, folder_objects, policy_name):
|
||||
for folder in folder_objects:
|
||||
storage.add_folder(sid, folder, policy_name)
|
||||
|
||||
|
||||
class folderentry:
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
self.action = FileAction.CREATE
|
||||
self.delete_folder = False
|
||||
self.delete_sub_folders = False
|
||||
self.delete_files = False
|
||||
self.hidden_folder = False
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def set_action(self, action):
|
||||
self.action = action
|
||||
|
||||
def set_delete_folder(self, del_bool):
|
||||
self.delete_folder = del_bool
|
||||
|
||||
def set_delete_sub_folders(self, del_bool):
|
||||
self.delete_sub_folders = del_bool
|
||||
|
||||
def set_delete_files(self, del_bool):
|
||||
self.delete_files = del_bool
|
||||
|
||||
def set_hidden_folder(self, hid_bool):
|
||||
self.hidden_folder = hid_bool
|
gpoa/gpt/gpt.py
@ -16,191 +16,57 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
from enum import Enum, unique
|
||||
|
||||
from samba.gp_parse.gp_pol import GPPolParser
|
||||
|
||||
from storage import registry_factory
|
||||
|
||||
from .polfile import (
|
||||
read_polfile
|
||||
, merge_polfile
|
||||
)
|
||||
from .shortcuts import (
|
||||
read_shortcuts
|
||||
, merge_shortcuts
|
||||
)
|
||||
from .services import (
|
||||
read_services
|
||||
, merge_services
|
||||
)
|
||||
from .printers import (
|
||||
read_printers
|
||||
, merge_printers
|
||||
)
|
||||
from .inifiles import (
|
||||
read_inifiles
|
||||
, merge_inifiles
|
||||
)
|
||||
from .folders import (
|
||||
read_folders
|
||||
, merge_folders
|
||||
)
|
||||
from .files import (
|
||||
read_files
|
||||
, merge_files
|
||||
)
|
||||
from .envvars import (
|
||||
read_envvars
|
||||
, merge_envvars
|
||||
)
|
||||
from .drives import (
|
||||
read_drives
|
||||
, merge_drives
|
||||
)
|
||||
from .tasks import (
|
||||
read_tasks
|
||||
, merge_tasks
|
||||
)
|
||||
from .scriptsini import (
|
||||
read_scripts
|
||||
, merge_scripts
|
||||
)
|
||||
from .networkshares import (
|
||||
read_networkshares
|
||||
, merge_networkshares
|
||||
)
|
||||
from .shortcuts import read_shortcuts
|
||||
from .services import read_services
|
||||
from .printers import read_printers
|
||||
from .inifiles import read_inifiles
|
||||
from .folders import read_folders
|
||||
from .files import read_files
|
||||
from .envvars import read_envvars
|
||||
from .drives import read_drives
|
||||
import util
|
||||
import util.preg
|
||||
from util.paths import (
|
||||
local_policy_path,
|
||||
default_policy_path,
|
||||
cache_dir,
|
||||
local_policy_cache
|
||||
)
|
||||
from util.logging import log
|
||||
|
||||
|
||||
@unique
|
||||
class FileType(Enum):
|
||||
PREG = 'registry.pol'
|
||||
SHORTCUTS = 'shortcuts.xml'
|
||||
FOLDERS = 'folders.xml'
|
||||
FILES = 'files.xml'
|
||||
DRIVES = 'drives.xml'
|
||||
SCHEDULEDTASKS = 'scheduledtasks.xml'
|
||||
ENVIRONMENTVARIABLES = 'environmentvariables.xml'
|
||||
INIFILES = 'inifiles.xml'
|
||||
SERVICES = 'services.xml'
|
||||
PRINTERS = 'printers.xml'
|
||||
SCRIPTS = 'scripts.ini'
|
||||
NETWORKSHARES = 'networkshares.xml'
|
||||
|
||||
def get_preftype(path_to_file):
|
||||
fpath = Path(path_to_file)
|
||||
|
||||
if fpath.exists():
|
||||
file_name = fpath.name.lower()
|
||||
for item in FileType:
|
||||
if file_name == item.value:
|
||||
return item
|
||||
|
||||
return None
|
||||
|
||||
def pref_parsers():
|
||||
parsers = dict()
|
||||
|
||||
parsers[FileType.PREG] = read_polfile
|
||||
parsers[FileType.SHORTCUTS] = read_shortcuts
|
||||
parsers[FileType.FOLDERS] = read_folders
|
||||
parsers[FileType.FILES] = read_files
|
||||
parsers[FileType.DRIVES] = read_drives
|
||||
parsers[FileType.SCHEDULEDTASKS] = read_tasks
|
||||
parsers[FileType.ENVIRONMENTVARIABLES] = read_envvars
|
||||
parsers[FileType.INIFILES] = read_inifiles
|
||||
parsers[FileType.SERVICES] = read_services
|
||||
parsers[FileType.PRINTERS] = read_printers
|
||||
parsers[FileType.SCRIPTS] = read_scripts
|
||||
parsers[FileType.NETWORKSHARES] = read_networkshares
|
||||
|
||||
return parsers
|
||||
|
||||
def get_parser(preference_type):
|
||||
parsers = pref_parsers()
|
||||
return parsers[preference_type]
|
||||
|
||||
def pref_mergers():
|
||||
mergers = dict()
|
||||
|
||||
mergers[FileType.PREG] = merge_polfile
|
||||
mergers[FileType.SHORTCUTS] = merge_shortcuts
|
||||
mergers[FileType.FOLDERS] = merge_folders
|
||||
mergers[FileType.FILES] = merge_files
|
||||
mergers[FileType.DRIVES] = merge_drives
|
||||
mergers[FileType.SCHEDULEDTASKS] = merge_tasks
|
||||
mergers[FileType.ENVIRONMENTVARIABLES] = merge_envvars
|
||||
mergers[FileType.INIFILES] = merge_inifiles
|
||||
mergers[FileType.SERVICES] = merge_services
|
||||
mergers[FileType.PRINTERS] = merge_printers
|
||||
mergers[FileType.SCRIPTS] = merge_scripts
|
||||
mergers[FileType.NETWORKSHARES] = merge_networkshares
|
||||
|
||||
return mergers
|
||||
|
||||
def get_merger(preference_type):
|
||||
mergers = pref_mergers()
|
||||
return mergers[preference_type]
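The parser/merger lookup above dispatches purely on the preference file name, so supporting a new preference type is one enum member plus one dictionary entry. A reduced, self-contained sketch of that pattern (two file types only, invented path):

from enum import Enum
from pathlib import Path

class FileType(Enum):
    SHORTCUTS = 'shortcuts.xml'
    FOLDERS = 'folders.xml'

def get_preftype(path_to_file):
    name = Path(path_to_file).name.lower()
    return next((item for item in FileType if item.value == name), None)

# get_preftype('/tmp/Preferences/Shortcuts/Shortcuts.xml') -> FileType.SHORTCUTS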
|
||||
from util.logging import slogm
|
||||
|
||||
class gpt:
|
||||
def __init__(self, gpt_path, sid, username='Machine', version=None):
|
||||
__user_policy_mode_key = 'Software\\Policies\\Microsoft\\Windows\\System\\UserPolicyMode'
|
||||
|
||||
def __init__(self, gpt_path, sid):
|
||||
self.path = gpt_path
|
||||
self.username = username
|
||||
self.sid = sid
|
||||
self.storage = registry_factory()
|
||||
self.storage._gpt_read_flag = True
|
||||
self.version = version
|
||||
self.name = ''
|
||||
self.storage = registry_factory('registry')
|
||||
self._scan_gpt()
|
||||
|
||||
def _scan_gpt(self):
|
||||
'''
|
||||
Collect the data from the specified GPT on file system (cached
|
||||
by Samba).
|
||||
'''
|
||||
self.guid = self.path.rpartition('/')[2]
|
||||
self.name = ''
|
||||
if 'default' == self.guid:
|
||||
self.guid = 'Local Policy'
|
||||
|
||||
self._machine_path = find_dir(self.path, 'Machine')
|
||||
self._user_path = find_dir(self.path, 'User')
|
||||
self._scripts_machine_path = find_dir(self._machine_path, 'Scripts')
|
||||
self._scripts_user_path = find_dir(self._user_path, 'Scripts')
|
||||
|
||||
self.settings_list = [
|
||||
'shortcuts'
|
||||
, 'drives'
|
||||
, 'environmentvariables'
|
||||
, 'printers'
|
||||
, 'folders'
|
||||
, 'files'
|
||||
, 'inifiles'
|
||||
, 'services'
|
||||
, 'scheduledtasks'
|
||||
, 'scripts'
|
||||
, 'networkshares'
|
||||
]
|
||||
self.settings = dict()
|
||||
self.settings['machine'] = dict()
|
||||
self.settings['user'] = dict()
|
||||
self.settings['machine']['regpol'] = find_file(self._machine_path, 'registry.pol')
|
||||
self.settings['user']['regpol'] = find_file(self._user_path, 'registry.pol')
|
||||
for setting in self.settings_list:
|
||||
machine_preffile = find_preffile(self._machine_path, setting)
|
||||
user_preffile = find_preffile(self._user_path, setting)
|
||||
mlogdata = dict({'setting': setting, 'prefpath': machine_preffile})
|
||||
log('D24', mlogdata)
|
||||
self.settings['machine'][setting] = machine_preffile
|
||||
ulogdata = dict({'setting': setting, 'prefpath': user_preffile})
|
||||
log('D23', ulogdata)
|
||||
self.settings['user'][setting] = user_preffile
|
||||
|
||||
self.settings['machine']['scripts'] = find_file(self._scripts_machine_path, 'scripts.ini')
|
||||
self.settings['user']['scripts'] = find_file(self._scripts_user_path, 'scripts.ini')
|
||||
self._machine_prefs = find_dir(self._machine_path, 'Preferences')
|
||||
self._user_prefs = find_dir(self._user_path, 'Preferences')
|
||||
|
||||
logging.debug(slogm('Looking for machine part of GPT {}'.format(self.guid)))
|
||||
self._find_machine()
|
||||
logging.debug(slogm('Looking for user part of GPT {}'.format(self.guid)))
|
||||
self._find_user()
|
||||
|
||||
def set_name(self, name):
|
||||
'''
|
||||
@ -208,61 +74,157 @@ class gpt:
|
||||
'''
|
||||
self.name = name
|
||||
|
||||
def merge_machine(self):
|
||||
def get_policy_mode(self):
|
||||
'''
|
||||
Merge machine settings to storage.
|
||||
Get UserPolicyMode parameter value in order to determine if it
|
||||
is possible to work with user's part of GPT. This value is
|
||||
checked only if working for user's SID.
|
||||
'''
|
||||
try:
|
||||
# Merge machine policies to registry if possible
|
||||
if self.settings['machine']['regpol']:
|
||||
mlogdata = dict({'polfile': self.settings['machine']['regpol']})
|
||||
log('D34', mlogdata)
|
||||
util.preg.merge_polfile(self.settings['machine']['regpol'], policy_name=self.name, version=self.version)
|
||||
# Merge machine preferences to registry if possible
|
||||
for preference_name, preference_path in self.settings['machine'].items():
|
||||
if preference_path:
|
||||
preference_type = get_preftype(preference_path)
|
||||
logdata = dict({'pref': preference_type.value, 'sid': self.sid})
|
||||
log('D28', logdata)
|
||||
preference_parser = get_parser(preference_type)
|
||||
preference_merger = get_merger(preference_type)
|
||||
preference_objects = preference_parser(preference_path)
|
||||
preference_merger(self.storage, self.sid, preference_objects, self.name)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['gpt'] = self.name
|
||||
logdata['msg'] = str(exc)
|
||||
log('E28', logdata)
|
||||
upm = self.storage.get_hklm_entry(self.__user_policy_mode_key)
|
||||
if not upm:
|
||||
upm = 0
|
||||
upm = int(upm)
|
||||
if 0 > upm or 2 < upm:
|
||||
upm = 0
|
||||
|
||||
def merge_user(self):
|
||||
return upm
|
||||
|
||||
def _find_user(self):
|
||||
self._user_regpol = self._find_regpol('user')
|
||||
self._user_shortcuts = self._find_shortcuts('user')
|
||||
|
||||
def _find_machine(self):
|
||||
self._machine_regpol = self._find_regpol('machine')
|
||||
self._machine_shortcuts = self._find_shortcuts('machine')
|
||||
|
||||
def _find_regpol(self, part):
|
||||
'''
|
||||
Merge user settings to storage.
|
||||
Find Registry.pol files.
|
||||
'''
|
||||
try:
|
||||
# Merge user policies to registry if possible
|
||||
if self.settings['user']['regpol']:
|
||||
mulogdata = dict({'polfile': self.settings['user']['regpol']})
|
||||
log('D35', mulogdata)
|
||||
util.preg.merge_polfile(self.settings['user']['regpol'],
|
||||
sid=self.sid,
|
||||
policy_name=self.name,
|
||||
username=self.username,
|
||||
version=self.version)
|
||||
# Merge user preferences to registry if possible
|
||||
for preference_name, preference_path in self.settings['user'].items():
|
||||
if preference_path:
|
||||
preference_type = get_preftype(preference_path)
|
||||
logdata = dict({'pref': preference_type.value, 'sid': self.sid})
|
||||
log('D29', logdata)
|
||||
preference_parser = get_parser(preference_type)
|
||||
preference_merger = get_merger(preference_type)
|
||||
preference_objects = preference_parser(preference_path)
|
||||
preference_merger(self.storage, self.sid, preference_objects, self.name)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['gpt'] = self.name
|
||||
logdata['msg'] = str(exc)
|
||||
log('E29', logdata)
|
||||
search_path = self._machine_path
|
||||
if 'user' == part:
|
||||
search_path = self._user_path
|
||||
if not search_path:
|
||||
return None
|
||||
|
||||
return find_file(search_path, 'registry.pol')
|
||||
|
||||
def _find_shortcuts(self, part):
|
||||
'''
|
||||
Find Shortcuts.xml files.
|
||||
'''
|
||||
shortcuts_dir = find_dir(self._machine_prefs, 'Shortcuts')
|
||||
shortcuts_file = find_file(shortcuts_dir, 'shortcuts.xml')
|
||||
|
||||
if 'user' == part:
|
||||
shortcuts_dir = find_dir(self._user_prefs, 'Shortcuts')
|
||||
shortcuts_file = find_file(shortcuts_dir, 'shortcuts.xml')
|
||||
|
||||
return shortcuts_file
|
||||
|
||||
def _find_envvars(self, part):
|
||||
'''
|
||||
Find EnvironmentVariables.xml files.
|
||||
'''
|
||||
search_path = os.path.join(self._machine_path, 'Preferences', 'EnvironmentVariables')
|
||||
if 'user' == part:
|
||||
search_path = os.path.join(self._user_path, 'Preferences', 'EnvironmentVariables')
|
||||
if not search_path:
|
||||
return None
|
||||
|
||||
return find_file(search_path, 'environmentvariables.xml')
|
||||
|
||||
def _find_drives(self, part):
|
||||
'''
|
||||
Find Drives.xml files.
|
||||
'''
|
||||
search_path = os.path.join(self._machine_path, 'Preferences', 'Drives')
|
||||
if 'user' == part:
|
||||
search_path = os.path.join(self._user_path, 'Preferences', 'Drives')
|
||||
if not search_path:
|
||||
return None
|
||||
|
||||
return find_file(search_path, 'drives.xml')
|
||||
|
||||
def _find_printers(self, part):
|
||||
'''
|
||||
Find Printers.xml files.
|
||||
'''
|
||||
search_path = os.path.join(self._machine_path, 'Preferences', 'Printers')
|
||||
if 'user' == part:
|
||||
search_path = os.path.join(self._user_path, 'Preferences', 'Printers')
|
||||
if not search_path:
|
||||
return None
|
||||
|
||||
return find_file(search_path, 'printers.xml')
|
||||
|
||||
def _merge_shortcuts(self):
|
||||
shortcuts = list()
|
||||
|
||||
if self.sid == self.storage.get_info('machine_sid'):
|
||||
shortcuts = read_shortcuts(self._machine_shortcuts)
|
||||
else:
|
||||
shortcuts = read_shortcuts(self._user_shortcuts)
|
||||
|
||||
for sc in shortcuts:
|
||||
self.storage.add_shortcut(self.sid, sc)
|
||||
|
||||
def merge(self):
|
||||
'''
|
||||
Merge machine and user (if sid provided) settings to storage.
|
||||
'''
|
||||
if self.sid == self.storage.get_info('machine_sid'):
|
||||
# Merge machine settings to registry if possible
|
||||
if self._machine_regpol:
|
||||
logging.debug(slogm('Merging machine settings from {}'.format(self._machine_regpol)))
|
||||
util.preg.merge_polfile(self._machine_regpol)
|
||||
if self._user_regpol:
|
||||
logging.debug(slogm('Merging machine(user) settings from {}'.format(self._machine_regpol)))
|
||||
util.preg.merge_polfile(self._user_regpol, self.sid)
|
||||
if self._machine_shortcuts:
|
||||
logging.debug(slogm('Merging machine shortcuts from {}'.format(self._machine_shortcuts)))
|
||||
self._merge_shortcuts()
|
||||
else:
|
||||
# Merge user settings if UserPolicyMode set accordingly
|
||||
# and user settings (for HKCU) are exist.
|
||||
policy_mode = upm2str(self.get_policy_mode())
|
||||
if 'Merge' == policy_mode or 'Not configured' == policy_mode:
|
||||
if self._user_regpol:
|
||||
logging.debug(slogm('Merging user settings from {} for {}'.format(self._user_regpol, self.sid)))
|
||||
util.preg.merge_polfile(self._user_regpol, self.sid)
|
||||
if self._user_shortcuts:
|
||||
logging.debug(slogm('Merging user shortcuts from {} for {}'.format(self._user_shortcuts, self.sid)))
|
||||
self._merge_shortcuts()
|
||||
|
||||
def __str__(self):
|
||||
template = '''
|
||||
GUID: {}
|
||||
Name: {}
|
||||
For SID: {}
|
||||
|
||||
Machine part: {}
|
||||
Machine Registry.pol: {}
|
||||
Machine Shortcuts.xml: {}
|
||||
|
||||
User part: {}
|
||||
User Registry.pol: {}
|
||||
User Shortcuts.xml: {}
|
||||
|
||||
'''
|
||||
result = template.format(
|
||||
self.guid,
|
||||
self.name,
|
||||
self.sid,
|
||||
|
||||
self._machine_path,
|
||||
self._machine_regpol,
|
||||
self._machine_shortcuts,
|
||||
|
||||
self._user_path,
|
||||
self._user_regpol,
|
||||
self._user_shortcuts,
|
||||
)
|
||||
return result
|
||||
|
||||
def find_dir(search_path, name):
|
||||
'''
|
||||
@ -307,38 +269,11 @@ def find_file(search_path, name):
|
||||
|
||||
return None
|
||||
|
||||
def find_preferences(search_path):
|
||||
'''
|
||||
Find 'Preferences' directory
|
||||
'''
|
||||
if not search_path:
|
||||
return None
|
||||
|
||||
return find_dir(search_path, 'Preferences')
|
||||
|
||||
def find_preffile(search_path, prefname):
|
||||
'''
|
||||
Find file with path like Preferences/prefname/prefname.xml
|
||||
'''
|
||||
# Look for 'Preferences' directory
|
||||
prefdir = find_preferences(search_path)
|
||||
|
||||
if not prefdir:
|
||||
return None
|
||||
|
||||
# Then search for preference directory
|
||||
pref_dir = find_dir(prefdir, prefname)
|
||||
file_name = '{}.xml'.format(prefname)
|
||||
# And then try to find the corresponding file.
|
||||
pref_file = find_file(pref_dir, file_name)
|
||||
|
||||
return pref_file
|
||||
|
||||
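# Illustrative example (hypothetical path): how find_preffile() above resolves a
# preference file inside a cached GPT, using the helpers it calls.
#
#   machine_path = find_dir('/path/to/cached/gpt', 'Machine')
#   shortcuts_xml = find_preffile(machine_path, 'shortcuts')
#   # i.e. <gpt>/Machine/Preferences/Shortcuts/shortcuts.xml, if present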
def lp2gpt():
|
||||
'''
|
||||
Convert local-policy to full-featured GPT.
|
||||
'''
|
||||
lppath = os.path.join(local_policy_path(), 'Machine/Registry.pol.xml')
|
||||
lppath = os.path.join(default_policy_path(), 'Machine/Registry.pol.xml')
|
||||
|
||||
# Load settings from XML PolFile
|
||||
polparser = GPPolParser()
|
||||
@ -356,9 +291,24 @@ def get_local_gpt(sid):
|
||||
'''
|
||||
Convert default policy to GPT and create object out of it.
|
||||
'''
|
||||
log('D25')
|
||||
logging.debug(slogm('Re-caching Local Policy'))
|
||||
lp2gpt()
|
||||
local_policy = gpt(str(local_policy_cache()), sid)
|
||||
local_policy.set_name('Local Policy')
|
||||
|
||||
return local_policy
|
||||
|
||||
def upm2str(upm_num):
|
||||
'''
|
||||
Translate UserPolicyMode to string.
|
||||
'''
|
||||
result = 'Not configured'
|
||||
|
||||
if upm_num in [1, '1']:
|
||||
result = 'Replace'
|
||||
|
||||
if upm_num in [2, '2']:
|
||||
result = 'Merge'
|
||||
|
||||
return result
|
||||
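# Illustrative sketch (hypothetical caller): how the UserPolicyMode value read by
# get_policy_mode() and translated by upm2str() above is expected to gate the
# user part of a GPT -- 1 means 'Replace', 2 means 'Merge', anything else is
# treated as 'Not configured'.
#
#   mode = upm2str(policy.get_policy_mode())
#   if mode in ('Merge', 'Not configured'):
#       pass  # the user part of the GPT may be applied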
|
||||
|
@ -1,7 +1,7 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2022 BaseALT Ltd.
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@ -21,32 +21,14 @@ from util.xml import get_xml_root
|
||||
def read_inifiles(inifiles_file):
|
||||
inifiles = list()
|
||||
|
||||
for ini in get_xml_root(inifiles_file):
|
||||
prors = ini.find('Properties')
|
||||
ini_obj = inifile(prors.get('path'))
|
||||
ini_obj.set_section(prors.get('section', default=None))
|
||||
ini_obj.set_property(prors.get('property', default=None))
|
||||
ini_obj.set_value(prors.get('value', default=None))
|
||||
ini_obj.set_action(prors.get('action'))
|
||||
for inifile in get_xml_root(inifiles_file):
|
||||
ini_obj = inifile()
|
||||
|
||||
inifiles.append(ini_obj)
|
||||
|
||||
return inifiles
|
||||
|
||||
def merge_inifiles(storage, sid, inifile_objects, policy_name):
|
||||
for iniobj in inifile_objects:
|
||||
storage.add_ini(sid, iniobj, policy_name)
|
||||
|
||||
class inifile:
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
|
||||
def set_section(self, section):
|
||||
self.section = section
|
||||
def set_property(self, property):
|
||||
self.property = property
|
||||
def set_value(self, value):
|
||||
self.value = value
|
||||
def set_action(self, action):
|
||||
self.action = action
|
||||
class inifile:
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
@ -1,57 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2022 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from util.xml import get_xml_root
|
||||
from storage.dconf_registry import Dconf_registry
|
||||
|
||||
def read_networkshares(networksharesxml):
|
||||
networkshares = list()
|
||||
|
||||
for share in get_xml_root(networksharesxml):
|
||||
props = share.find('Properties')
|
||||
networkshare_obj = networkshare(props.get('name'))
|
||||
networkshare_obj.set_action(props.get('action', default='C'))
|
||||
networkshare_obj.set_path(props.get('path', default=None))
|
||||
networkshare_obj.set_all_regular(props.get('allRegular', default=None))
|
||||
networkshare_obj.set_comment(props.get('comment', default=None))
|
||||
networkshare_obj.set_limitUsers(props.get('limitUsers', default=None))
|
||||
networkshare_obj.set_abe(props.get('abe', default=None))
|
||||
networkshares.append(networkshare_obj)
|
||||
|
||||
return networkshares
|
||||
|
||||
def merge_networkshares(storage, sid, networkshares_objects, policy_name):
|
||||
for networkshareobj in networkshares_objects:
|
||||
storage.add_networkshare(sid, networkshareobj, policy_name)
|
||||
|
||||
class networkshare:
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
|
||||
def set_action(self, action):
|
||||
self.action = action
|
||||
def set_path(self, path):
|
||||
self.path = path
|
||||
def set_all_regular(self, allRegular):
|
||||
self.allRegular = allRegular
|
||||
def set_comment(self, comment):
|
||||
self.comment = comment
|
||||
def set_limitUsers(self, limitUsers):
|
||||
self.limitUsers = limitUsers
|
||||
def set_abe(self, abe):
|
||||
self.abe = abe
|
@ -1,33 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from util.preg import (
|
||||
load_preg
|
||||
)
|
||||
|
||||
def read_polfile(filename):
|
||||
return load_preg(filename).entries
|
||||
|
||||
def merge_polfile(storage, sid, policy_objects, policy_name):
|
||||
pass
|
||||
# for entry in policy_objects:
|
||||
# if not sid:
|
||||
# storage.add_hklm_entry(entry, policy_name)
|
||||
# else:
|
||||
# storage.add_hkcu_entry(entry, sid, policy_name)
|
||||
|
@ -41,10 +41,6 @@ def read_printers(printers_file):
|
||||
|
||||
return printers
|
||||
|
||||
def merge_printers(storage, sid, printer_objects, policy_name):
|
||||
for device in printer_objects:
|
||||
storage.add_printer(sid, device, policy_name)
|
||||
|
||||
def json2printer(json_str):
|
||||
'''
|
||||
Build printer object out of string-serialized JSON.
|
||||
|
@ -1,147 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import configparser
|
||||
import os
|
||||
|
||||
|
||||
def read_scripts(scripts_file):
|
||||
scripts = Scripts_lists()
|
||||
|
||||
logon_scripts = dict()
|
||||
logoff_scripts = dict()
|
||||
startup_scripts = dict()
|
||||
shutdown_scripts = dict()
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(scripts_file, encoding = 'utf-16')
|
||||
scripts_file_dir = os.path.dirname(scripts_file)
|
||||
|
||||
actions = config.sections()
|
||||
|
||||
for act in actions:
|
||||
act_upper = act.upper()
|
||||
if act_upper == 'LOGON':
|
||||
section_scripts = logon_scripts
|
||||
elif act_upper == 'LOGOFF':
|
||||
section_scripts = logoff_scripts
|
||||
elif act_upper == 'STARTUP':
|
||||
section_scripts = startup_scripts
|
||||
elif act_upper == 'SHUTDOWN':
|
||||
section_scripts = shutdown_scripts
|
||||
else:
|
||||
continue
|
||||
|
||||
for key in config[act]:
|
||||
key_lower = key.lower()
|
||||
key_split = key_lower.split('cmdline')
|
||||
if len(key_split) > 1 and not key_split[1]:
|
||||
if key_split[0].isdigit():
|
||||
key_index = int(key_split[0])
|
||||
section_scripts[key_index] = Script(act, scripts_file_dir, config[act][key])
|
||||
key_split = key_lower.split('parameters')
|
||||
if len(key_split) > 1 and not key_split[1]:
|
||||
if key_split[0].isdigit():
|
||||
key_index = int(key_split[0])
|
||||
section_scripts[key_index].set_args(config[act][key])
|
||||
if logon_scripts:
|
||||
for i in sorted(logon_scripts.keys()):
|
||||
scripts.add_script('LOGON', logon_scripts[i])
|
||||
|
||||
if logoff_scripts:
|
||||
for i in sorted(logoff_scripts.keys()):
|
||||
scripts.add_script('LOGOFF', logoff_scripts[i])
|
||||
|
||||
if startup_scripts:
|
||||
for i in sorted(startup_scripts.keys()):
|
||||
scripts.add_script('STARTUP', startup_scripts[i])
|
||||
|
||||
if shutdown_scripts:
|
||||
for i in sorted(shutdown_scripts.keys()):
|
||||
scripts.add_script('SHUTDOWN', shutdown_scripts[i])
|
||||
|
||||
|
||||
return scripts
|
||||
|
||||
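# Illustrative example (hypothetical content): the scripts.ini layout that
# read_scripts() above expects. Section names select the action; NCmdLine keys
# name a script that is looked up under <scripts.ini dir>/<ACTION>/, and the
# matching NParameters keys supply its arguments; N gives the ordering.
#
#   [Logon]
#   0CmdLine=hello.sh
#   0Parameters=--verbose
#   1CmdLine=second.sh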
def merge_scripts(storage, sid, scripts_objects, policy_name):
|
||||
for script in scripts_objects.get_logon_scripts():
|
||||
storage.add_script(sid, script, policy_name)
|
||||
for script in scripts_objects.get_logoff_scripts():
|
||||
storage.add_script(sid, script, policy_name)
|
||||
for script in scripts_objects.get_startup_scripts():
|
||||
storage.add_script(sid, script, policy_name)
|
||||
for script in scripts_objects.get_shutdown_scripts():
|
||||
storage.add_script(sid, script, policy_name)
|
||||
|
||||
class Scripts_lists:
|
||||
def __init__ (self):
|
||||
self.__logon_scripts = list()
|
||||
self.__logoff_scripts = list()
|
||||
self.__startup_scripts = list()
|
||||
self.__shutdown_scripts = list()
|
||||
|
||||
def get_logon_scripts(self):
|
||||
return self.__logon_scripts
|
||||
def get_logoff_scripts(self):
|
||||
return self.__logoff_scripts
|
||||
def get_startup_scripts(self):
|
||||
return self.__startup_scripts
|
||||
def get_shutdown_scripts(self):
|
||||
return self.__shutdown_scripts
|
||||
|
||||
def add_script(self, action, script):
|
||||
if action == 'LOGON':
|
||||
self.get_logon_scripts().append(script)
|
||||
elif action == 'LOGOFF':
|
||||
self.get_logoff_scripts().append(script)
|
||||
elif action == 'STARTUP':
|
||||
self.get_startup_scripts().append(script)
|
||||
elif action == 'SHUTDOWN':
|
||||
self.get_shutdown_scripts().append(script)
|
||||
|
||||
|
||||
class Script:
|
||||
__logon_counter = 0
|
||||
__logoff_counter = 0
|
||||
__startup_counter = 0
|
||||
__shutdown_counter = 0
|
||||
|
||||
def __init__(self, action, script_dir, script_filename):
|
||||
action_upper = action.upper()
|
||||
self.action = action_upper
|
||||
self.path = os.path.join(script_dir, action_upper, script_filename.upper())
|
||||
if not os.path.isfile(self.path):
|
||||
return None
|
||||
self.args = None
|
||||
|
||||
if action_upper == 'LOGON':
|
||||
self.number = Script.__logon_counter
|
||||
Script.__logon_counter += 1
|
||||
elif action_upper == 'LOGOFF':
|
||||
self.number = Script.__logoff_counter
|
||||
Script.__logoff_counter += 1
|
||||
elif action_upper == 'STARTUP':
|
||||
self.number = Script.__startup_counter
|
||||
Script.__startup_counter += 1
|
||||
elif action_upper == 'SHUTDOWN':
|
||||
self.number = Script.__shutdown_counter
|
||||
Script.__shutdown_counter += 1
|
||||
|
||||
def set_args(self, args):
|
||||
self.args = args
|
||||
|
@ -39,10 +39,6 @@ def read_services(service_file):
|
||||
|
||||
return services
|
||||
|
||||
def merge_services(storage, sid, service_objects, policy_name):
|
||||
for srv in service_objects:
|
||||
pass
|
||||
|
||||
class service:
|
||||
def __init__(self, name):
|
||||
self.unit = name
|
||||
@ -50,7 +46,7 @@ class service:
|
||||
self.serviceaction = None
|
||||
|
||||
def set_clsid(self, clsid):
|
||||
self.guid = clsid
|
||||
self.guid = uid
|
||||
|
||||
def set_usercontext(self, usercontext=False):
|
||||
ctx = False
|
||||
|
@ -27,7 +27,6 @@ import json
|
||||
|
||||
from util.windows import transform_windows_path
|
||||
from util.xml import get_xml_root
|
||||
from util.paths import get_desktop_files_directory
|
||||
|
||||
class TargetType(Enum):
|
||||
FILESYSTEM = 'FILESYSTEM'
|
||||
@ -80,23 +79,15 @@ def read_shortcuts(shortcuts_file):
|
||||
# URL or FILESYSTEM
|
||||
target_type = get_ttype(props.get('targetType'))
|
||||
|
||||
sc = shortcut(dest, path, arguments, link.get('name'), props.get('action'), target_type)
|
||||
sc = shortcut(dest, path, arguments, link.get('name'), target_type)
|
||||
sc.set_changed(link.get('changed'))
|
||||
sc.set_clsid(link.get('clsid'))
|
||||
sc.set_guid(link.get('uid'))
|
||||
sc.set_usercontext(link.get('userContext', False))
|
||||
sc.set_icon(props.get('iconPath'))
|
||||
if props.get('comment'):
|
||||
sc.set_comment(props.get('comment'))
|
||||
|
||||
shortcuts.append(sc)
|
||||
|
||||
return shortcuts
|
||||
|
||||
def merge_shortcuts(storage, sid, shortcut_objects, policy_name):
|
||||
for shortcut in shortcut_objects:
|
||||
storage.add_shortcut(sid, shortcut, policy_name)
|
||||
|
||||
def json2sc(json_str):
|
||||
'''
|
||||
Build shortcut out of string-serialized JSON
|
||||
@ -104,33 +95,16 @@ def json2sc(json_str):
|
||||
json_obj = json.loads(json_str)
|
||||
link_type = get_ttype(json_obj['type'])
|
||||
|
||||
sc = shortcut(json_obj['dest'], json_obj['path'], json_obj['arguments'], json_obj['name'], json_obj['action'], link_type)
|
||||
sc = shortcut(json_obj['dest'], json_obj['path'], json_obj['arguments'], json_obj['name'], link_type)
|
||||
sc.set_changed(json_obj['changed'])
|
||||
sc.set_clsid(json_obj['clsid'])
|
||||
sc.set_guid(json_obj['guid'])
|
||||
sc.set_usercontext(json_obj['is_in_user_context'])
|
||||
if 'comment' in json_obj:
|
||||
sc.set_comment(json_obj['comment'])
|
||||
if 'icon' in json_obj:
|
||||
sc.set_icon(json_obj['icon'])
|
||||
|
||||
return sc
|
||||
|
||||
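# Illustrative example (hypothetical values): a serialized shortcut as consumed
# by json2sc() above; keys mirror the fields it reads (the older variant also
# carries an "action" key).
#
#   {"dest": "Desktop/editor", "path": "/usr/bin/editor", "arguments": "",
#    "name": "Editor", "type": "FILESYSTEM", "changed": "", "clsid": "",
#    "guid": "", "is_in_user_context": false}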
def find_desktop_entry(binary_path):
|
||||
desktop_dir = get_desktop_files_directory()
|
||||
binary_name = ''.join(binary_path.split('/')[-1])
|
||||
desktop_file_path = Path(f"{desktop_dir}/{binary_name}.desktop")
|
||||
|
||||
if desktop_file_path.exists():
|
||||
desktop_entry = DesktopEntry()
|
||||
desktop_entry.parse(desktop_file_path)
|
||||
return desktop_entry
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class shortcut:
|
||||
def __init__(self, dest, path, arguments, name=None, action=None, ttype=TargetType.FILESYSTEM):
|
||||
def __init__(self, dest, path, arguments, name=None, ttype=TargetType.FILESYSTEM):
|
||||
'''
|
||||
:param dest: Path to resulting file on file system
|
||||
:param path: Path where the link should point to
|
||||
@ -138,34 +112,13 @@ class shortcut:
|
||||
:param name: Name of the application
|
||||
:param type: Link type - FILESYSTEM or URL
|
||||
'''
|
||||
self.dest = self.replace_slashes(dest)
|
||||
self.dest = dest
|
||||
self.path = path
|
||||
self.expanded_path = None
|
||||
self.arguments = arguments
|
||||
self.name = self.replace_name(name)
|
||||
self.action = action
|
||||
self.name = name
|
||||
self.changed = ''
|
||||
self.icon = None
|
||||
self.comment = ''
|
||||
self.is_in_user_context = self.set_usercontext()
|
||||
self.type = ttype
|
||||
self.desktop_file_template = None
|
||||
|
||||
def replace_slashes(self, input_path):
|
||||
if input_path.startswith('%'):
|
||||
index = input_path.find('%', 1)
|
||||
if index != -1:
|
||||
replace_path = input_path[:index + 2] + input_path[index + 2:].replace('/','-')
|
||||
return replace_path
|
||||
return input_path.replace('/','-')
|
||||
|
||||
def replace_name(self, input_name):
|
||||
if input_name.startswith('%'):
|
||||
index = input_name.find('%', 1)
|
||||
if index != -1:
|
||||
replace_name = input_name[index + 2:]
|
||||
return replace_name
|
||||
return input_name
|
||||
|
||||
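# Illustrative examples (hypothetical value '%DesktopDir%/tools/editor') for the
# two helpers above:
#   replace_slashes('%DesktopDir%/tools/editor') -> '%DesktopDir%/tools-editor'
#     (slashes after the leading %VARIABLE% become '-', so the value maps to a
#     single file name; a value with no leading '%' has every '/' replaced)
#   replace_name('%DesktopDir%/editor') -> 'editor'
#     (the %VARIABLE%/ prefix is stripped from the display name)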
def __str__(self):
|
||||
result = self.to_json()
|
||||
@ -183,12 +136,6 @@ class shortcut:
|
||||
def set_guid(self, uid):
|
||||
self.guid = uid
|
||||
|
||||
def set_icon(self, icon_name):
|
||||
self.icon = icon_name
|
||||
|
||||
def set_comment(self, comment):
|
||||
self.comment = comment
|
||||
|
||||
def set_type(self, ttype):
|
||||
'''
|
||||
Set type of the hyperlink - FILESYSTEM or URL
|
||||
@ -208,12 +155,6 @@ class shortcut:
|
||||
|
||||
self.is_in_user_context = ctx
|
||||
|
||||
def set_expanded_path(self, path):
|
||||
'''
|
||||
Adjust shortcut path with expanding windows variables
|
||||
'''
|
||||
self.expanded_path = path
|
||||
|
||||
def is_usercontext(self):
|
||||
return self.is_in_user_context
|
||||
|
||||
@ -229,98 +170,44 @@ class shortcut:
|
||||
content['clsid'] = self.clsid
|
||||
content['guid'] = self.guid
|
||||
content['changed'] = self.changed
|
||||
content['action'] = self.action
|
||||
content['is_in_user_context'] = self.is_in_user_context
|
||||
content['type'] = ttype2str(self.type)
|
||||
if self.icon:
|
||||
content['icon'] = self.icon
|
||||
if self.comment:
|
||||
content['comment'] = self.comment
|
||||
|
||||
result = self.desktop()
|
||||
result.content.update(content)
|
||||
|
||||
return json.dumps(result.content)
|
||||
|
||||
def desktop(self, dest=None):
|
||||
def desktop(self):
|
||||
'''
|
||||
Returns desktop file object which may be written to disk.
|
||||
'''
|
||||
if dest:
|
||||
self.desktop_file = DesktopEntry(dest)
|
||||
else:
|
||||
self.desktop_file_template = find_desktop_entry(self.path)
|
||||
self.desktop_file = DesktopEntry()
|
||||
self.desktop_file.addGroup('Desktop Entry')
|
||||
self.desktop_file.set('Version', '1.0')
|
||||
self._update_desktop()
|
||||
self.desktop_file = DesktopEntry()
|
||||
self.desktop_file.addGroup('Desktop Entry')
|
||||
|
||||
return self.desktop_file
|
||||
|
||||
def _update_desktop(self):
|
||||
'''
|
||||
Update desktop file object from internal data.
|
||||
'''
|
||||
if self.type == TargetType.URL:
|
||||
self.desktop_file.set('Type', 'Link')
|
||||
else:
|
||||
self.desktop_file.set('Type', 'Application')
|
||||
|
||||
self.desktop_file.set('Version', '1.0')
|
||||
self.desktop_file.set('Name', self.name)
|
||||
|
||||
desktop_path = self.path
|
||||
if self.expanded_path:
|
||||
desktop_path = self.expanded_path
|
||||
if self.type == TargetType.URL:
|
||||
self.desktop_file.set('URL', desktop_path)
|
||||
self.desktop_file.set('URL', self.path)
|
||||
else:
|
||||
str2bool_lambda = (lambda boolstr: boolstr if isinstance(boolstr, bool)
|
||||
else boolstr and boolstr.lower() in ['True', 'true', 'yes', '1'])
|
||||
if self.desktop_file_template:
|
||||
terminal_state = str2bool_lambda(self.desktop_file_template.get('Terminal'))
|
||||
self.desktop_file.set('Terminal', 'true' if terminal_state else 'false')
|
||||
self.desktop_file.set('Exec', '{} {}'.format(desktop_path, self.arguments))
|
||||
self.desktop_file.set('Comment', self.comment)
|
||||
self.desktop_file.set('Terminal', 'false')
|
||||
self.desktop_file.set('Exec', '{} {}'.format(self.path, self.arguments))
|
||||
|
||||
if self.icon:
|
||||
self.desktop_file.set('Icon', self.icon)
|
||||
elif self.desktop_file_template and self.desktop_file_template.get('Icon', False):
|
||||
self.desktop_file.set('Icon', self.desktop_file_template.get('Icon'))
|
||||
return self.desktop_file
|
||||
|
||||
def _write_desktop(self, dest, create_only=False, read_firstly=False):
|
||||
def write_desktop(self, dest):
|
||||
'''
|
||||
Write .desktop file to disk using path 'dest'. Please note that
|
||||
.desktop files must have executable bit set in order to work in
|
||||
GUI.
|
||||
'''
|
||||
self.desktop().write(dest)
|
||||
sc = Path(dest)
|
||||
if sc.exists() and create_only:
|
||||
return
|
||||
|
||||
if sc.exists() and read_firstly:
|
||||
self.desktop(dest).write(dest)
|
||||
else:
|
||||
self.desktop().write(dest)
|
||||
|
||||
sc.chmod(sc.stat().st_mode | stat.S_IEXEC)
|
||||
|
||||
def _remove_desktop(self, dest):
|
||||
'''
|
||||
Remove .desktop file from disk using path 'dest'.
|
||||
'''
|
||||
sc = Path(dest)
|
||||
if sc.exists():
|
||||
sc.unlink()
|
||||
|
||||
def apply_desktop(self, dest):
|
||||
'''
|
||||
Apply .desktop file by action.
|
||||
'''
|
||||
if self.action == 'U':
|
||||
self._write_desktop(dest, read_firstly=True)
|
||||
elif self.action == 'D':
|
||||
self._remove_desktop(dest)
|
||||
elif self.action == 'R':
|
||||
self._remove_desktop(dest)
|
||||
self._write_desktop(dest)
|
||||
elif self.action == 'C':
|
||||
self._write_desktop(dest, create_only=True)
|
||||
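# Note on the single-letter actions handled by apply_desktop() above: they follow
# the usual Group Policy Preferences convention -- C creates the .desktop file
# only if it does not exist yet, R removes and rewrites it, U rewrites it after
# reading the existing file, and D deletes it.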
|
@ -1,25 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
def read_tasks(filename):
|
||||
pass
|
||||
|
||||
def merge_tasks(storage, sid, task_objects, policy_name):
|
||||
for task in task_objects:
|
||||
pass
|
||||
|
@ -18,11 +18,11 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import argparse
|
||||
import locale
|
||||
import gettext
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import pwd
|
||||
import signal
|
||||
|
||||
@ -31,7 +31,6 @@ from util.users import (
|
||||
)
|
||||
from util.arguments import (
|
||||
process_target,
|
||||
set_loglevel,
|
||||
ExitCodeUpdater
|
||||
)
|
||||
from util.dbus import (
|
||||
@ -40,54 +39,38 @@ from util.dbus import (
|
||||
)
|
||||
from util.signals import signal_handler
|
||||
|
||||
from util.logging import log
|
||||
|
||||
#logging.basicConfig(level=logging.DEBUG)
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
class file_runner:
|
||||
_gpoa_exe = '/usr/sbin/gpoa'
|
||||
|
||||
def __init__(self, loglevel, username=None):
|
||||
def __init__(self, username=None):
|
||||
self._user = username
|
||||
self._loglevel = loglevel
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Call gpoa utility to generate scripts
|
||||
'''
|
||||
gpoa_cmd = [self._gpoa_exe]
|
||||
if self._loglevel != None:
|
||||
gpoa_cmd += ["--loglevel", str(self._loglevel)]
|
||||
if self._user:
|
||||
gpoa_cmd += [self._user]
|
||||
|
||||
subprocess.check_output(gpoa_cmd)
|
||||
output = subprocess.call(gpoa_cmd)
|
||||
sys.exit(output)
|
||||
|
||||
def parse_cli_arguments():
|
||||
'''
|
||||
Command line argument parser
|
||||
'''
|
||||
argparser = argparse.ArgumentParser(description='Update group policies for computer and the specified user')
|
||||
argparser = argparse.ArgumentParser(description='Update group policies for the specified user')
|
||||
argparser.add_argument('-u',
|
||||
'--user',
|
||||
default=None,
|
||||
help='Name of the user for GPO update')
|
||||
argparser.add_argument('-t',
|
||||
'--target',
|
||||
argparser.add_argument('--target',
|
||||
default=None,
|
||||
type=str.upper,
|
||||
choices=["ALL", "USER", "COMPUTER"],
|
||||
type=str,
|
||||
help='Specify if it is needed to update user\'s or computer\'s policies')
|
||||
argparser.add_argument('-l',
|
||||
'--loglevel',
|
||||
type=int,
|
||||
default=5,
|
||||
help='Set logging verbosity level')
|
||||
argparser.add_argument('-s',
|
||||
'--system',
|
||||
action='store_true',
|
||||
default=None,
|
||||
help='Run gpoa directly in system mode')
|
||||
|
||||
return argparser.parse_args()
|
||||
|
||||
@ -97,74 +80,60 @@ def runner_factory(args, target):
|
||||
factors taken into account.
|
||||
'''
|
||||
username = None
|
||||
target = target.upper()
|
||||
if is_root():
|
||||
# Only root may specify any username to update.
|
||||
try:
|
||||
if args.user:
|
||||
username = pwd.getpwnam(args.user).pw_name
|
||||
else:
|
||||
target = 'COMPUTER'
|
||||
target = 'Computer'
|
||||
except:
|
||||
username = None
|
||||
logdata = dict({'username': args.user})
|
||||
log('W1', logdata)
|
||||
logstring = (
|
||||
'Unable to perform gpupdate for non-existent user {},'
|
||||
' will update machine settings'
|
||||
)
|
||||
logging.error(logstring.format(args.user))
|
||||
else:
|
||||
# User may only perform gpupdate for machine (None) or
|
||||
# itself (os.getusername()).
|
||||
username = pwd.getpwuid(os.getuid()).pw_name
|
||||
if args.user != username:
|
||||
logdata = dict({'username': username})
|
||||
log('W2', logdata)
|
||||
logstring = (
|
||||
'Unable to perform gpupdate for {} with current'
|
||||
' permissions, will update current user settings'
|
||||
)
|
||||
logging.error(logstring.format(args.user))
|
||||
|
||||
if args.system:
|
||||
return try_directly(username, target, args.loglevel)
|
||||
else:
|
||||
return try_by_oddjob(username, target)
|
||||
|
||||
def try_by_oddjob(username, target):
|
||||
'''
|
||||
Run group policies applying by oddjob service
|
||||
'''
|
||||
if is_oddjobd_gpupdate_accessible():
|
||||
log('D13')
|
||||
logging.debug('Starting gpupdate via D-Bus')
|
||||
computer_runner = None
|
||||
user_runner = None
|
||||
if target == 'ALL' or target == 'COMPUTER':
|
||||
if target == 'All' or target == 'Computer':
|
||||
computer_runner = dbus_runner()
|
||||
if username:
|
||||
if target == 'ALL' or target == 'USER':
|
||||
if target == 'All' or target == 'User':
|
||||
user_runner = dbus_runner(username)
|
||||
return (computer_runner, user_runner)
|
||||
else:
|
||||
log('W3')
|
||||
logging.warning('oddjobd is inaccessible')
|
||||
|
||||
return None
|
||||
|
||||
def try_directly(username, target, loglevel):
|
||||
'''
|
||||
Run group policies applying directly
|
||||
'''
|
||||
if is_root():
|
||||
log('D14')
|
||||
logging.debug('Starting gpupdate by command invocation')
|
||||
computer_runner = None
|
||||
user_runner = None
|
||||
if target == 'ALL' or target == 'COMPUTER':
|
||||
computer_runner = file_runner(loglevel)
|
||||
if target == 'ALL' or target == 'USER':
|
||||
user_runner = file_runner(loglevel, username)
|
||||
if target == 'All' or target == 'Computer':
|
||||
computer_runner = file_runner()
|
||||
if target == 'All' or target == 'User':
|
||||
user_runner = file_runner(username)
|
||||
return (computer_runner, user_runner)
|
||||
else:
|
||||
log('E1')
|
||||
logging.error('Insufficient permissions to run gpupdate')
|
||||
|
||||
return None
|
||||
|
||||
def main():
|
||||
args = parse_cli_arguments()
|
||||
locale.bindtextdomain('gpoa', '/usr/lib/python3/site-packages/gpoa/locale')
|
||||
gettext.bindtextdomain('gpoa', '/usr/lib/python3/site-packages/gpoa/locale')
|
||||
gettext.textdomain('gpoa')
|
||||
set_loglevel(args.loglevel)
|
||||
gpo_appliers = runner_factory(args, process_target(args.target))
|
||||
|
||||
if gpo_appliers:
|
||||
@ -172,19 +141,17 @@ def main():
|
||||
try:
|
||||
gpo_appliers[0].run()
|
||||
except Exception as exc:
|
||||
logdata = dict({'error': str(exc)})
|
||||
log('E5')
|
||||
logging.error('Error running GPOA for computer: {}'.format(exc))
|
||||
return int(ExitCodeUpdater.FAIL_GPUPDATE_COMPUTER_NOREPLY)
|
||||
|
||||
if gpo_appliers[1]:
|
||||
try:
|
||||
gpo_appliers[1].run()
|
||||
except Exception as exc:
|
||||
logdata = dict({'error': str(exc)})
|
||||
log('E6', logdata)
|
||||
logging.error('Error running GPOA for user: {}'.format(exc))
|
||||
return int(ExitCodeUpdater.FAIL_GPUPDATE_USER_NOREPLY)
|
||||
else:
|
||||
log('E2')
|
||||
logging.error('gpupdate will not be started')
|
||||
return int(ExitCodeUpdater.FAIL_NO_RUNNER)
|
||||
|
||||
return int(ExitCodeUpdater.EXIT_SUCCESS)
|
||||
|
@ -1,376 +0,0 @@
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
import subprocess
|
||||
|
||||
from util.util import (
|
||||
runcmd
|
||||
, get_backends
|
||||
, get_default_policy_name
|
||||
, get_policy_entries
|
||||
, get_policy_variants
|
||||
)
|
||||
from util.config import GPConfig
|
||||
from util.paths import get_custom_policy_dir
|
||||
|
||||
|
||||
class Runner:
|
||||
__control_path = '/usr/sbin/control'
|
||||
__systemctl_path = '/bin/systemctl'
|
||||
|
||||
def __init__(self):
|
||||
self.arguments = parse_arguments()
|
||||
|
||||
def parse_arguments():
|
||||
'''
|
||||
Parse CLI arguments.
|
||||
'''
|
||||
parser = argparse.ArgumentParser(prog='gpupdate-setup')
|
||||
subparsers = parser.add_subparsers(dest='action',
|
||||
metavar='action',
|
||||
help='Group Policy management actions (default action is status)')
|
||||
|
||||
parser_list = subparsers.add_parser('list',
|
||||
help='List available types of local policy')
|
||||
parser_list = subparsers.add_parser('list-backends',
|
||||
help='Show list of available backends')
|
||||
parser_status = subparsers.add_parser('status',
|
||||
help='Show current Group Policy status')
|
||||
parser_enable = subparsers.add_parser('enable',
|
||||
help='Enable Group Policy subsystem')
|
||||
|
||||
parser_disable = subparsers.add_parser('disable',
|
||||
help='Disable Group Policy subsystem')
|
||||
parser_update = subparsers.add_parser('update',
|
||||
help='Update state')
|
||||
parser_write = subparsers.add_parser('write',
|
||||
help='Operate on Group Policies (enable or disable)')
|
||||
parser_set_backend = subparsers.add_parser('set-backend',
|
||||
help='Set or change currently active backend')
|
||||
parser_default = subparsers.add_parser('default-policy',
|
||||
help='Show name of default policy')
|
||||
parser_active = subparsers.add_parser('active-policy',
|
||||
help='Show name of policy enabled')
|
||||
parser_active_backend = subparsers.add_parser('active-backend',
|
||||
help='Show currently configured backend')
|
||||
|
||||
parser_set_backend.add_argument('backend',
|
||||
default='samba',
|
||||
type=str,
|
||||
nargs='?',
|
||||
const='backend',
|
||||
choices=['local', 'samba'],
|
||||
help='Backend (source of settings) name')
|
||||
|
||||
parser_write.add_argument('status',
|
||||
choices=['enable', 'disable'],
|
||||
help='Enable or disable Group Policies')
|
||||
parser_write.add_argument('localpolicy',
|
||||
default=None,
|
||||
nargs='?',
|
||||
help='Name of local policy to enable')
|
||||
parser_write.add_argument('backend',
|
||||
default='samba',
|
||||
type=str,
|
||||
nargs='?',
|
||||
const='backend',
|
||||
choices=['local', 'samba'],
|
||||
help='Backend (source of settings) name')
|
||||
|
||||
parser_enable.add_argument('--local-policy',
|
||||
default=None,
|
||||
help='Name of local policy to enable')
|
||||
parser_enable.add_argument('--backend',
|
||||
default='samba',
|
||||
type=str,
|
||||
choices=['local', 'samba'],
|
||||
help='Backend (source of settings) name')
|
||||
|
||||
parser_update.add_argument('--local-policy',
|
||||
default=None,
|
||||
help='Name of local policy to enable')
|
||||
parser_update.add_argument('--backend',
|
||||
default='samba',
|
||||
type=str,
|
||||
choices=['local', 'samba'],
|
||||
help='Backend (source of settings) name')
|
||||
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
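# Illustrative invocations (hypothetical shell session) of the sub-commands
# defined above:
#   gpupdate-setup status                  # print 'enabled' or 'disabled'
#   gpupdate-setup enable --backend samba  # enable policy units, set the backend
#   gpupdate-setup set-backend local       # switch to the local backend
#   gpupdate-setup active-policy           # show the active Local Policy template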
def validate_policy_name(policy_name):
|
||||
return policy_name in [os.path.basename(d) for d in get_policy_variants()]
|
||||
|
||||
def is_unit_enabled(unit_name, unit_global=False):
|
||||
'''
|
||||
Check that designated systemd unit is enabled
|
||||
'''
|
||||
command = ['/bin/systemctl', 'is-enabled', unit_name]
|
||||
if unit_global:
|
||||
command = ['/bin/systemctl', '--global', 'is-enabled', unit_name]
|
||||
value = runcmd(command)
|
||||
|
||||
# If first line of stdout is equal to "enabled" and return code
|
||||
# is zero then unit is considered enabled.
|
||||
rc = value[0]
|
||||
result = []
|
||||
try:
|
||||
result = value[1].replace('\n', '')
|
||||
except IndexError as exc:
|
||||
return False
|
||||
|
||||
if result == 'enabled' and rc == 0:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_status():
|
||||
'''
|
||||
Check that gpupdate.timer and gpupdate-user.timer are enabled.
|
||||
'''
|
||||
is_gpupdate = is_unit_enabled('gpupdate.timer')
|
||||
is_gpupdate_user = is_unit_enabled('gpupdate-user.timer', unit_global=True)
|
||||
|
||||
if is_gpupdate and is_gpupdate_user:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_active_policy_name():
|
||||
'''
|
||||
Show the name of an active Local Policy template
|
||||
'''
|
||||
config = GPConfig()
|
||||
return os.path.basename(config.get_local_policy_template())
|
||||
|
||||
def get_active_backend():
|
||||
config = GPConfig()
|
||||
return config.get_backend()
|
||||
|
||||
def rollback_on_error(command_name):
|
||||
'''
|
||||
Disable group policy services in case command returns error code
|
||||
'''
|
||||
if 0 != runcmd(command_name)[0]:
|
||||
disable_gp()
|
||||
return False
|
||||
return True
|
||||
|
||||
def disable_gp():
|
||||
'''
|
||||
Consistently disable group policy services
|
||||
'''
|
||||
cmd_set_global_policy = ['/usr/sbin/control', 'system-policy', 'remote']
|
||||
cmd_set_local_policy = ['/usr/sbin/control', 'system-policy', 'local']
|
||||
cmd_disable_gpupdate_service = ['/bin/systemctl', 'disable', 'gpupdate.service']
|
||||
cmd_disable_gpupdate_user_service = ['/bin/systemctl', '--global', 'disable', 'gpupdate-user.service']
|
||||
cmd_disable_gpupdate_timer = ['/bin/systemctl', 'disable', 'gpupdate.timer']
|
||||
cmd_disable_gpupdate_user_timer = ['/bin/systemctl', '--global', 'disable', 'gpupdate-user.timer']
|
||||
cmd_control_system_auth = ['/usr/sbin/control', 'system-auth']
|
||||
cmd_disable_gpupdate_scripts_service = ['/bin/systemctl', 'disable', 'gpupdate-scripts-run.service']
|
||||
cmd_disable_gpupdate_scripts_user_service = ['/bin/systemctl', '--global', 'disable', 'gpupdate-scripts-run-user.service']
|
||||
|
||||
config = GPConfig()
|
||||
|
||||
auth_result = 'local'
|
||||
try:
|
||||
auth_result = runcmd(cmd_control_system_auth)[1][0]
|
||||
except Exception as exc:
|
||||
print(str(exc))
|
||||
|
||||
if auth_result != 'local':
|
||||
runcmd(cmd_set_global_policy)
|
||||
else:
|
||||
runcmd(cmd_set_local_policy)
|
||||
runcmd(cmd_disable_gpupdate_service)
|
||||
runcmd(cmd_disable_gpupdate_user_service)
|
||||
runcmd(cmd_disable_gpupdate_timer)
|
||||
runcmd(cmd_disable_gpupdate_user_timer)
|
||||
runcmd(cmd_disable_gpupdate_scripts_service)
|
||||
runcmd(cmd_disable_gpupdate_scripts_user_service)
|
||||
config.set_local_policy_template()
|
||||
config.set_backend()
|
||||
|
||||
def enable_gp(policy_name, backend_type):
|
||||
'''
|
||||
Consistently enable group policy services
|
||||
'''
|
||||
cmd_set_gpupdate_policy = ['/usr/sbin/control', 'system-policy', 'gpupdate']
|
||||
cmd_gpoa_nodomain = ['/usr/sbin/gpoa', '--nodomain', '--loglevel', '5']
|
||||
cmd_enable_gpupdate_service = ['/bin/systemctl', 'enable', 'gpupdate.service']
|
||||
cmd_enable_gpupdate_user_service = ['/bin/systemctl', '--global', 'enable', 'gpupdate-user.service']
|
||||
cmd_enable_gpupdate_timer = ['/bin/systemctl', 'enable', 'gpupdate.timer']
|
||||
cmd_enable_gpupdate_user_timer = ['/bin/systemctl', '--global', 'enable', 'gpupdate-user.timer']
|
||||
cmd_enable_gpupdate_scripts_service = ['/bin/systemctl', 'enable', 'gpupdate-scripts-run.service']
|
||||
cmd_enable_gpupdate_user_scripts_service = ['/bin/systemctl', '--global', 'enable', 'gpupdate-scripts-run-user.service']
|
||||
|
||||
config = GPConfig()
|
||||
|
||||
custom_policy_dir = get_custom_policy_dir()
|
||||
if not os.path.isdir(custom_policy_dir):
|
||||
os.makedirs(custom_policy_dir)
|
||||
|
||||
target_policy_name = get_default_policy_name()
|
||||
if policy_name:
|
||||
if validate_policy_name(policy_name):
|
||||
target_policy_name = policy_name
|
||||
print(target_policy_name)
|
||||
|
||||
config.set_local_policy_template(target_policy_name)
|
||||
config.set_backend(backend_type)
|
||||
|
||||
# Enable oddjobd_gpupdate in PAM config
|
||||
if not rollback_on_error(cmd_set_gpupdate_policy):
|
||||
return
|
||||
# Bootstrap the Group Policy engine
|
||||
if not rollback_on_error(cmd_gpoa_nodomain):
|
||||
return
|
||||
# Enable gpupdate.service
|
||||
if not rollback_on_error(cmd_enable_gpupdate_service):
|
||||
return
|
||||
if not is_unit_enabled('gpupdate.service'):
|
||||
disable_gp()
|
||||
return
|
||||
# Enable gpupdate-setup.service for all users
|
||||
if not rollback_on_error(cmd_enable_gpupdate_user_service):
|
||||
return
|
||||
# Enable gpupdate-scripts-run.service
|
||||
if not rollback_on_error(cmd_enable_gpupdate_scripts_service):
|
||||
return
|
||||
if not is_unit_enabled('gpupdate-scripts-run.service'):
|
||||
disable_gp()
|
||||
return
|
||||
# Enable gpupdate-scripts-run-user.service for all users
|
||||
if not rollback_on_error(cmd_enable_gpupdate_user_scripts_service):
|
||||
return
|
||||
if not is_unit_enabled('gpupdate-scripts-run-user.service', unit_global=True):
|
||||
disable_gp()
|
||||
return
|
||||
|
||||
# Enable gpupdate.timer
|
||||
if not rollback_on_error(cmd_enable_gpupdate_timer):
|
||||
return
|
||||
if not is_unit_enabled('gpupdate.timer'):
|
||||
disable_gp()
|
||||
return
|
||||
# Enable gpupdate-setup.timer for all users
|
||||
if not rollback_on_error(cmd_enable_gpupdate_user_timer):
|
||||
return
|
||||
if not is_unit_enabled('gpupdate-user.timer', unit_global=True):
|
||||
disable_gp()
|
||||
return
|
||||
|
||||
def act_list():
|
||||
'''
|
||||
Show list of available templates of Local Policy
|
||||
'''
|
||||
for entry in get_policy_variants():
|
||||
print(entry.rpartition('/')[2])
|
||||
|
||||
def act_list_backends():
|
||||
'''
|
||||
List backends supported by GPOA
|
||||
'''
|
||||
backends = get_backends()
|
||||
for backend in backends:
|
||||
print(backend)
|
||||
|
||||
def act_status():
|
||||
'''
|
||||
Check that group policy services are enabled
|
||||
'''
|
||||
if get_status():
|
||||
print('enabled')
|
||||
else:
|
||||
print('disabled')
|
||||
|
||||
def act_set_backend(backend_name):
|
||||
config = GPConfig()
|
||||
config.set_backend(backend_name)
|
||||
|
||||
def act_write(status, localpolicy, backend):
|
||||
'''
|
||||
Enable or disable group policy services
|
||||
'''
|
||||
if status == 'enable' or status == '#t':
|
||||
enable_gp(localpolicy, backend)
|
||||
if status == 'disable' or status == '#f':
|
||||
disable_gp()
|
||||
|
||||
def act_enable(localpolicy, backend):
|
||||
'''
|
||||
Enable group policy services
|
||||
'''
|
||||
enable_gp(localpolicy, backend)
|
||||
|
||||
def act_active_policy():
|
||||
'''
|
||||
Print active Local Policy template name to stdout
|
||||
'''
|
||||
print(get_active_policy_name())
|
||||
|
||||
def act_active_backend():
|
||||
'''
|
||||
Print currently configured backend.
|
||||
'''
|
||||
print(get_active_backend())
|
||||
|
||||
def act_default_policy():
|
||||
'''
|
||||
Print default Local Policy template name to stdout
|
||||
'''
|
||||
print(get_default_policy_name())
|
||||
|
||||
def main():
|
||||
arguments = parse_arguments()
|
||||
|
||||
action = dict()
|
||||
action['list'] = act_list
|
||||
action['list-backends'] = act_list_backends
|
||||
action['status'] = act_status
|
||||
action['set-backend'] = act_set_backend
|
||||
action['write'] = act_write
|
||||
action['enable'] = act_enable
|
||||
action['update'] = act_enable
|
||||
action['disable'] = disable_gp
|
||||
action['active-policy'] = act_active_policy
|
||||
action['active-backend'] = act_active_backend
|
||||
action['default-policy'] = act_default_policy
|
||||
|
||||
if arguments.action == None:
|
||||
action['status']()
|
||||
elif arguments.action == 'update':
|
||||
if get_status():
|
||||
action[arguments.action](arguments.local_policy, arguments.backend)
|
||||
elif arguments.action == 'enable':
|
||||
action[arguments.action](arguments.local_policy, arguments.backend)
|
||||
elif arguments.action == 'write':
|
||||
action[arguments.action](arguments.status, arguments.localpolicy, arguments.backend)
|
||||
elif arguments.action == 'set-backend':
|
||||
action[arguments.action](arguments.backend)
|
||||
else:
|
||||
action[arguments.action]()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
@ -1,965 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#domain "gpoa"
|
||||
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: 0.8.0\n"
|
||||
"Report-Msgid-Bugs-To: samba@lists.altlinux.org\n"
|
||||
"PO-Revision-Date: \n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain;charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Language: ru\n"
|
||||
|
||||
msgid "Don't start plugins"
|
||||
msgstr "Не запускать модули"
|
||||
|
||||
# Info
|
||||
msgid "Got GPO list for username"
|
||||
msgstr "Получен список GPO для пользователя"
|
||||
|
||||
msgid "Got GPO"
|
||||
msgstr "Получен объект групповой политики"
|
||||
|
||||
msgid "Unknown info code"
|
||||
msgstr "Неизвестный код информационного сообщения"
|
||||
|
||||
msgid "Working with control"
|
||||
msgstr "Применение настроек control"
|
||||
|
||||
msgid "Working with systemd"
|
||||
msgstr "Работа с systemd"
|
||||
|
||||
msgid "Unable to work with systemd unit"
|
||||
msgstr "Невозможно создать оъект для unit systemd"
|
||||
|
||||
msgid "Starting systemd unit"
|
||||
msgstr "Запуск unit systemd"
|
||||
|
||||
msgid "Firefox policy"
|
||||
msgstr "Политика Firefox"
|
||||
|
||||
msgid "Chromium policy"
|
||||
msgstr "Политика Chromium"
|
||||
|
||||
msgid "Set user property to"
|
||||
msgstr "Установка свойств для пользователя"
|
||||
|
||||
msgid "The line in the configuration file was cleared"
|
||||
msgstr "В конфигурационном файле была очищена строка"
|
||||
|
||||
# Error
|
||||
msgid "Insufficient permissions to run gpupdate"
|
||||
msgstr "Недостаточно прав для запуска gpupdate"
|
||||
|
||||
msgid "gpupdate will not be started"
|
||||
msgstr "gpupdate не будет запущен"
|
||||
|
||||
msgid "Backend execution error"
|
||||
msgstr "Ошибка бэкэнда"
|
||||
|
||||
msgid "Error occurred while running frontend manager"
|
||||
msgstr "Ошибка фронтенда"
|
||||
|
||||
msgid "Error running GPOA for computer"
|
||||
msgstr "Ошибка запуска GPOA для машины"
|
||||
|
||||
msgid "Error running GPOA for user"
|
||||
msgstr "Ошибка запуска GPOA для пользователя"
|
||||
|
||||
msgid "Unable to initialize Samba backend"
|
||||
msgstr "Невозможно инициализировать бэкэнд Samba"
|
||||
|
||||
msgid "Unable to initialize no-domain backend"
|
||||
msgstr "Невозможно инициализировать бэкэнд-заглушку"
|
||||
|
||||
msgid "Error running ADP"
|
||||
msgstr "Ошибка во время работы ADP"
|
||||
|
||||
msgid "Unable to determine DC hostname"
|
||||
msgstr "Невозможно определить имя контроллера домена"
|
||||
|
||||
msgid "Error occured while running applier with user privileges"
|
||||
msgstr "Ошибка во время работы applier в контексте пользователя"
|
||||
|
||||
msgid "Unable to initialize backend"
|
||||
msgstr "Невозможно инициализировать бэкэнд"
|
||||
|
||||
msgid "Not sufficient privileges to run machine appliers"
|
||||
msgstr "Недостаточно прав для запуска appliers для машины"
|
||||
|
||||
msgid "Kerberos ticket check failed"
|
||||
msgstr "Проверка билета Kerberos закончилась неудачно"
|
||||
|
||||
msgid "Unable to retrieve domain name via CLDAP query"
|
||||
msgstr "Не удалось определить имя домена AD через запрос к LDAP"
|
||||
|
||||
msgid "Error getting SID using wbinfo, will use SID from cache"
|
||||
msgstr "Не удалось определить SID с использованием утилиты wbinfo, будет использоваться фиктивный/кэшированный SID"
|
||||
|
||||
msgid "Unable to get GPO list for user from AD DC"
|
||||
msgstr "Не удалось получить список групповых политик для пользователя от контроллера домена AD"
|
||||
|
||||
msgid "Error getting XDG_DESKTOP_DIR"
|
||||
msgstr "Не удалось получить значение XDG_DESKTOP_DIR"
|
||||
|
||||
msgid "Error occured while running user applier in administrator context"
|
||||
msgstr "Ошибка выполнения applier в контексте администратора"
|
||||
|
||||
msgid "Error occured while running user applier in user context (with dropped privileges)"
|
||||
msgstr "Ошибка работы пользовательского applier в пользовательском контексте (со сбросом привилегий процесса)"
|
||||
|
||||
msgid "No reply from oddjobd GPOA runner via D-Bus for current user"
|
||||
msgstr "Не получен ответ от oddjobd для текущего пользователя"
|
||||
|
||||
msgid "No reply from oddjobd GPOA runner via D-Bus for computer"
|
||||
msgstr "Не получен ответ от oddjobd для компьютера"
|
||||
|
||||
msgid "No reply from oddjobd GPOA runner via D-Bus for user"
|
||||
msgstr "Не получен ответ от oddjobd для пользователя"
|
||||
|
||||
msgid "Error occured while running machine applier"
|
||||
msgstr "Ошибка во время работы applier для машины"
|
||||
|
||||
msgid "Error occured while initializing user applier"
|
||||
msgstr "Ошибка инициализации пользовательского applier"
|
||||
|
||||
msgid "Error merging machine GPT"
|
||||
msgstr "Ошибка слияния машинной групповой политики"
|
||||
|
||||
msgid "Error merging user GPT"
|
||||
msgstr "Ошибка слияния пользовательской групповой политики"
|
||||
|
||||
msgid "Error merging machine part of GPT"
|
||||
msgstr "Ошибка слияния машинной части групповой политики"
|
||||
|
||||
msgid "Error merging user part of GPT"
|
||||
msgstr "Ошибка слияния пользовательской части групповой политики"
|
||||
|
||||
msgid "Unknown error code"
|
||||
msgstr "Неизвестный код ошибки"
|
||||
|
||||
msgid "Unable to work with control"
|
||||
msgstr "Не удалось применить настройки control"
|
||||
|
||||
msgid "Control applier for machine will not be started"
|
||||
msgstr "Приминение Control для машины не удалось"
|
||||
|
||||
msgid "Error getting control"
|
||||
msgstr "Ошибка установки control"
|
||||
|
||||
msgid "Is not in possible values for control"
|
||||
msgstr "Не входит в возможные значения для control"
|
||||
|
||||
msgid "Unable to set"
|
||||
msgstr "Невозможно установить"
|
||||
|
||||
msgid "Unable to generate file"
|
||||
msgstr "Невозможно создать файл"
|
||||
|
||||
msgid "Failed applying unit"
|
||||
msgstr "Не удалось применить настройки"
|
||||
|
||||
msgid "Unable to start systemd unit"
|
||||
msgstr "Невозможно запустить systemd unit"
|
||||
|
||||
msgid "Unable to cache specified URI"
|
||||
msgstr "Невозможно кэшировать указанный URI"
|
||||
|
||||
msgid "Unable to cache specified URI for machine"
|
||||
msgstr "Невозможно кэшировать указанный URI для компьютера"
|
||||
|
||||
msgid "Error recompiling global GSettings schemas"
|
||||
msgstr "Ошибка перекомпиляции глобальных GSettings schemas"
|
||||
|
||||
msgid "Error update configuration dconf"
|
||||
msgstr "Ошибка обновления конфигурации dconf"
|
||||
|
||||
msgid "Unable to cache specified URI for user"
|
||||
msgstr "Невозможно кэшировать указанный URI для пользователя"
|
||||
|
||||
msgid "Error during attempt to read Chromium preferences for user"
|
||||
msgstr "Ошибка при попытке прочитать настройки Chromium для пользователя"
|
||||
|
||||
msgid "Fail for applying shortcut to file with %"
|
||||
msgstr "Не удалось применить ярлык к файлу с %"
|
||||
|
||||
msgid "Fail for applying shortcut to not absolute path"
|
||||
msgstr "Не удалось применить ярлык к не абсолютному пути"
|
||||
|
||||
msgid "Error running pkcon_runner sync for machine"
|
||||
msgstr "Ошибка при запуске pkcon_runner синхронно для компьютера"
|
||||
|
||||
msgid "Package install error"
|
||||
msgstr "Ошибка установки пакета"
|
||||
|
||||
msgid "Package remove error"
|
||||
msgstr "Ошибка удаления пакета"
|
||||
|
||||
msgid "Error running pkcon_runner sync for user"
|
||||
msgstr "Ошибка при запуске pkcon_runner синхронно для пользователя"
|
||||
|
||||
msgid "Error running pkcon_runner async for machine"
|
||||
msgstr "Ошибка при запуске pkcon_runner асинхронно для компьютера"
|
||||
|
||||
msgid "Error running pkcon_runner async for user"
|
||||
msgstr "Ошибка при запуске pkcon_runner асинхронно для пользователя"
|
||||
|
||||
msgid "Error merging user GPT (from machine GPO)"
|
||||
msgstr "Ошибка слияния пользовательской групповой политики (машинная часть)"
|
||||
|
||||
msgid "Error cleaning directory for machine"
|
||||
msgstr "Ошибка очистки каталога для машины"
|
||||
|
||||
msgid "Error cleaning directory for user"
|
||||
msgstr "Ошибка очистки каталога для пользователя"
|
||||
|
||||
msgid "Error while executing command for widgets"
|
||||
msgstr "Ошибка при выполнении команды для виджетов"
|
||||
|
||||
msgid "Error creating environment variables"
|
||||
msgstr "Ошибка создания переменных среды"
|
||||
|
||||
msgid "Error running kwriteconfig5 command"
|
||||
msgstr "Ошибка выполнения команды kwriteconfig5"
|
||||
|
||||
msgid "Error getting list of keys"
|
||||
msgstr "Ошибка получения списка ключей"
|
||||
|
||||
msgid "Error getting key value"
|
||||
msgstr "Ошибка при получении значения ключей"
|
||||
|
||||
msgid "Failed to update dconf database"
|
||||
msgstr "Не удалось обновить базу данных dconf"
|
||||
|
||||
msgid "Exception occurred while updating dconf database"
|
||||
msgstr "Возникло исключение при обновлении базы данных dconf"
|
||||
|
||||
# Error_end
|
||||
|
||||
# Debug
|
||||
msgid "The GPOA process was started for user"
|
||||
msgstr "Произведён запуск GPOA для обновления политик пользователя"
|
||||
|
||||
msgid "Username is not specified - will use username of the current process"
|
||||
msgstr "Имя пользователя не указано - будет использовано имя владельца процесса"
|
||||
|
||||
msgid "Initializing plugin manager"
|
||||
msgstr "Инициализация плагинов"
|
||||
|
||||
msgid "ADP plugin initialized"
|
||||
msgstr "Инициализирован плагин ADP"
|
||||
|
||||
msgid "Running ADP plugin"
|
||||
msgstr "Запущен плагин ADP"
|
||||
|
||||
msgid "Starting GPOA for user via D-Bus"
|
||||
msgstr "Запускается GPOA для пользователя обращением к oddjobd через D-Bus"
|
||||
|
||||
msgid "Cache directory determined"
|
||||
msgstr "Определена директория кэша Samba"
|
||||
|
||||
msgid "Initializing local backend without domain"
|
||||
msgstr "Инициализация бэкэнда-заглушки"
|
||||
|
||||
msgid "Initializing Samba backend for domain"
|
||||
msgstr "Инициализация бэкэнда Samba"
|
||||
|
||||
msgid "Group Policy target set for update"
|
||||
msgstr "Групповые политики будут обновлены для указанной цели"
|
||||
|
||||
msgid "Starting GPOA for computer via D-Bus"
|
||||
msgstr "Запускается GPOA для компьютера обращением к oddjobd через D-Bus"
|
||||
|
||||
msgid "Got exit code"
|
||||
msgstr "Получен код возврата из утилиты"
|
||||
|
||||
msgid "Starting GPOA via D-Bus"
|
||||
msgstr "Запускается GPOA обращением к oddjobd через D-Bus"
|
||||
|
||||
msgid "Starting GPOA via command invocation"
|
||||
msgstr "GPOA запускается с помощью прямого вызова приложения"
|
||||
|
||||
msgid "Username for frontend is determined"
|
||||
msgstr "Определено имя пользователя для фронтенда"
|
||||
|
||||
msgid "Applying computer part of settings"
|
||||
msgstr "Применение настроек для машины"
|
||||
|
||||
msgid "Kerberos ticket check succeed"
|
||||
msgstr "Проверка билета Kerberos прошла успешно"
|
||||
|
||||
msgid "Found AD domain via CLDAP query"
|
||||
msgstr "Имя домена Active Directory успешно определено при запросе к LDAP"
|
||||
|
||||
msgid "Setting info"
|
||||
msgstr "Установка вспомогательной переменной"
|
||||
|
||||
msgid "Initializing cache"
|
||||
msgstr "Инициализация кэша"
|
||||
|
||||
msgid "Set operational SID"
|
||||
msgstr "Установка рабочего SID"
|
||||
|
||||
msgid "Got PReg entry"
|
||||
msgstr "Получен ключ реестра"
|
||||
|
||||
msgid "Looking for preference in user part of GPT"
|
||||
msgstr "Поиск настроек в пользовательской части GPT"
|
||||
|
||||
msgid "Looking for preference in machine part of GPT"
|
||||
msgstr "Поиск настроек в машинной части GPT"
|
||||
|
||||
msgid "Re-caching Local Policy"
|
||||
msgstr "Обновление кэша локальной политики"
|
||||
|
||||
msgid "Adding HKCU entry"
|
||||
msgstr "Слияние ключа в пользовательскую (HKCU) часть реестра"
|
||||
|
||||
msgid "Skipping HKLM branch deletion key"
|
||||
msgstr "Пропускаем специальный ключ удаления ветви реестра HKLM"
|
||||
|
||||
msgid "Reading and merging machine preference"
|
||||
msgstr "Вычитывание и слияние машинных настроек"
|
||||
|
||||
msgid "Reading and merging user preference"
|
||||
msgstr "Вычитывание и слияние пользовательских настроек"
|
||||
|
||||
msgid "Found SYSVOL entry"
|
||||
msgstr "Найден путь SYSVOL"
|
||||
|
||||
msgid "Trying to load PReg from .pol file"
|
||||
msgstr "Пробуем загрузить ключи реестра из .pol файла"
|
||||
|
||||
msgid "Finished reading PReg from .pol file"
|
||||
msgstr "Вычитаны ключи реестра из .pol файла"
|
||||
|
||||
msgid "Determined length of PReg file"
|
||||
msgstr "Определена длина .pol файла"
|
||||
|
||||
msgid "Merging machine settings from PReg file"
|
||||
msgstr "Слияние машинных настроек из .pol файла"
|
||||
|
||||
msgid "Merging machine (user part) settings from PReg file"
|
||||
msgstr "Слияние пользовательской части машинных настроек из .pol файла"
|
||||
|
||||
msgid "Loading PReg from XML"
|
||||
msgstr "Загружаем ключи реестра из XML"
|
||||
|
||||
msgid "Setting process permissions"
|
||||
msgstr "Установка прав процесса"
|
||||
|
||||
msgid "Samba DC setting is overriden by user setting"
|
||||
msgstr "Используется указанный пользователем контроллер домена AD"
|
||||
|
||||
msgid "Saving information about drive mapping"
|
||||
msgstr "Сохранение информации о привязках дисков"
|
||||
|
||||
msgid "Saving information about printer"
|
||||
msgstr "Сохранение информации о принтерах"
|
||||
|
||||
msgid "Saving information about link"
|
||||
msgstr "Сохранение информации о ярлычках"
|
||||
|
||||
msgid "Saving information about folder"
|
||||
msgstr "Сохранение информации о папках"
|
||||
|
||||
msgid "No value cached for object"
|
||||
msgstr "Отсутствует кэшированное значение для объекта"
|
||||
|
||||
msgid "Key is already present in cache, will update the value"
|
||||
msgstr "Ключ уже существует, его значение будет обновлено"
|
||||
|
||||
msgid "GPO update started"
|
||||
msgstr "Начато обновление GPO"
|
||||
|
||||
msgid "GPO update finished"
|
||||
msgstr "Завершено обновление GPO"
|
||||
|
||||
msgid "Retrieving list of GPOs to replicate from AD DC"
|
||||
msgstr "Получение списка GPO для репликации с контроллера домена AD"
|
||||
|
||||
msgid "Establishing connection with AD DC"
|
||||
msgstr "Установка соединения с контроллером домена AD"
|
||||
|
||||
msgid "Started GPO replication from AD DC"
|
||||
msgstr "Начата репликация GPO от контроллера домена AD"
|
||||
|
||||
msgid "Finished GPO replication from AD DC"
|
||||
msgstr "Завершена репликация GPO от контроллера домена AD"
|
||||
|
||||
msgid "Skipping HKCU branch deletion key"
|
||||
msgstr "Пропускаем специальный ключ удаления ветви реестра HKCU"
|
||||
|
||||
msgid "Read domain name from configuration file"
|
||||
msgstr "Имя контроллера домена для репликации прочитано из файла конфигурации"
|
||||
|
||||
msgid "Saving information about environment variables"
|
||||
msgstr "Сохранение информации о переменных окружения"
|
||||
|
||||
msgid "Unknown debug code"
|
||||
msgstr "Неизвестный отладочный код"
|
||||
|
||||
msgid "Running Control applier for machine"
|
||||
msgstr "Начато применение Control для машины"
|
||||
|
||||
msgid "Setting control"
|
||||
msgstr "Установка control"
|
||||
|
||||
msgid "Deny_All setting found"
|
||||
msgstr "Deny_All настройка найдена"
|
||||
|
||||
msgid "Deny_All setting for user"
|
||||
msgstr "Deny_All настройка для пользователя"
|
||||
|
||||
msgid "Deny_All setting not found"
|
||||
msgstr "Deny_All настройка не найдена"
|
||||
|
||||
msgid "Deny_All setting not found for user"
|
||||
msgstr "Deny_All настройка не найдена для пользователя"
|
||||
|
||||
msgid "Running Polkit applier for machine"
|
||||
msgstr "Начато применение настроек Polkit для машины"
|
||||
|
||||
msgid "Running Polkit applier for user in administrator context"
|
||||
msgstr "Начато применение настроек Polkit пользователя в контексте администратора"
|
||||
|
||||
msgid "Polkit applier for machine will not be started"
|
||||
msgstr "Polkit для машины не запускается"
|
||||
|
||||
msgid "Polkit applier for user in administrator context will not be started"
|
||||
msgstr "Polkit для пользователя в контексте администратора не запускается"
|
||||
|
||||
msgid "Generated file"
|
||||
msgstr "Созданный файл"
|
||||
|
||||
msgid "Running systemd applier for machine"
|
||||
msgstr "Начато применение настроек systemd для машины"
|
||||
|
||||
msgid "Running systemd applier for machine will not be started"
|
||||
msgstr "Применение настроек systemd для машины не удалось"
|
||||
|
||||
msgid "Running GSettings applier for machine"
|
||||
msgstr "Запуск применение настроек GSettings для машины"
|
||||
|
||||
msgid "GSettings applier for machine will not be started"
|
||||
msgstr "Применение настроек GSettings для машины не удалось"
|
||||
|
||||
msgid "Removing GSettings policy file from previous run"
|
||||
msgstr "Удаление файла политики GSettings от предыдущего запуска"
|
||||
|
||||
msgid "Mapping Windows policies to GSettings policies"
|
||||
msgstr "Сопоставление политик Windows с политиками GSettings"
|
||||
|
||||
msgid "GSettings windows policies mapping not enabled"
|
||||
msgstr "Сопоставление политик Windows GSettings не включено"
|
||||
|
||||
msgid "Applying user setting"
|
||||
msgstr "Применение пользовательских настроек"
|
||||
|
||||
msgid "Found GSettings windows mapping"
|
||||
msgstr "Найдены соответствия настроек windows-GSettings"
|
||||
|
||||
msgid "Running GSettings applier for user in user context"
|
||||
msgstr "Запуск применение настроек GSettings в контексте пользователя"
|
||||
|
||||
msgid "GSettings applier for user in user context will not be started"
|
||||
msgstr "GSettings в контексте пользователя не запускается"
|
||||
|
||||
msgid "Applying machine setting"
|
||||
msgstr "Применение настроек машины"
|
||||
|
||||
msgid "Path not resolved as UNC URI"
|
||||
msgstr "Путь не разрешен"
|
||||
|
||||
msgid "Getting cached file for URI"
|
||||
msgstr "Получение кешированного файла для URI"
|
||||
|
||||
msgid "Wrote Firefox preferences to"
|
||||
msgstr "Настройки Firefox записаны в"
|
||||
|
||||
msgid "Found Firefox profile in"
|
||||
msgstr "Найден профиль Firefox в"
|
||||
|
||||
msgid "Running Firefox applier for machine"
|
||||
msgstr "Запуск применение настроек Firefox для машины"
|
||||
|
||||
msgid "Firefox applier for machine will not be started"
|
||||
msgstr "Применение настроек Firefox для компьютера не запускается"
|
||||
|
||||
msgid "Running Chromium applier for machine"
|
||||
msgstr "Запуск применение настроек Chromium для машины"
|
||||
|
||||
msgid "Chromium applier for machine will not be started"
|
||||
msgstr "Применение настроек Chromium для компьютера не запускается"
|
||||
|
||||
msgid "Wrote Chromium preferences to"
|
||||
msgstr "Настройки Chromium записаны в"
|
||||
|
||||
msgid "Running Shortcut applier for machine"
|
||||
msgstr "Запуск применение ярлыков для машины"
|
||||
|
||||
msgid "Shortcut applier for machine will not be started"
|
||||
msgstr "Применение ярлыков для компьютера не запускается"
|
||||
|
||||
msgid "No shortcuts to process for"
|
||||
msgstr "Нет ярлыков для обработки"
|
||||
|
||||
msgid "Running Shortcut applier for user in user context"
|
||||
msgstr "Запуск применение ярлыков в контексте пользователя"
|
||||
|
||||
msgid "Shortcut applier for user in user context will not be started"
|
||||
msgstr "Применение ярлыков в контексте пользователя не запускается"
|
||||
|
||||
msgid "Running Shortcut applier for user in administrator context"
|
||||
msgstr "Запуск применение ярлыков в контексте администратора"
|
||||
|
||||
msgid "Shortcut applier for user in administrator context will not be started"
|
||||
msgstr "Применение ярлыков в контексте администратора не запускается"
|
||||
|
||||
msgid "Try to expand path for shortcut"
|
||||
msgstr "Попытка расширить путь для ярлыка"
|
||||
|
||||
msgid "Applying shortcut file to"
|
||||
msgstr "Применение ярлыка к файлу"
|
||||
|
||||
msgid "Running Folder applier for machine"
|
||||
msgstr "Запуск применение папок для машины"
|
||||
|
||||
msgid "Folder applier for machine will not be started"
|
||||
msgstr "Применение папок для машины не запускается"
|
||||
|
||||
msgid "Folder creation skipped for machine"
|
||||
msgstr "Создание папки для машины пропущено"
|
||||
|
||||
msgid "Folder creation skipped for user"
|
||||
msgstr "Создание папки для пользователя пропущено"
|
||||
|
||||
msgid "Running Folder applier for user in user context"
|
||||
msgstr "Запуск применение папок для пользователя в контексте пользователя"
|
||||
|
||||
msgid "Folder applier for user in user context will not be started"
|
||||
msgstr "Применение папок для пользователя в контексте пользователя не запускается"
|
||||
|
||||
msgid "Running CUPS applier for machine"
|
||||
msgstr "Запуск применение настроек CUPS для машины"
|
||||
|
||||
msgid "CUPS applier for machine will not be started"
|
||||
msgstr "Применение настроек CUPS для машины не запускается"
|
||||
|
||||
msgid "Running CUPS applier for user in administrator context"
|
||||
msgstr "Запуск применение настроек CUPS для пользователя в контексте администратора"
|
||||
|
||||
msgid "CUPS applier for user in administrator context will not be started"
|
||||
msgstr "Применение настроек CUPS для пользователя в контексте администратора не запускается"
|
||||
|
||||
msgid "Running Firewall applier for machine"
|
||||
msgstr "Запуск применение настроек Firewall для машины"
|
||||
|
||||
msgid "Firewall is enabled"
|
||||
msgstr "Firewall включен"
|
||||
|
||||
msgid "Firewall is disabled, settings will be reset"
|
||||
msgstr "Firewall отключен, настройки будут сброшены"
|
||||
|
||||
msgid "Firewall applier will not be started"
|
||||
msgstr "Применение настроек Firewall не запускается"
|
||||
|
||||
msgid "Running NTP applier for machine"
|
||||
msgstr "Запуск применение настроек NTP для машины"
|
||||
|
||||
msgid "NTP server is configured to"
|
||||
msgstr "Сервер NTP настроен на"
|
||||
|
||||
msgid "Starting Chrony daemon"
|
||||
msgstr "Запуск демона Chrony"
|
||||
|
||||
msgid "Setting reference NTP server to"
|
||||
msgstr "Установка эталонного сервера NTP на"
|
||||
|
||||
msgid "Stopping Chrony daemon"
|
||||
msgstr "Остановка демона Chrony"
|
||||
|
||||
msgid "Configuring NTP server..."
|
||||
msgstr "Настройка NTP-сервера ..."
|
||||
|
||||
msgid "NTP server is enabled"
|
||||
msgstr "Сервер NTP включен"
|
||||
|
||||
msgid "NTP server is disabled"
|
||||
msgstr "NTP сервер отключен"
|
||||
|
||||
msgid "NTP server is not configured"
|
||||
msgstr "NTP сервер не настроен"
|
||||
|
||||
msgid "NTP client is enabled"
|
||||
msgstr "Клиент NTP включен"
|
||||
|
||||
msgid "NTP client is disabled"
|
||||
msgstr "Клиент NTP отключен"
|
||||
|
||||
msgid "NTP client is not configured"
|
||||
msgstr "NTP клиент не настроен"
|
||||
|
||||
msgid "NTP applier for machine will not be started"
|
||||
msgstr "Применение настроек NTP для машины не запускается"
|
||||
|
||||
msgid "Running Envvar applier for machine"
|
||||
msgstr "Запуск применение настроек Envvar для машины"
|
||||
|
||||
msgid "Envvar applier for machine will not be started"
|
||||
msgstr "Применение настроек Envvar для машины не запускается"
|
||||
|
||||
msgid "Running Envvar applier for user in user context"
|
||||
msgstr "Запуск применение настроек Envvar для пользователя в контексте пользователя"
|
||||
|
||||
msgid "Envvar applier for user in user context will not be started"
|
||||
msgstr "Применение настроек Envvar для пользователя в контексте пользователя не запускается"
|
||||
|
||||
msgid "Running Package applier for machine"
|
||||
msgstr "Запуск установки пакетов для машины"
|
||||
|
||||
msgid "Package applier for machine will not be started"
|
||||
msgstr "Применение установки пакетов для машины не запускается"
|
||||
|
||||
msgid "Running Package applier for user in administrator context"
|
||||
msgstr "Запуск установки пакетов для пользователя в контексте администратора"
|
||||
|
||||
msgid "Package applier for user in administrator context will not be started"
|
||||
msgstr "Применение установки пакетов для пользователя в контексте администратора не запускается"
|
||||
|
||||
msgid "Running pkcon_runner to install and remove packages"
|
||||
msgstr "Запуск pkcon_runner для установки и удаления пакетов"
|
||||
|
||||
msgid "Run apt-get update"
|
||||
msgstr "Запускаем apt-get update"
|
||||
|
||||
msgid "Error run apt-get update"
|
||||
msgstr "Ошибка запуска apt-get update"
|
||||
|
||||
msgid "Run user context applier with dropped privileges"
|
||||
msgstr "Запуск из контекста пользователя с удаленными привилегиями"
|
||||
|
||||
msgid "Run forked process with droped privileges"
|
||||
msgstr "Запустить разветвленный процесс с удаленными привилегиями"
|
||||
|
||||
msgid "Found connection by org.freedesktop.DBus.GetConnectionUnixProcessID"
|
||||
msgstr "Найдено соединение org.freedesktop.DBus.GetConnectionUnixProcessID"
|
||||
|
||||
msgid "Kill dbus-daemon and dconf-service in user context"
|
||||
msgstr "Остановка dbus-daemon и dconf-service в контексте пользователя"
|
||||
|
||||
msgid "Running CIFS applier for user in administrator context"
|
||||
msgstr "Запуск применение настроек CIFS для пользователя в контексте администратора"
|
||||
|
||||
msgid "CIFS applier for user in administrator context will not be started"
|
||||
msgstr "Применение настроек CIFS для пользователя в контексте администратора не запускается"
|
||||
|
||||
msgid "Installing the package"
|
||||
msgstr "Установка пакета"
|
||||
|
||||
msgid "Removing a package"
|
||||
msgstr "Удаление пакета"
|
||||
|
||||
msgid "Failed to found gsettings for machine"
|
||||
msgstr "Не удалось найти настройки gsettings для машины"
|
||||
|
||||
msgid "Failed to found user gsettings"
|
||||
msgstr "Не удалось найти настройки gsettings пользователя"
|
||||
|
||||
msgid "Configure user Group Policy loopback processing mode"
|
||||
msgstr "Настройка режима обработки замыкания пользовательской групповой политики"
|
||||
|
||||
msgid "Saving information about script"
|
||||
msgstr "Сохранение информации о скрипте"
|
||||
|
||||
msgid "No machine scripts directory to clean up"
|
||||
msgstr "Нет каталога машинных скриптов для очистки"
|
||||
|
||||
msgid "No user scripts directory to clean up"
|
||||
msgstr "Нет каталога пользовательских скриптов для очистки"
|
||||
|
||||
msgid "Prepare Scripts applier for machine"
|
||||
msgstr "Подготовка к применению машинных скриптов"
|
||||
|
||||
msgid "Scripts applier for machine will not be started"
|
||||
msgstr "Применение машинных скриптов не запускается"
|
||||
|
||||
msgid "Prepare Scripts applier for user in user context"
|
||||
msgstr "Подготовка к применению скриптов пользователя в его контексте"
|
||||
|
||||
msgid "Scripts applier for user in user context will not be started"
|
||||
msgstr "Применение скриптов пользователя в его контексте не запускается"
|
||||
|
||||
msgid "Clean machine scripts directory"
|
||||
msgstr "Очистка каталога машинных скриптов"
|
||||
|
||||
msgid "Clean user scripts directory"
|
||||
msgstr "Очистка каталога пользовательских скриптов"
|
||||
|
||||
msgid "Saving information about file"
|
||||
msgstr "Сохранение информации о файле"
|
||||
|
||||
msgid "Failed to return file path"
|
||||
msgstr "Не удалось вернуть путь к файлу"
|
||||
|
||||
msgid "Failed to create file"
|
||||
msgstr "Не удалось создать файл"
|
||||
|
||||
msgid "Failed to delete file"
|
||||
msgstr "Не удалось удалить файл"
|
||||
|
||||
msgid "Failed to update file"
|
||||
msgstr "Не удалось обновить файл"
|
||||
|
||||
msgid "Running File copy applier for machine"
|
||||
msgstr "Запуск применение настроек копирования файлов для машины"
|
||||
|
||||
msgid "Running File copy applier for machine will not be started"
|
||||
msgstr "Применение настроек копирования файлов для машины не будет запущено"
|
||||
|
||||
msgid "Running File copy applier for user in administrator context"
|
||||
msgstr "Запуск применение настроек копирования файлов для пользователя в контексте администратора"
|
||||
|
||||
msgid "Running File copy applier for user in administrator context will not be started"
|
||||
msgstr "Применение настроек копирования файлов для пользователя в контексте администратора не будет запущено"
|
||||
|
||||
msgid "Running ini applier for machine"
|
||||
msgstr "Запуск применение настроек ini файлов для машины"
|
||||
|
||||
msgid "Running ini applier for machine will not be started"
|
||||
msgstr "Применение настроек ini файлов для машины не будет запущено"
|
||||
|
||||
msgid "Running ini applier for user in user context"
|
||||
msgstr "Запуск применение настроек ini файлов для пользователя в контексте пользователя"
|
||||
|
||||
msgid "Running ini applier for user in user context will not be started"
|
||||
msgstr "Применение настроек ini файлов для пользователя в контексте пользователя не будет запущено"
|
||||
|
||||
msgid "Ini-file path not recognized"
|
||||
msgstr "Путь к ini-файлу не распознан"
|
||||
|
||||
msgid "Ini-file is not readable"
|
||||
msgstr "Ini-файл не читается"
|
||||
|
||||
msgid "Saving information about ini-file"
|
||||
msgstr "Сохранение информации об ini-файле"
|
||||
|
||||
msgid "Dictionary key generation failed"
|
||||
msgstr "Формирования ключа словаря не удалось"
|
||||
|
||||
msgid "Running CIFS applier for machine"
|
||||
msgstr "Запуск применение настроек CIFS для машины"
|
||||
|
||||
msgid "CIFS applier for machine will not be started"
|
||||
msgstr "Применение настроек CIFS для машины не будет запущено"
|
||||
|
||||
msgid "Saving information about network shares"
|
||||
msgstr "Сохранение информации о сетевых ресурсах"
|
||||
|
||||
msgid "Running networkshare applier for machine"
|
||||
msgstr "Запуск применение настроек сетевых каталогов для машины"
|
||||
|
||||
msgid "Running networkshare applier for machine will not be starte"
|
||||
msgstr "Применение настроек сетевых каталогов для машины не будет запущено"
|
||||
|
||||
msgid "Apply network share data action failed"
|
||||
msgstr "Не удалось применить действие с данными общего сетевого ресурса"
|
||||
|
||||
msgid "Running yandex_browser_applier for machine"
|
||||
msgstr "Запуск yandex_browser_applier для машины"
|
||||
|
||||
msgid "Yandex_browser_applier for machine will not be started"
|
||||
msgstr "Yandex_browser_applier для машины не запустится"
|
||||
|
||||
msgid "Wrote YandexBrowser preferences to"
|
||||
msgstr "Запись настройки Яндекс Браузера в"
|
||||
|
||||
msgid "Running networkshare applier for user"
|
||||
msgstr "Запуск применение настроек сетевых каталогов для пользователя"
|
||||
|
||||
msgid "File copy"
|
||||
msgstr "Копирование файла"
|
||||
|
||||
msgid "Running networkshare applier for user will not be started"
|
||||
msgstr "Применение настроек сетевых каталогов для пользователя не будет запущено"
|
||||
|
||||
msgid "File update"
|
||||
msgstr "Обновление файла"
|
||||
|
||||
msgid "Applying settings for network share"
|
||||
msgstr "Применение настроек для сетевой папки"
|
||||
|
||||
msgid "Deleting a file"
|
||||
msgstr "Удаление файла"
|
||||
|
||||
msgid "Running GPOA by root for user"
|
||||
msgstr "Запуск GPOA от root для пользователя"
|
||||
|
||||
msgid "The GPOA process was started for computer"
|
||||
msgstr "Процесс GPOA запущен для компьютера"
|
||||
|
||||
msgid "Running networkshare applier for machine will not be started"
|
||||
msgstr "Применение настроек сетевых каталогов для машины не будет запущено"
|
||||
|
||||
msgid "Failed to create a symlink to the network drives mountpoint"
|
||||
msgstr "Не удалось создать ссылку на точку монтирования сетевых дисков пользователя"
|
||||
|
||||
msgid "Failed to create a symlink to the system network drives mountpoint"
|
||||
msgstr "Не удалось создать ссылку на точку монтирования системных сетевых дисков"
|
||||
|
||||
msgid "Failed to create a symlink to the hidden network drives mountpoint"
|
||||
msgstr "Не удалось создать ссылку на точку монтирования скрытых сетевых дисков пользователя"
|
||||
|
||||
msgid "Failed to create a symlink to the hidden system network drives mountpoint"
|
||||
msgstr "Не удалось создать ссылку на точку монтирования скрытых системных сетевых дисков"
|
||||
|
||||
msgid "Running KDE applier for machine"
|
||||
msgstr "Запуск применения настроек KDE для машины"
|
||||
|
||||
msgid "KDE applier for machine will not be started"
|
||||
msgstr "Применение настроек KDE для машины не удалось"
|
||||
|
||||
msgid "Running KDE applier for user in user context"
|
||||
msgstr "Запуск применения настроек KDE в контексте пользователя"
|
||||
|
||||
msgid "KDE applier for user in user context will not be started"
|
||||
msgstr "KDE в контексте пользователя не запускается"
|
||||
|
||||
msgid "Changing the configuration file"
|
||||
msgstr "Изменение конфигурационного файла"
|
||||
|
||||
msgid "Widget command completed successfully"
|
||||
msgstr "Команда для виджетов выполнена успешно"
|
||||
|
||||
msgid "Getting a list of keys"
|
||||
msgstr "Получение списка ключей"
|
||||
|
||||
msgid "Getting the key value"
|
||||
msgstr "Получение значения ключа"
|
||||
|
||||
msgid "Successfully updated dconf database"
|
||||
msgstr "База данных dconf успешно обновлена"
|
||||
|
||||
msgid "Creating a dictionary with keys and values from the dconf database"
|
||||
msgstr "Формирование словаря с ключами и значениями из базы dconf"
|
||||
|
||||
msgid "No entry found for the specified path"
|
||||
msgstr "Не найдено записей по указанному пути"
|
||||
|
||||
msgid "Creating an ini file with policies for dconf"
|
||||
msgstr "Создание ini-файла с политиками для dconf"
|
||||
|
||||
msgid "GPO version was not found"
|
||||
msgstr "Версия GPO не найдена"
|
||||
|
||||
# Debug_end
|
||||
|
||||
# Warning
|
||||
msgid "Unable to perform gpupdate for non-existent user, will update machine settings"
|
||||
msgstr "Невозможно запустить gpupdate для несуществующего пользователя, будут обновлены настройки машины"
|
||||
|
||||
msgid "Current permissions does not allow to perform gpupdate for designated user. Will update current user settings"
|
||||
msgstr "Текущий уровень привилегий не позволяет выполнить gpupdate для указанного пользователя. Будут обновлены настройки текущего пользователя."
|
||||
|
||||
msgid "oddjobd is inaccessible"
|
||||
msgstr "oddjobd недоступен"
|
||||
|
||||
msgid "No SYSVOL entry assigned to GPO"
|
||||
msgstr "Объект групповой политики не имеет привязанного пути на SYSVOL"
|
||||
|
||||
|
||||
msgid "ADP package is not installed - plugin will not be initialized"
|
||||
msgstr "Пакет ADP не установлен, плагин не будет инициализирован"
|
||||
|
||||
msgid "Unknown warning code"
|
||||
msgstr "Неизвестный код предупреждения"
|
||||
|
||||
msgid "Unable to resolve GSettings parameter"
|
||||
msgstr "Не удалось установить параметр GSettings"
|
||||
|
||||
msgid "No home directory exists for user"
|
||||
msgstr "Для пользователя не существует домашнего каталога"
|
||||
|
||||
msgid "User's shortcut not placed to home directory"
|
||||
msgstr "Ярлык пользователя не помещен в домашний каталог"
|
||||
|
||||
msgid "CUPS is not installed: no printer settings will be deployed"
|
||||
msgstr "CUPS не установлен: настройки принтера не будут развернуты"
|
||||
|
||||
msgid "Unsupported NTP server type"
|
||||
msgstr "Неподдерживаемый тип сервера NTP"
|
||||
|
||||
msgid "Failed to read the list of files"
|
||||
msgstr "Не удалось прочитать список файлов"
|
||||
|
||||
msgid "Failed to caching the file"
|
||||
msgstr "Не удалось кэшировать файл"
|
||||
|
||||
msgid "Could not create a valid list of keys"
|
||||
msgstr "Не удалось создать допустимый список ключей"
|
||||
|
||||
msgid "Failed to copy file"
|
||||
msgstr "Не удалось скопировать файл"
|
||||
|
||||
msgid "Failed to create KDE settings list"
|
||||
msgstr "Не удалось создать список настроек KDE"
|
||||
|
||||
msgid "Could not find application tools"
|
||||
msgstr "Не удалось найти инструменты применения"
|
||||
|
||||
msgid "Failed to open KDE settings"
|
||||
msgstr "Не удалось открыть настройки KDE"
|
||||
|
||||
msgid "Failed to change KDE configuration file"
|
||||
msgstr "Не удалось изменить файл конфигурации KDE"
|
||||
|
||||
msgid "Error connecting to server"
|
||||
msgstr "Ошибка при подключении к серверу"
|
||||
|
||||
msgid "Wallpaper configuration file not found"
|
||||
msgstr "Конфигурационный файл для обоев не найден"
|
||||
|
||||
msgid "The user setting was not installed, conflict with computer setting"
|
||||
msgstr "Пользовательская настройка не была установлена, конфликт с настройкой компьютера"
|
||||
|
||||
msgid "Action for ini file failed"
|
||||
msgstr "Не удалось выполнить действие для INI-файла"
|
||||
|
||||
msgid "Couldn't get the uid"
|
||||
msgstr "Не удалось получить uid"
|
||||
|
||||
# Fatal
|
||||
msgid "Unable to refresh GPO list"
|
||||
msgstr "Невозможно обновить список объектов групповых политик"
|
||||
|
||||
msgid "Error getting GPTs for machine"
|
||||
msgstr "Не удалось получить GPT для машины"
|
||||
|
||||
msgid "Error getting GPTs for user"
|
||||
msgstr "Не удалось получить GPT для пользователя"
|
||||
|
||||
msgid "Unknown fatal code"
|
||||
msgstr "Неизвестный код фатальной ошибки"
|
||||
|
||||
# get_message
|
||||
msgid "Unknown message type, no message assigned"
|
||||
msgstr "Неизвестный тип сообщения"
|
||||
|
@ -1,391 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
import gettext
|
||||
|
||||
def info_code(code):
|
||||
info_ids = dict()
|
||||
info_ids[1] = 'Got GPO list for username'
|
||||
info_ids[2] = 'Got GPO'
|
||||
info_ids[3] = 'Working with control'
|
||||
info_ids[4] = 'Working with systemd'
|
||||
info_ids[5] = 'Unable to work with systemd unit'
|
||||
info_ids[6] = 'Starting systemd unit'
|
||||
info_ids[7] = 'Firefox policy'
|
||||
info_ids[8] = 'Chromium policy'
|
||||
info_ids[9] = 'Set user property to'
|
||||
info_ids[10] = 'The line in the configuration file was cleared'
|
||||
|
||||
return info_ids.get(code, 'Unknown info code')
|
||||
|
||||
def error_code(code):
|
||||
error_ids = dict()
|
||||
error_ids[1] = 'Insufficient permissions to run gpupdate'
|
||||
error_ids[2] = 'gpupdate will not be started'
|
||||
error_ids[3] = 'Backend execution error'
|
||||
error_ids[4] = 'Error occurred while running frontend manager'
|
||||
error_ids[5] = 'Error running GPOA for computer'
|
||||
error_ids[6] = 'Error running GPOA for user'
|
||||
error_ids[7] = 'Unable to initialize Samba backend'
|
||||
error_ids[8] = 'Unable to initialize no-domain backend'
|
||||
error_ids[9] = 'Error running ADP'
|
||||
error_ids[10] = 'Unable to determine DC hostname'
|
||||
error_ids[11] = 'Error occured while running applier with user privileges'
|
||||
error_ids[12] = 'Unable to initialize backend'
|
||||
error_ids[13] = 'Not sufficient privileges to run machine appliers'
|
||||
error_ids[14] = 'Kerberos ticket check failed'
|
||||
error_ids[15] = 'Unable to retrieve domain name via CLDAP query'
|
||||
error_ids[16] = 'Error getting SID using wbinfo, will use SID from cache'
|
||||
error_ids[17] = 'Unable to get GPO list for user from AD DC'
|
||||
error_ids[18] = 'Error getting XDG_DESKTOP_DIR'
|
||||
error_ids[19] = 'Error occured while running user applier in administrator context'
|
||||
error_ids[20] = 'Error occured while running user applier in user context (with dropped privileges)'
|
||||
error_ids[21] = 'No reply from oddjobd GPOA runner via D-Bus for current user'
|
||||
error_ids[22] = 'No reply from oddjobd GPOA runner via D-Bus for computer'
|
||||
error_ids[23] = 'No reply from oddjobd GPOA runner via D-Bus for user'
|
||||
error_ids[24] = 'Error occured while running machine applier'
|
||||
error_ids[25] = 'Error occured while initializing user applier'
|
||||
error_ids[26] = 'Error merging machine GPT'
|
||||
error_ids[27] = 'Error merging user GPT'
|
||||
error_ids[28] = 'Error merging machine part of GPT'
|
||||
error_ids[29] = 'Error merging user part of GPT'
|
||||
error_ids[30] = 'Error occured while running dropped privileges process for user context appliers'
|
||||
error_ids[31] = 'Error connecting to DBus Session daemon'
|
||||
error_ids[32] = 'No reply from DBus Session'
|
||||
error_ids[33] = 'Error occured while running forked process with dropped privileges'
|
||||
error_ids[34] = 'Error running GPOA directly for computer'
|
||||
error_ids[35] = 'Error caching URI to file'
|
||||
error_ids[36] = 'Error getting cached file for URI'
|
||||
error_ids[37] = 'Error caching file URIs'
|
||||
error_ids[38] = 'Unable to cache specified URI'
|
||||
error_ids[39] = 'Unable to work with control'
|
||||
error_ids[40] = 'Control applier for machine will not be started'
|
||||
error_ids[41] = 'Error getting control'
|
||||
error_ids[42] = 'Is not in possible values for control'
|
||||
error_ids[43] = 'Unable to set'
|
||||
error_ids[44] = 'Unable to generate file'
|
||||
error_ids[45] = 'Failed applying unit'
|
||||
error_ids[46] = 'Unable to start systemd unit'
|
||||
error_ids[47] = 'Unable to cache specified URI for machine'
|
||||
error_ids[48] = 'Error recompiling global GSettings schemas'
|
||||
error_ids[49] = 'Error update configuration dconf'
|
||||
error_ids[50] = 'Unable to cache specified URI for user'
|
||||
error_ids[52] = 'Error during attempt to read Chromium preferences for user'
|
||||
error_ids[53] = 'Fail for applying shortcut to file with \'%\''
|
||||
error_ids[54] = 'Fail for applying shortcut to not absolute path'
|
||||
error_ids[55] = 'Error running pkcon_runner sync for machine'
|
||||
error_ids[56] = 'Error run apt-get update'
|
||||
error_ids[57] = 'Package install error'
|
||||
error_ids[58] = 'Package remove error'
|
||||
error_ids[59] = 'Is not in possible values for control'
|
||||
error_ids[60] = 'Error running pkcon_runner sync for user'
|
||||
error_ids[61] = 'Error running pkcon_runner async for machine'
|
||||
error_ids[62] = 'Error running pkcon_runner async for user'
|
||||
error_ids[63] = 'Error merging user GPT (from machine GPO)'
|
||||
error_ids[64] = 'Error to cleanup directory for machine'
|
||||
error_ids[65] = 'Error to cleanup directory for user'
|
||||
error_ids[66] = 'Error while executing command for widgets'
|
||||
error_ids[67] = 'Error creating environment variables'
|
||||
error_ids[68] = 'Error running kwriteconfig5 command'
|
||||
error_ids[69] = 'Error getting list of keys'
|
||||
error_ids[70] = 'Error getting key value'
|
||||
error_ids[71] = 'Failed to update dconf database'
|
||||
error_ids[72] = 'Exception occurred while updating dconf database'
|
||||
return error_ids.get(code, 'Unknown error code')
|
||||
|
||||
def debug_code(code):
|
||||
debug_ids = dict()
|
||||
debug_ids[1] = 'The GPOA process was started for user'
|
||||
debug_ids[2] = 'Username is not specified - will use username of the current process'
|
||||
debug_ids[3] = 'Initializing plugin manager'
|
||||
debug_ids[4] = 'ADP plugin initialized'
|
||||
debug_ids[5] = 'Running ADP plugin'
|
||||
debug_ids[6] = 'Starting GPOA for user via D-Bus'
|
||||
debug_ids[7] = 'Cache directory determined'
|
||||
debug_ids[8] = 'Initializing local backend without domain'
|
||||
debug_ids[9] = 'Initializing Samba backend for domain'
|
||||
debug_ids[10] = 'Group Policy target set for update'
|
||||
debug_ids[11] = 'Starting GPOA for computer via D-Bus'
|
||||
debug_ids[12] = 'Got exit code'
|
||||
debug_ids[13] = 'Starting GPOA via D-Bus'
|
||||
debug_ids[14] = 'Starting GPOA via command invocation'
|
||||
debug_ids[15] = 'Username for frontend is determined'
|
||||
debug_ids[16] = 'Applying computer part of settings'
|
||||
debug_ids[17] = 'Kerberos ticket check succeed'
|
||||
debug_ids[18] = 'Found AD domain via CLDAP query'
|
||||
debug_ids[19] = 'Setting info'
|
||||
debug_ids[20] = 'Initializing cache'
|
||||
debug_ids[21] = 'Set operational SID'
|
||||
debug_ids[22] = 'Got PReg entry'
|
||||
debug_ids[23] = 'Looking for preference in user part of GPT'
|
||||
debug_ids[24] = 'Looking for preference in machine part of GPT'
|
||||
debug_ids[25] = 'Re-caching Local Policy'
|
||||
debug_ids[26] = 'Adding HKCU entry'
|
||||
debug_ids[27] = 'Skipping HKLM branch deletion key'
|
||||
debug_ids[28] = 'Reading and merging machine preference'
|
||||
debug_ids[29] = 'Reading and merging user preference'
|
||||
debug_ids[30] = 'Found SYSVOL entry'
|
||||
debug_ids[31] = 'Trying to load PReg from .pol file'
|
||||
debug_ids[32] = 'Finished reading PReg from .pol file'
|
||||
debug_ids[33] = 'Determined length of PReg file'
|
||||
debug_ids[34] = 'Merging machine settings from PReg file'
|
||||
debug_ids[35] = 'Merging machine (user part) settings from PReg file'
|
||||
debug_ids[36] = 'Loading PReg from XML'
|
||||
debug_ids[37] = 'Setting process permissions'
|
||||
debug_ids[38] = 'Samba DC setting is overriden by user setting'
|
||||
debug_ids[39] = 'Saving information about drive mapping'
|
||||
debug_ids[40] = 'Saving information about printer'
|
||||
debug_ids[41] = 'Saving information about link'
|
||||
debug_ids[42] = 'Saving information about folder'
|
||||
debug_ids[43] = 'No value cached for object'
|
||||
debug_ids[44] = 'Key is already present in cache, will update the value'
|
||||
debug_ids[45] = 'GPO update started'
|
||||
debug_ids[46] = 'GPO update finished'
|
||||
debug_ids[47] = 'Retrieving list of GPOs to replicate from AD DC'
|
||||
debug_ids[48] = 'Establishing connection with AD DC'
|
||||
debug_ids[49] = 'Started GPO replication from AD DC'
|
||||
debug_ids[50] = 'Finished GPO replication from AD DC'
|
||||
debug_ids[51] = 'Skipping HKCU branch deletion key'
|
||||
debug_ids[52] = 'Read domain name from configuration file'
|
||||
debug_ids[53] = 'Saving information about environment variables'
|
||||
debug_ids[54] = 'Run forked process with droped privileges'
|
||||
debug_ids[55] = 'Run user context applier with dropped privileges'
|
||||
debug_ids[56] = 'Kill dbus-daemon and dconf-service in user context'
|
||||
debug_ids[57] = 'Found connection by org.freedesktop.DBus.GetConnectionUnixProcessID'
|
||||
debug_ids[58] = 'Connection search return org.freedesktop.DBus.Error.NameHasNoOwner'
|
||||
debug_ids[59] = 'Running GPOA without GPT update directly for user'
|
||||
debug_ids[60] = 'Running GPOA by root for user'
|
||||
debug_ids[61] = 'The GPOA process was started for computer'
|
||||
debug_ids[62] = 'Path not resolved as UNC URI'
|
||||
debug_ids[63] = 'Delete HKLM branch key'
|
||||
debug_ids[64] = 'Delete HKCU branch key'
|
||||
debug_ids[65] = 'Delete HKLM branch key error'
|
||||
debug_ids[66] = 'Delete HKCU branch key error'
|
||||
debug_ids[67] = 'Running Control applier for machine'
|
||||
debug_ids[68] = 'Setting control'
|
||||
debug_ids[69] = 'Deny_All setting found'
|
||||
debug_ids[70] = 'Deny_All setting for user'
|
||||
debug_ids[71] = 'Deny_All setting not found'
|
||||
debug_ids[72] = 'Deny_All setting not found for user'
|
||||
debug_ids[73] = 'Running Polkit applier for machine'
|
||||
debug_ids[74] = 'Running Polkit applier for user in administrator context'
|
||||
debug_ids[75] = 'Polkit applier for machine will not be started'
|
||||
debug_ids[76] = 'Polkit applier for user in administrator context will not be started'
|
||||
debug_ids[77] = 'Generated file'
|
||||
debug_ids[78] = 'Running systemd applier for machine'
|
||||
debug_ids[79] = 'Running systemd applier for machine will not be started'
|
||||
debug_ids[80] = 'Running GSettings applier for machine'
|
||||
debug_ids[81] = 'GSettings applier for machine will not be started'
|
||||
debug_ids[82] = 'Removing GSettings policy file from previous run'
|
||||
debug_ids[83] = 'Mapping Windows policies to GSettings policies'
|
||||
debug_ids[84] = 'GSettings windows policies mapping not enabled'
|
||||
debug_ids[85] = 'Applying user setting'
|
||||
debug_ids[86] = 'Found GSettings windows mapping'
|
||||
debug_ids[87] = 'Running GSettings applier for user in user context'
|
||||
debug_ids[88] = 'GSettings applier for user in user context will not be started'
|
||||
debug_ids[89] = 'Applying machine setting'
|
||||
debug_ids[90] = 'Getting cached file for URI'
|
||||
debug_ids[91] = 'Wrote Firefox preferences to'
|
||||
debug_ids[92] = 'Found Firefox profile in'
|
||||
debug_ids[93] = 'Running Firefox applier for machine'
|
||||
debug_ids[94] = 'Firefox applier for machine will not be started'
|
||||
debug_ids[95] = 'Running Chromium applier for machine'
|
||||
debug_ids[96] = 'Chromium applier for machine will not be started'
|
||||
debug_ids[97] = 'Wrote Chromium preferences to'
|
||||
debug_ids[98] = 'Running Shortcut applier for machine'
|
||||
debug_ids[99] = 'Shortcut applier for machine will not be started'
|
||||
debug_ids[100] = 'No shortcuts to process for'
|
||||
debug_ids[101] = 'Running Shortcut applier for user in user context'
|
||||
debug_ids[102] = 'Shortcut applier for user in user context will not be started'
|
||||
debug_ids[103] = 'Running Shortcut applier for user in administrator context'
|
||||
debug_ids[104] = 'Shortcut applier for user in administrator context will not be started'
|
||||
debug_ids[105] = 'Try to expand path for shortcut'
|
||||
debug_ids[106] = 'Applying shortcut file to'
|
||||
debug_ids[107] = 'Running Folder applier for machine'
|
||||
debug_ids[108] = 'Folder applier for machine will not be started'
|
||||
debug_ids[109] = 'Folder creation skipped for machine'
|
||||
debug_ids[110] = 'Folder creation skipped for user'
|
||||
debug_ids[111] = 'Running Folder applier for user in user context'
|
||||
debug_ids[112] = 'Folder applier for user in user context will not be started'
|
||||
debug_ids[113] = 'Running CUPS applier for machine'
|
||||
debug_ids[114] = 'CUPS applier for machine will not be started'
|
||||
debug_ids[115] = 'Running CUPS applier for user in administrator context'
|
||||
debug_ids[116] = 'CUPS applier for user in administrator context will not be started'
|
||||
debug_ids[117] = 'Running Firewall applier for machine'
|
||||
debug_ids[118] = 'Firewall is enabled'
|
||||
debug_ids[119] = 'Firewall is disabled, settings will be reset'
|
||||
debug_ids[120] = 'Firewall applier will not be started'
|
||||
debug_ids[121] = 'Running NTP applier for machine'
|
||||
debug_ids[122] = 'NTP server is configured to'
|
||||
debug_ids[123] = 'Starting Chrony daemon'
|
||||
debug_ids[124] = 'Setting reference NTP server to'
|
||||
debug_ids[125] = 'Stopping Chrony daemon'
|
||||
debug_ids[126] = 'Configuring NTP server...'
|
||||
debug_ids[127] = 'NTP server is enabled'
|
||||
debug_ids[128] = 'NTP server is disabled'
|
||||
debug_ids[129] = 'NTP server is not configured'
|
||||
debug_ids[130] = 'NTP client is enabled'
|
||||
debug_ids[131] = 'NTP client is disabled'
|
||||
debug_ids[132] = 'NTP client is not configured'
|
||||
debug_ids[133] = 'NTP applier for machine will not be started'
|
||||
debug_ids[134] = 'Running Envvar applier for machine'
|
||||
debug_ids[135] = 'Envvar applier for machine will not be started'
|
||||
debug_ids[136] = 'Running Envvar applier for user in user context'
|
||||
debug_ids[137] = 'Envvar applier for user in user context will not be started'
|
||||
debug_ids[138] = 'Running Package applier for machine'
|
||||
debug_ids[139] = 'Package applier for machine will not be started'
|
||||
debug_ids[140] = 'Running Package applier for user in administrator context'
|
||||
debug_ids[141] = 'Package applier for user in administrator context will not be started'
|
||||
debug_ids[142] = 'Running pkcon_runner to install and remove packages'
|
||||
debug_ids[143] = 'Run apt-get update'
|
||||
debug_ids[144] = 'Unable to cache specified URI'
|
||||
debug_ids[145] = 'Unable to cache specified URI for machine'
|
||||
debug_ids[146] = 'Running CIFS applier for user in administrator context'
|
||||
debug_ids[147] = 'CIFS applier for user in administrator context will not be started'
|
||||
debug_ids[148] = 'Installing the package'
|
||||
debug_ids[149] = 'Removing a package'
|
||||
debug_ids[150] = 'Failed to found gsettings for machine'
|
||||
debug_ids[151] = 'Failed to found user gsettings'
|
||||
debug_ids[152] = 'Configure user Group Policy loopback processing mode'
|
||||
debug_ids[153] = 'Saving information about script'
|
||||
debug_ids[154] = 'No machine scripts directory to clean up'
|
||||
debug_ids[155] = 'No user scripts directory to clean up'
|
||||
debug_ids[156] = 'Prepare Scripts applier for machine'
|
||||
debug_ids[157] = 'Scripts applier for machine will not be started'
|
||||
debug_ids[158] = 'Prepare Scripts applier for user in user context'
|
||||
debug_ids[159] = 'Scripts applier for user in user context will not be started'
|
||||
debug_ids[160] = 'Clean machine scripts directory'
|
||||
debug_ids[161] = 'Clean user scripts directory'
|
||||
debug_ids[162] = 'Saving information about file'
|
||||
debug_ids[163] = 'Failed to return file path'
|
||||
debug_ids[164] = 'Failed to create file'
|
||||
debug_ids[165] = 'Failed to delete file'
|
||||
debug_ids[166] = 'Failed to update file'
|
||||
debug_ids[167] = 'Running File copy applier for machine'
|
||||
debug_ids[168] = 'Running File copy applier for machine will not be started'
|
||||
debug_ids[169] = 'Running File copy applier for user in administrator context'
|
||||
debug_ids[170] = 'Running File copy applier for user in administrator context will not be started'
|
||||
debug_ids[171] = 'Running ini applier for machine'
|
||||
debug_ids[172] = 'Running ini applier for machine will not be started'
|
||||
debug_ids[173] = 'Running ini applier for user in user context'
|
||||
debug_ids[174] = 'Running ini applier for user in user context will not be started'
|
||||
debug_ids[175] = 'Ini-file path not recognized'
|
||||
debug_ids[176] = 'Ini-file is not readable'
|
||||
debug_ids[177] = 'Saving information about ini-file'
|
||||
debug_ids[178] = 'Dictionary key generation failed'
|
||||
debug_ids[179] = 'Running CIFS applier for machine'
|
||||
debug_ids[180] = 'CIFS applier for machine will not be started'
|
||||
debug_ids[181] = 'Running networkshare applier for machine will not be started'
|
||||
debug_ids[182] = 'Apply network share data action failed'
|
||||
debug_ids[183] = 'Running yandex_browser_applier for machine'
|
||||
debug_ids[184] = 'Yandex_browser_applier for machine will not be started'
|
||||
debug_ids[185] = 'Wrote YandexBrowser preferences to'
|
||||
debug_ids[186] = 'Saving information about network shares'
|
||||
debug_ids[187] = 'Running networkshare applier for machine'
|
||||
debug_ids[188] = 'Running networkshare applier for user'
|
||||
debug_ids[189] = 'Running networkshare applier for user will not be started'
|
||||
debug_ids[190] = 'Applying settings for network share'
|
||||
debug_ids[191] = 'File copy'
|
||||
debug_ids[192] = 'File update'
|
||||
debug_ids[193] = 'Deleting a file'
|
||||
debug_ids[194] = 'Failed to create a symlink to the network drives mountpoint'
|
||||
debug_ids[195] = 'Failed to create a symlink to the system network drives mountpoint'
|
||||
debug_ids[196] = 'Failed to create a symlink to the hidden network drives mountpoint'
|
||||
debug_ids[197] = 'Failed to create a symlink to the hidden system network drives mountpoint'
|
||||
debug_ids[198] = 'Running KDE applier for machine'
|
||||
debug_ids[199] = 'KDE applier for machine will not be started'
|
||||
debug_ids[200] = 'Running KDE applier for user in user context'
|
||||
debug_ids[201] = 'KDE applier for user in user context will not be started'
|
||||
debug_ids[202] = 'Changing the configuration file'
|
||||
debug_ids[203] = 'Widget command completed successfully'
|
||||
debug_ids[204] = 'Getting a list of keys'
|
||||
debug_ids[205] = 'Getting the key value'
|
||||
debug_ids[206] = 'Successfully updated dconf database'
|
||||
debug_ids[207] = 'Creating a dictionary with keys and values from the dconf database'
|
||||
debug_ids[208] = 'No entry found for the specified path'
|
||||
debug_ids[209] = 'Creating an ini file with policies for dconf'
|
||||
debug_ids[210] = 'GPO version was not found'
|
||||
|
||||
return debug_ids.get(code, 'Unknown debug code')
|
||||
|
||||
def warning_code(code):
|
||||
warning_ids = dict()
|
||||
warning_ids[1] = (
|
||||
'Unable to perform gpupdate for non-existent user, '
|
||||
'will update machine settings'
|
||||
)
|
||||
warning_ids[2] = (
|
||||
'Current permissions does not allow to perform gpupdate for '
|
||||
'designated user. Will update current user settings'
|
||||
)
|
||||
warning_ids[3] = 'oddjobd is inaccessible'
|
||||
warning_ids[4] = 'No SYSVOL entry assigned to GPO'
|
||||
warning_ids[5] = 'ADP package is not installed - plugin will not be initialized'
|
||||
warning_ids[6] = 'Unable to resolve GSettings parameter'
|
||||
warning_ids[7] = 'No home directory exists for user'
|
||||
warning_ids[8] = 'User\'s shortcut not placed to home directory'
|
||||
warning_ids[9] = 'CUPS is not installed: no printer settings will be deployed'
|
||||
warning_ids[10] = 'Unsupported NTP server type'
|
||||
warning_ids[11] = 'Unable to refresh GPO list'
|
||||
warning_ids[12] = 'Failed to read the list of files'
|
||||
warning_ids[13] = 'Failed to caching the file'
|
||||
warning_ids[14] = 'Could not create a valid list of keys'
|
||||
warning_ids[15] = 'Failed to copy file'
|
||||
warning_ids[16] = 'Failed to create KDE settings list'
|
||||
warning_ids[17] = 'Could not find application tools'
|
||||
warning_ids[18] = 'Failed to open KDE settings'
|
||||
warning_ids[19] = 'Failed to change KDE configuration file'
|
||||
warning_ids[20] = 'Error connecting to server'
|
||||
warning_ids[21] = 'Wallpaper configuration file not found'
|
||||
warning_ids[22] = 'The user setting was not installed, conflict with computer setting'
|
||||
warning_ids[23] = 'Action for ini file failed'
|
||||
warning_ids[24] = 'Couldn\'t get the uid'
|
||||
|
||||
|
||||
return warning_ids.get(code, 'Unknown warning code')
|
||||
|
||||
def fatal_code(code):
|
||||
fatal_ids = dict()
|
||||
fatal_ids[1] = 'Unable to refresh GPO list'
|
||||
fatal_ids[2] = 'Error getting GPTs for machine'
|
||||
fatal_ids[3] = 'Error getting GPTs for user'
|
||||
|
||||
return fatal_ids.get(code, 'Unknown fatal code')
|
||||
|
||||
def get_message(code):
|
||||
retstr = 'Unknown message type, no message assigned'
|
||||
|
||||
if code.startswith('E'):
|
||||
retstr = error_code(int(code[1:]))
|
||||
if code.startswith('I'):
|
||||
retstr = info_code(int(code[1:]))
|
||||
if code.startswith('D'):
|
||||
retstr = debug_code(int(code[1:]))
|
||||
if code.startswith('W'):
|
||||
retstr = warning_code(int(code[1:]))
|
||||
if code.startswith('F'):
|
||||
retstr = fatal_code(int(code[1:]))
|
||||
|
||||
return retstr
|
||||
|
||||
def message_with_code(code):
|
||||
retstr = '[' + code[0:1] + code[1:].rjust(5, '0') + ']| ' + gettext.gettext(get_message(code))
|
||||
|
||||
return retstr
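For reference, a short usage sketch (not part of the original module) of the two helpers above: get_message maps a prefixed code to its English text, and message_with_code pads the numeric part to five digits and passes the text through gettext. The 'messages' import name is taken from how the adp plugin imports it further down.

from messages import get_message, message_with_code

print(get_message('E5'))          # Error running GPOA for computer
print(message_with_code('E5'))    # [E00005]| Error running GPOA for computer (translated if a catalog is bound)
print(message_with_code('D142'))  # [D00142]| Running pkcon_runner to install and remove packages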
|
||||
|
@ -1,154 +0,0 @@
|
||||
#!/usr/bin/python3
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2020 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import rpm
|
||||
import subprocess
|
||||
from gpoa.storage import registry_factory
|
||||
from util.gpoa_ini_parsing import GpoaConfigObj
|
||||
from util.util import get_uid_by_username, string_to_literal_eval
|
||||
import logging
|
||||
from util.logging import log
|
||||
import argparse
|
||||
import gettext
|
||||
import locale
|
||||
|
||||
|
||||
def is_rpm_installed(rpm_name):
|
||||
'''
|
||||
Check if the package named 'rpm_name' is installed
|
||||
'''
|
||||
ts = rpm.TransactionSet()
|
||||
pm = ts.dbMatch('name', rpm_name)
|
||||
if pm.count() > 0:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
class Pkcon_applier:
|
||||
|
||||
def __init__(self, user = None):
|
||||
self.__install_key_name = 'Install'
|
||||
self.__remove_key_name = 'Remove'
|
||||
self.__hklm_branch = '/Software/BaseALT/Policies/Packages'
|
||||
self.__install_command = ['/usr/bin/pkcon', '-y', 'install']
|
||||
self.__remove_command = ['/usr/bin/pkcon', '-y', 'remove']
|
||||
self.__reinstall_command = ['/usr/bin/pkcon', '-y', 'reinstall']
|
||||
self.install_packages = set()
|
||||
self.remove_packages = set()
|
||||
if user:
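# Per-user mode: read the Install/Remove package lists from the user's dconf policy ini rather than from the registry storage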
|
||||
pid = get_uid_by_username(user)
|
||||
#TODO: It is necessary to redo reading from the GVariant database file policy{pid}
|
||||
try:
|
||||
packages_dict = GpoaConfigObj(f'/etc/dconf/db/policy{pid}.d/policy{pid}.ini')
|
||||
except Exception:
|
||||
packages_dict = {}
|
||||
|
||||
self.install_packages_setting = string_to_literal_eval(
|
||||
packages_dict.get(self.__hklm_branch[1:], {}).get(self.__install_key_name, {}))
|
||||
self.remove_packages_setting = string_to_literal_eval(
|
||||
packages_dict.get(self.__hklm_branch[1:], {}).get(self.__remove_key_name, {}))
|
||||
else:
|
||||
storage = registry_factory(username=user)
|
||||
install_branch = '{}/{}'.format(self.__hklm_branch, self.__install_key_name)
|
||||
remove_branch = '{}/{}'.format(self.__hklm_branch, self.__remove_key_name)
|
||||
self.install_packages_setting = storage.get_key_value(install_branch)
|
||||
self.remove_packages_setting = storage.get_key_value(remove_branch)
|
||||
for package in self.install_packages_setting:
|
||||
if not is_rpm_installed(package):
|
||||
self.install_packages.add(package)
|
||||
for package in self.remove_packages_setting:
|
||||
if package in self.install_packages:
|
||||
self.install_packages.remove(package)
|
||||
if is_rpm_installed(package):
|
||||
self.remove_packages.add(package)
|
||||
|
||||
def apply(self):
|
||||
log('D142')
|
||||
self.update()
|
||||
for package in self.remove_packages:
|
||||
try:
|
||||
logdata = dict()
|
||||
logdata['name'] = package
|
||||
log('D149', logdata)
|
||||
self.remove_pkg(package)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['exc'] = exc
|
||||
log('E58', logdata)
|
||||
|
||||
for package in self.install_packages:
|
||||
try:
|
||||
logdata = dict()
|
||||
logdata['name'] = package
|
||||
log('D148', logdata)
|
||||
self.install_pkg(package)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['exc'] = exc
|
||||
log('E57', logdata)
|
||||
|
||||
|
||||
def install_pkg(self, package_name):
|
||||
fullcmd = list(self.__install_command)
|
||||
fullcmd.append(package_name)
|
||||
return subprocess.check_output(fullcmd)
|
||||
|
||||
def reinstall_pkg(self, package_name):
|
||||
pass
|
||||
|
||||
def remove_pkg(self, package_name):
|
||||
fullcmd = self.__remove_command
|
||||
fullcmd.append(package_name)
|
||||
return subprocess.check_output(fullcmd)
|
||||
|
||||
def update(self):
|
||||
'''
|
||||
Update APT-RPM database.
|
||||
'''
|
||||
try:
|
||||
res = subprocess.check_output(['/usr/bin/apt-get', 'update'], encoding='utf-8')
|
||||
msg = str(res).split('\n')
|
||||
logdata = dict()
|
||||
for mslog in msg:
|
||||
ms = str(mslog).split(' ')
|
||||
if ms:
|
||||
logdata = {ms[0]: ms[1:-1]}
|
||||
log('D143', logdata)
|
||||
except Exception as exc:
|
||||
logdata = dict()
|
||||
logdata['msg'] = exc
|
||||
log('E56',logdata)
|
||||
|
||||
if __name__ == '__main__':
|
||||
locale.bindtextdomain('gpoa', '/usr/lib/python3/site-packages/gpoa/locale')
|
||||
gettext.bindtextdomain('gpoa', '/usr/lib/python3/site-packages/gpoa/locale')
|
||||
gettext.textdomain('gpoa')
|
||||
logger = logging.getLogger()
|
||||
parser = argparse.ArgumentParser(description='Package applier')
|
||||
parser.add_argument('--user', type = str, help = 'user', nargs = '?', default = None)
|
||||
parser.add_argument('--loglevel', type = int, help = 'loglevel', nargs = '?', default = 30)
|
||||
|
||||
args = parser.parse_args()
|
||||
logger.setLevel(args.loglevel)
|
||||
if args.user:
|
||||
applier = Pkcon_applier(args.user)
|
||||
else:
|
||||
applier = Pkcon_applier()
|
||||
applier.apply()
|
||||
|
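The set arithmetic in Pkcon_applier.__init__ is the core of the applier: packages requested for installation are skipped when already present, and a package listed in both branches is dropped from the install set and removed only if actually installed. A minimal, self-contained sketch of that reconciliation, assuming is_installed() and the package names are placeholders rather than anything from gpupdate:

# Illustrative sketch of the reconciliation done in Pkcon_applier.__init__;
# is_installed() stands in for the rpm database check and is an assumption.
def reconcile(install_setting, remove_setting, is_installed):
    install, remove = set(), set()
    for pkg in install_setting:
        if not is_installed(pkg):
            install.add(pkg)
    for pkg in remove_setting:
        if pkg in install:
            install.remove(pkg)      # a package requested in both lists is not installed
        if is_installed(pkg):
            remove.add(pkg)          # only remove what is actually present
    return install, remove

installed = {'vim', 'mc'}
print(reconcile(['vim', 'emacs', 'joe'], ['mc', 'joe'], installed.__contains__))
# -> ({'emacs'}, {'mc'})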
@ -22,20 +22,19 @@ import subprocess
from util.rpm import is_rpm_installed
from .exceptions import PluginInitError
from util.logging import slogm
from messages import message_with_code

class adp:
    def __init__(self):
        if not is_rpm_installed('adp'):
            raise PluginInitError(message_with_code('W5'))
        logging.info(slogm(message_with_code('D4')))
            raise PluginInitError('adp is not installed - plugin cannot be initialized')
        logging.info(slogm('ADP plugin initialized'))

    def run(self):
        try:
            logging.info(slogm(message_with_code('D5')))
            logging.info('Running ADP plugin')
            subprocess.call(['/usr/bin/adp', 'fetch'])
            subprocess.call(['/usr/bin/adp', 'apply'])
        except Exception as exc:
            logging.error(slogm(message_with_code('E9')))
            logging.error(slogm('Error running ADP'))
            raise exc
@ -23,16 +23,15 @@ from .roles import roles
from .exceptions import PluginInitError
from .plugin import plugin
from util.logging import slogm
from messages import message_with_code

class plugin_manager:
    def __init__(self):
        self.plugins = dict()
        logging.debug(slogm(message_with_code('D3')))
        logging.info(slogm('Starting plugin manager'))
        try:
            self.plugins['adp'] = adp()
        except PluginInitError as exc:
            logging.warning(slogm(str(exc)))
            logging.error(slogm(exc))

    def run(self):
        self.plugins.get('adp', plugin('adp')).run()
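plugin_manager.run() relies on dict.get() with a default: when the adp plugin failed to initialize and was never registered, a generic plugin object is used instead, so the call degrades to a no-op. A hedged sketch of that fallback, where NoopPlugin is a stand-in for the project's `plugin` class rather than its real API:

# Sketch of the fallback used in plugin_manager.run(); NoopPlugin is an assumption.
class NoopPlugin:
    def __init__(self, name):
        self.name = name
    def run(self):
        pass  # nothing to do when the real plugin was never registered

plugins = dict()   # 'adp' missing because its __init__ raised PluginInitError
plugins.get('adp', NoopPlugin('adp')).run()   # silently does nothing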
@ -1,156 +0,0 @@
#!/usr/bin/python3
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import subprocess
import argparse
import os
from pathlib import Path
import psutil
import time

class Scripts_runner:
    '''
    A class for an object that iterates over directories with scripts
    in the desired sequence and launches them
    '''
    def __init__(self, work_mode=None, user_name=None, action=None):
        self.dir_scripts_machine = '/var/cache/gpupdate_scripts_cache/machine/'
        self.dir_scripts_users = '/var/cache/gpupdate_scripts_cache/users/'
        self.user_name = user_name
        self.list_with_all_commands = list()
        stack_dir = None
        if work_mode and work_mode.upper() == 'MACHINE':
            stack_dir = self.machine_runner_fill()
        elif work_mode and work_mode.upper() == 'USER':
            stack_dir = self.user_runner_fill()
        else:
            print('Invalid arguments entered')
            return
        if action:
            self.action = action.upper()
        else:
            print('Action needed')
            return

        self.find_action(stack_dir)
        for it_cmd in self.list_with_all_commands:
            print(self.run_cmd_subprocess(it_cmd))

    def user_runner_fill(self):
        return self.get_stack_dir(self.dir_scripts_users + self.user_name)

    def machine_runner_fill(self):
        return self.get_stack_dir(self.dir_scripts_machine)

    def get_stack_dir(self, path_dir):
        stack_dir = list()
        try:
            dir_script = Path(path_dir)
            for it_dir in dir_script.iterdir():
                stack_dir.append(str(it_dir))
            return stack_dir
        except Exception as exc:
            print(exc)
            return None

    def find_action(self, stack_dir):
        if not stack_dir:
            return
        list_tmp = list()
        while stack_dir:
            path_turn = stack_dir.pop()
            basename = os.path.basename(path_turn)
            if basename == self.action:
                list_tmp = self.get_stack_dir(path_turn)
                if list_tmp:
                    self.fill_list_cmd(list_tmp)


    def fill_list_cmd(self, list_tmp):
        list_tmp = sorted(list_tmp)
        for file_in_task_dir in list_tmp:
            suffix = os.path.basename(file_in_task_dir)[-4:]
            if suffix == '.arg':
                try:
                    arg = self.read_args(file_in_task_dir)
                    for it_arg in arg.split():
                        self.list_with_all_commands[-1].append(it_arg)
                except Exception as exc:
                    print('Argument read for {}: {}'.format(self.list_with_all_commands.pop(), exc))
            else:
                cmd = list()
                cmd.append(file_in_task_dir)
                self.list_with_all_commands.append(cmd)


    def read_args(self, path):
        with open(path + '/arg') as f:
            args = f.readlines()
        return args[0]

    def run_cmd_subprocess(self, cmd):
        try:
            subprocess.run(cmd)
            return 'Script run: {}'.format(cmd)
        except Exception as exc:
            return exc

def find_process_by_name_and_script(name, script_path):

    for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
        try:
            # Check if the process name matches and the script path is in the command line arguments
            if proc.info['name'] == name and script_path in proc.info['cmdline']:
                return proc
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            continue
    return None

def wait_for_process(name, script_path, check_interval=1):

    process = find_process_by_name_and_script(name, script_path)
    if not process:
        print(f"Process with name {name} and script path {script_path} not found.")
        return

    try:
        # Loop to wait for the process to finish
        while process.is_running():
            print(f"Waiting for process {name} with PID {process.pid} to finish...")
            time.sleep(check_interval)
        print(f"Process {name} with PID {process.pid} has finished.")
        return
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        print(f"Process {name} with PID {process.pid} is no longer accessible.")
        return

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Scripts runner')
    parser.add_argument('--mode', type=str, help='MACHINE or USER', nargs='?', default=None)
    parser.add_argument('--user', type=str, help='User name', nargs='?', default=None)
    parser.add_argument('--action', type=str, help='MACHINE : [STARTUP or SHUTDOWN], USER : [LOGON or LOGOFF]', nargs='?', default=None)

    process_name = "python3"
    script_path = "/usr/sbin/gpoa"
    wait_for_process(process_name, script_path)
    args = parser.parse_args()
    try:
        Scripts_runner(args.mode, args.user, args.action)
    except Exception as exc:
        print(exc)
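Because fill_list_cmd() sorts the directory listing and only then checks for the '.arg' suffix, an argument file sorts immediately after the script it belongs to, so its tokens are appended to the command collected just before it. A small sketch of that pairing, with hypothetical file names and argument tokens:

# Sketch of how fill_list_cmd() pairs scripts with their '.arg' companions.
# The file names and the tokens are hypothetical; only the ordering/suffix logic is taken from above.
entries = ['/cache/LOGON/00_hello.py.arg', '/cache/LOGON/00_hello.py', '/cache/LOGON/10_cleanup.sh']
commands = []
for path in sorted(entries):
    if path.endswith('.arg'):
        commands[-1].extend(['--greeting', 'hi'])   # tokens read from the .arg entry
    else:
        commands.append([path])
print(commands)
# -> [['/cache/LOGON/00_hello.py', '--greeting', 'hi'], ['/cache/LOGON/10_cleanup.sh']]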
@ -1,7 +1,7 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2023 BaseALT Ltd.
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -16,19 +16,12 @@
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from .sqlite_registry import sqlite_registry
from .sqlite_cache import sqlite_cache

from storage.dconf_registry import Dconf_registry
def cache_factory(cache_name):
    return sqlite_cache(cache_name)

def registry_factory(registry_name='', envprofile=None, username=None):
    if username:
        Dconf_registry._username = username
    else:
        Dconf_registry._envprofile = 'system'
    if envprofile:
        Dconf_registry._envprofile = envprofile

    if registry_name == 'dconf':
        return Dconf_registry()
    else:
        return Dconf_registry
def registry_factory(registry_name='registry', registry_dir=None):
    return sqlite_registry(registry_name, registry_dir)
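The two registry_factory variants above differ in more than the backend: the dconf flavour returns a Dconf_registry instance only when asked for 'dconf' by name and otherwise hands back the class object itself, which works because (as the Dconf_registry definition further down shows) its state lives in class-level attributes shared by every caller. A toy illustration of that instance-versus-class return, using a stand-in class rather than the real Dconf_registry:

# Toy illustration of the return-type split in the dconf variant above; FakeRegistry is an assumption.
class FakeRegistry:
    pass

def registry_factory(registry_name=''):
    return FakeRegistry() if registry_name == 'dconf' else FakeRegistry

print(isinstance(registry_factory('dconf'), FakeRegistry))   # True  (an instance)
print(registry_factory('anything') is FakeRegistry)          # True  (the class object itself)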
@ -1,591 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2019-2023 BaseALT Ltd.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from util.util import string_to_literal_eval, touch_file, get_uid_by_username
|
||||
from util.logging import log
|
||||
import re
|
||||
|
||||
|
||||
class PregDconf():
|
||||
def __init__(self, keyname, valuename, type_preg, data):
|
||||
self.keyname = keyname
|
||||
self.valuename = valuename
|
||||
self.hive_key = '{}/{}'.format(self.keyname, self.valuename)
|
||||
self.type = type_preg
|
||||
self.data = data
|
||||
|
||||
|
||||
class gplist(list):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def first(self):
|
||||
if self:
|
||||
return self[0]
|
||||
else:
|
||||
return None
|
||||
|
||||
def count(self):
|
||||
return len(self)
|
||||
|
||||
class Dconf_registry():
|
||||
'''
|
||||
A class variable that represents a global registry dictionary shared among instances of the class
|
||||
'''
|
||||
_ReadQueue = 'Software/BaseALT/Policies/ReadQueue'
|
||||
global_registry_dict = dict({_ReadQueue:{}})
|
||||
__template_file = '/usr/share/dconf/user_mandatory.template'
|
||||
_policies_path = 'Software/'
|
||||
_policies_win_path = 'SOFTWARE/'
|
||||
_gpt_read_flag = False
|
||||
__dconf_dict_flag = False
|
||||
__dconf_dict = dict()
|
||||
_username = None
|
||||
_envprofile = None
|
||||
|
||||
list_keys = list()
|
||||
_info = dict()
|
||||
|
||||
shortcuts = list()
|
||||
folders = list()
|
||||
files = list()
|
||||
drives = list()
|
||||
scheduledtasks = list()
|
||||
environmentvariables = list()
|
||||
inifiles = list()
|
||||
services = list()
|
||||
printers = list()
|
||||
scripts = list()
|
||||
networkshares = list()
|
||||
|
||||
|
||||
|
||||
@classmethod
|
||||
def set_info(cls, key , data):
|
||||
cls._info[key] = data
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_info(cls, key):
|
||||
return cls._info.setdefault(key, None)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def get_matching_keys(path):
|
||||
if path[0] != '/':
|
||||
path = '/' + path
|
||||
logdata = dict()
|
||||
envprofile = get_dconf_envprofile()
|
||||
try:
|
||||
process = subprocess.Popen(['dconf', 'list', path],
|
||||
env=envprofile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
||||
logdata['path'] = path
|
||||
log('D204', logdata)
|
||||
output, error = process.communicate()
|
||||
if not output and not error:
|
||||
return
|
||||
if not error:
|
||||
keys = output.strip().split('\n')
|
||||
for key in keys:
|
||||
Dconf_registry.get_matching_keys(f'{path}{key}')
|
||||
else:
|
||||
Dconf_registry.list_keys.append(path)
|
||||
return Dconf_registry.list_keys
|
||||
except Exception as exc:
|
||||
logdata['exc'] = exc
|
||||
log('E69', logdata)
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def get_key_values(keys):
|
||||
key_values = {}
|
||||
for key in keys:
|
||||
key_values[key] = Dconf_registry.get_key_value(key)
|
||||
return key_values
|
||||
|
||||
@staticmethod
|
||||
def get_key_value(key):
|
||||
logdata = dict()
|
||||
envprofile = get_dconf_envprofile()
|
||||
try:
|
||||
process = subprocess.Popen(['dconf', 'read', key],
|
||||
env=envprofile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
||||
logdata['key'] = key
|
||||
output, error = process.communicate()
|
||||
|
||||
if not error:
|
||||
return string_to_literal_eval(string_to_literal_eval(output))
|
||||
else:
|
||||
return None
|
||||
except Exception as exc:
|
||||
logdata['exc'] = exc
|
||||
log('E70', logdata)
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def dconf_update():
|
||||
logdata = dict()
|
||||
try:
|
||||
process = subprocess.Popen(['dconf', 'update'],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
||||
output, error = process.communicate()
|
||||
|
||||
if error:
|
||||
logdata['error'] = error
|
||||
log('E71', logdata)
|
||||
else:
|
||||
logdata['output'] = output
|
||||
log('D206', logdata)
|
||||
except Exception as exc:
|
||||
logdata['exc'] = exc
|
||||
log('E72', logdata)
|
||||
|
||||
@classmethod
|
||||
def check_profile_template(cls):
|
||||
if Path(cls.__template_file).exists():
|
||||
return True
|
||||
else:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def apply_template(cls, uid):
|
||||
logdata = dict()
|
||||
if uid and cls.check_profile_template():
|
||||
with open(cls.__template_file, "r") as f:
|
||||
template = f.read()
|
||||
# Replace the "{uid}" placeholder with the actual UID value
|
||||
content = template.replace("{{uid}}", str(uid))
|
||||
|
||||
elif uid:
|
||||
content = f"user-db:user\n" \
|
||||
f"system-db:policy\n" \
|
||||
f"system-db:policy{uid}\n" \
|
||||
f"system-db:local\n" \
|
||||
f"system-db:default\n" \
|
||||
f"system-db:local\n" \
|
||||
f"system-db:policy{uid}\n" \
|
||||
f"system-db:policy\n"
|
||||
else:
|
||||
logdata['uid'] = uid
|
||||
log('W24', logdata)
|
||||
return
|
||||
|
||||
user_mandatory = f'/run/dconf/user/{uid}'
|
||||
touch_file(user_mandatory)
|
||||
|
||||
with open(user_mandatory, "w") as f:
|
||||
f.write(content)
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_policies_from_dconf(cls):
|
||||
return cls.get_dictionary_from_dconf(cls._policies_path, cls._policies_win_path)
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_dictionary_from_dconf(self, *startswith_list):
|
||||
output_dict = {}
|
||||
for startswith in startswith_list:
|
||||
dconf_dict = self.get_key_values(self.get_matching_keys(startswith))
|
||||
for key, value in dconf_dict.items():
|
||||
keys_tmp = key.split('/')
|
||||
update_dict(output_dict.setdefault('/'.join(keys_tmp[:-1])[1:], {}), {keys_tmp[-1]: str(value)})
|
||||
|
||||
log('D207')
|
||||
return output_dict
|
||||
|
||||
|
||||
@classmethod
|
||||
def filter_entries(cls, startswith):
|
||||
if startswith[-1] == '%':
|
||||
startswith = startswith[:-1]
|
||||
if startswith[-1] == '/' or startswith[-1] == '\\':
|
||||
startswith = startswith[:-1]
|
||||
return filter_dict_keys(startswith, flatten_dictionary(cls.global_registry_dict))
|
||||
return filter_dict_keys(startswith, flatten_dictionary(cls.global_registry_dict))
|
||||
|
||||
|
||||
@classmethod
|
||||
def filter_hklm_entries(cls, startswith):
|
||||
pregs = cls.filter_entries(startswith)
|
||||
list_entiers = list()
|
||||
for keyname, value in pregs.items():
|
||||
if isinstance(value, dict):
|
||||
for valuename, data in value.items():
|
||||
list_entiers.append(PregDconf(
|
||||
keyname, convert_string_dconf(valuename), find_preg_type(data), data))
|
||||
elif isinstance(value, list):
|
||||
for data in value:
|
||||
list_entiers.append(PregDconf(
|
||||
keyname, data, find_preg_type(data), data))
|
||||
else:
|
||||
list_entiers.append(PregDconf(
|
||||
'/'.join(keyname.split('/')[:-1]), convert_string_dconf(keyname.split('/')[-1]), find_preg_type(value), value))
|
||||
|
||||
|
||||
return gplist(list_entiers)
|
||||
|
||||
|
||||
@classmethod
|
||||
def filter_hkcu_entries(cls, sid, startswith):
|
||||
return cls.filter_hklm_entries(startswith)
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_storage(cls,dictionary = None):
|
||||
if dictionary:
|
||||
result = dictionary
|
||||
elif Dconf_registry._gpt_read_flag:
|
||||
result = Dconf_registry.global_registry_dict
|
||||
else:
|
||||
if Dconf_registry.__dconf_dict_flag:
|
||||
result = Dconf_registry.__dconf_dict
|
||||
else:
|
||||
Dconf_registry.__dconf_dict = Dconf_registry.get_policies_from_dconf()
|
||||
result = Dconf_registry.__dconf_dict
|
||||
Dconf_registry.__dconf_dict_flag = True
|
||||
return result
|
||||
|
||||
|
||||
@classmethod
|
||||
def filling_storage_from_dconf(cls):
|
||||
Dconf_registry.global_registry_dict = Dconf_registry.get_storage()
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_entry(cls, path, dictionary = None):
|
||||
logdata = dict()
|
||||
result = Dconf_registry.get_storage(dictionary)
|
||||
|
||||
keys = path.split("\\") if "\\" in path else path.split("/")
|
||||
key = '/'.join(keys[:-1]) if keys[0] else '/'.join(keys[:-1])[1:]
|
||||
|
||||
if isinstance(result, dict) and key in result.keys():
|
||||
data = result.get(key).get(keys[-1])
|
||||
return PregDconf(
|
||||
key, convert_string_dconf(keys[-1]), find_preg_type(data), data)
|
||||
else:
|
||||
logdata['path'] = path
|
||||
log('D208', logdata)
|
||||
return None
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_hkcu_entry(cls, sid, hive_key, dictionary = None):
|
||||
return cls.get_hklm_entry(hive_key, dictionary)
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_hklm_entry(cls, hive_key, dictionary = None):
|
||||
return cls.get_entry(hive_key, dictionary)
|
||||
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_shortcut(cls, sid, sc_obj, policy_name):
|
||||
cls.shortcuts.append(sc_obj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_printer(cls, sid, pobj, policy_name):
|
||||
cls.printers.append(pobj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_drive(cls, sid, dobj, policy_name):
|
||||
cls.drives.append(dobj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_folder(cls, sid, fobj, policy_name):
|
||||
cls.folders.append(fobj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_envvar(self, sid, evobj, policy_name):
|
||||
self.environmentvariables.append(evobj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_script(cls, sid, scrobj, policy_name):
|
||||
cls.scripts.append(scrobj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_file(cls, sid, fileobj, policy_name):
|
||||
cls.files.append(fileobj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_ini(cls, sid, iniobj, policy_name):
|
||||
cls.inifiles.append(iniobj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def add_networkshare(cls, sid, networkshareobj, policy_name):
|
||||
cls.networkshares.append(networkshareobj)
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_shortcuts(cls, sid):
|
||||
return cls.shortcuts
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_printers(cls, sid):
|
||||
return cls.printers
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_drives(cls, sid):
|
||||
return cls.drives
|
||||
|
||||
@classmethod
|
||||
def get_folders(cls, sid):
|
||||
return cls.folders
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_envvars(cls, sid):
|
||||
return cls.environmentvariables
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_scripts(cls, sid, action):
|
||||
action_scripts = list()
|
||||
for part in cls.scripts:
|
||||
if action == 'LOGON' and part.action == 'LOGON':
|
||||
action_scripts.append(part)
|
||||
elif action == 'LOGOFF' and part.action == 'LOGOFF':
|
||||
action_scripts.append(part)
|
||||
elif action == 'STARTUP' and part.action == 'STARTUP':
|
||||
action_scripts.append(part)
|
||||
elif action == 'SHUTDOWN' and part.action == 'SHUTDOWN':
|
||||
action_scripts.append(part)
|
||||
return action_scripts
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_files(cls, sid):
|
||||
return cls.files
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_networkshare(cls, sid):
|
||||
return cls.networkshares
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_ini(cls, sid):
|
||||
return cls.inifiles
|
||||
|
||||
|
||||
@classmethod
|
||||
def wipe_user(cls, sid):
|
||||
cls.wipe_hklm()
|
||||
|
||||
|
||||
@classmethod
|
||||
def wipe_hklm(cls):
|
||||
cls.global_registry_dict = dict({cls._ReadQueue:{}})
|
||||
|
||||
|
||||
def filter_dict_keys(starting_string, input_dict):
|
||||
result = dict()
|
||||
for key in input_dict:
|
||||
key_list = remove_empty_values(re.split(r'\\|/', key))
|
||||
start_list = remove_empty_values(re.split(r'\\|/', starting_string))
|
||||
if key_list[:len(start_list)] == start_list:
|
||||
result[key] = input_dict.get(key)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def find_preg_type(argument):
|
||||
if isinstance(argument, int):
|
||||
return 4
|
||||
else:
|
||||
return 1
|
||||
|
||||
|
||||
def update_dict(dict1, dict2):
|
||||
'''
|
||||
Updates dict1 with the key-value pairs from dict2
|
||||
'''
|
||||
for key, value in dict2.items():
|
||||
if key in dict1:
|
||||
# If both values are dictionaries, recursively call the update_dict function
|
||||
if isinstance(dict1[key], dict) and isinstance(value, dict):
|
||||
update_dict(dict1[key], value)
|
||||
# If the value in dict1 is a list, extend it with unique values from value
|
||||
elif isinstance(dict1[key], list):
|
||||
dict1[key].extend(set(value) - set(dict1[key]))
|
||||
else:
|
||||
# If the value in dict1 is not a dictionary or the value in dict2 is not a dictionary,
|
||||
# replace the value in dict1 with the value from dict2
|
||||
dict1[key] = value
|
||||
else:
|
||||
# If the key does not exist in dict1, add the key-value pair from dict2 to dict1
|
||||
dict1[key] = value
|
||||
|
||||
|
||||
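update_dict() merges nested policy dictionaries recursively: dictionaries merge key by key, lists are extended with values not already present, and anything else is overwritten. A condensed, hedged restatement of those rules with example data:

# Condensed restatement of the merge rules implemented by update_dict() above.
def merge(dst, src):
    for key, value in src.items():
        if key in dst and isinstance(dst[key], dict) and isinstance(value, dict):
            merge(dst[key], value)                       # recurse into nested dicts
        elif key in dst and isinstance(dst[key], list):
            dst[key].extend(set(value) - set(dst[key]))  # extend lists with new items only
        else:
            dst[key] = value                             # scalars and new keys are overwritten

a = {'Software': {'Install': ['vim'], 'Level': 1}}
b = {'Software': {'Install': ['vim', 'mc'], 'Level': 2}}
merge(a, b)
print(a)   # {'Software': {'Install': ['vim', 'mc'], 'Level': 2}}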
def add_to_dict(string, policy_name, username, version):
|
||||
if username is None:
|
||||
correct_path = '/'.join(string.split('/')[:-2])
|
||||
machine= '{}/Machine'.format(Dconf_registry._ReadQueue)
|
||||
dictionary = Dconf_registry.global_registry_dict.setdefault(machine, dict())
|
||||
else:
|
||||
correct_path = '/'.join(string.split('/')[:-2])
|
||||
user = '{}/User'.format(Dconf_registry._ReadQueue)
|
||||
dictionary = Dconf_registry.global_registry_dict.setdefault(user, dict())
|
||||
|
||||
dictionary[len(dictionary)] = (policy_name, correct_path, version)
|
||||
|
||||
|
||||
def load_preg_dconf(pregfile, pathfile, policy_name, username, version=None):
|
||||
'''
|
||||
Loads the configuration from preg registry into a dictionary
|
||||
'''
|
||||
dd = dict()
|
||||
for i in pregfile.entries:
|
||||
# Skip this entry if the valuename starts with '**del'
|
||||
if i.valuename.lower().startswith('**del'):
|
||||
continue
|
||||
valuename = convert_string_dconf(i.valuename)
|
||||
data = check_data(i.data, i.type)
|
||||
if i.valuename != i.data and i.valuename:
|
||||
if i.keyname.replace('\\', '/') in dd:
|
||||
# If the key exists in dd, update its value with the new key-value pair
|
||||
dd[i.keyname.replace('\\', '/')].update({valuename.replace('\\', '/'):data})
|
||||
else:
|
||||
# If the key does not exist in dd, create a new key-value pair
|
||||
dd[i.keyname.replace('\\', '/')] = {valuename.replace('\\', '/'):data}
|
||||
|
||||
elif not i.valuename:
|
||||
keyname_tmp = i.keyname.replace('\\', '/').split('/')
|
||||
keyname = '/'.join(keyname_tmp[:-1])
|
||||
if keyname in dd:
|
||||
# If the key exists in dd, update its value with the new key-value pair
|
||||
dd[keyname].update({keyname_tmp[-1]:data})
|
||||
else:
|
||||
# If the key does not exist in dd, create a new key-value pair
|
||||
dd[keyname] = {keyname_tmp[-1]:data}
|
||||
|
||||
else:
|
||||
# If the value name is the same as the data,
|
||||
# split the keyname and add the data to the appropriate location in dd.
|
||||
all_list_key = i.keyname.split('\\')
|
||||
dd_target = dd.setdefault('/'.join(all_list_key[:-1]),{})
|
||||
dd_target.setdefault(all_list_key[-1], []).append(data)
|
||||
|
||||
# Update the global registry dictionary with the contents of dd
|
||||
add_to_dict(pathfile, policy_name, username, version)
|
||||
update_dict(Dconf_registry.global_registry_dict, dd)
|
||||
|
||||
|
||||
def create_dconf_ini_file(filename, data):
|
||||
'''
|
||||
Create an ini-file based on a dictionary of dictionaries.
|
||||
Args:
|
||||
data (dict): The dictionary of dictionaries containing the data for the ini-file.
|
||||
filename (str): The filename to save the ini-file.
|
||||
Returns:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
'''
|
||||
with open(filename, 'w') as file:
|
||||
for section, section_data in data.items():
|
||||
file.write(f'[{section}]\n')
|
||||
for key, value in section_data.items():
|
||||
if isinstance(value, int):
|
||||
file.write(f'{key} = {value}\n')
|
||||
else:
|
||||
file.write(f'{key} = "{value}"\n')
|
||||
file.write('\n')
|
||||
logdata = dict()
|
||||
logdata['path'] = filename
|
||||
log('D209', logdata)
|
||||
Dconf_registry.dconf_update()
|
||||
|
||||
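create_dconf_ini_file() writes one keyfile section per dconf path, quoting strings and leaving integers bare. A sketch of the emitted text for a tiny policy dictionary; the section name and the values are hypothetical examples, only the quoting rules are taken from the function above:

# Sketch of the keyfile text produced by create_dconf_ini_file(); example data is hypothetical.
data = {'Software/BaseALT/Policies/Packages': {'Install': "['vim']", 'Timeout': 30}}
lines = []
for section, section_data in data.items():
    lines.append(f'[{section}]')
    for key, value in section_data.items():
        lines.append(f'{key} = {value}' if isinstance(value, int) else f'{key} = "{value}"')
    lines.append('')
print('\n'.join(lines))
# [Software/BaseALT/Policies/Packages]
# Install = "['vim']"
# Timeout = 30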
def clean_data(data):
|
||||
try:
|
||||
cleaned_string = data.replace('\n', '').replace('\r', '')
|
||||
cleaned_string = cleaned_string.replace('"', "'")
|
||||
return cleaned_string
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def check_data(data, t_data):
|
||||
if isinstance(data, bytes):
|
||||
if t_data == 7:
|
||||
return clean_data(data.decode('utf-16').replace('\x00',''))
|
||||
else:
|
||||
return None
|
||||
elif t_data == 4:
|
||||
return data
|
||||
return clean_data(data)
|
||||
|
||||
def convert_string_dconf(input_string):
|
||||
macros = {
|
||||
'#': '%sharp%',
|
||||
';': '%semicolon%',
|
||||
'//': '%doubleslash%'
|
||||
}
|
||||
output_string = input_string
|
||||
for key, value in macros.items():
|
||||
if key in input_string:
|
||||
output_string = output_string.replace(key, value)
|
||||
elif value in input_string:
|
||||
output_string = output_string.replace(value, key)
|
||||
|
||||
return output_string
|
||||
|
||||
def remove_empty_values(input_list):
|
||||
return list(filter(None, input_list))
|
||||
|
||||
def flatten_dictionary(input_dict, result=None, current_key=''):
|
||||
if result is None:
|
||||
result = {}
|
||||
|
||||
for key, value in input_dict.items():
|
||||
new_key = f"{current_key}/{key}" if current_key else key
|
||||
|
||||
if isinstance(value, dict):
|
||||
flatten_dictionary(value, result, new_key)
|
||||
else:
|
||||
result[new_key] = value
|
||||
|
||||
return result
|
||||
|
||||
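flatten_dictionary() turns the nested registry dictionary into 'a/b/c' keys, and filter_dict_keys() above then matches a prefix component by component regardless of slash direction. A hedged, self-contained sketch of the two working together:

# Hedged sketch of flatten_dictionary() + filter_dict_keys() working together.
import re

def flatten(d, prefix=''):
    out = {}
    for k, v in d.items():
        key = f'{prefix}/{k}' if prefix else k
        if isinstance(v, dict):
            out.update(flatten(v, key))
        else:
            out[key] = v
    return out

def filter_keys(prefix, flat):
    split = lambda s: [p for p in re.split(r'\\|/', s) if p]
    want = split(prefix)
    return {k: v for k, v in flat.items() if split(k)[:len(want)] == want}

flat = flatten({'Software': {'BaseALT': {'Policies': {'Control': {'sudo': 1}}}}})
print(filter_keys('Software\\BaseALT/Policies', flat))
# {'Software/BaseALT/Policies/Control/sudo': 1}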
def get_dconf_envprofile():
|
||||
dconf_envprofile = {'default': {'DCONF_PROFILE': 'default'},
|
||||
'local': {'DCONF_PROFILE': 'local'},
|
||||
'system': {'DCONF_PROFILE': 'system'}
|
||||
}
|
||||
|
||||
if Dconf_registry._envprofile:
|
||||
return dconf_envprofile.get(Dconf_registry._envprofile, dconf_envprofile['system'])
|
||||
|
||||
if not Dconf_registry._username:
|
||||
return dconf_envprofile['system']
|
||||
|
||||
profile = '/run/dconf/user/{}'.format(get_uid_by_username(Dconf_registry._username))
|
||||
return {'DCONF_PROFILE': profile}
|
@ -1,125 +0,0 @@
|
||||
#
|
||||
# GPOA - GPO Applier for Linux
|
||||
#
|
||||
# Copyright (C) 2021 BaseALT Ltd. <org@basealt.ru>
|
||||
# Copyright (C) 2021 Igor Chudov <nir@nir.org.ru>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
import smbc
|
||||
|
||||
|
||||
from util.logging import log
|
||||
from util.paths import file_cache_dir, file_cache_path_home, UNCPath
|
||||
from util.exceptions import NotUNCPathError
|
||||
|
||||
|
||||
class fs_file_cache:
|
||||
__read_blocksize = 4096
|
||||
|
||||
def __init__(self, cache_name, username = None):
|
||||
self.cache_name = cache_name
|
||||
if username:
|
||||
try:
|
||||
self.storage_uri = file_cache_path_home(username)
|
||||
except Exception:
|
||||
self.storage_uri = file_cache_dir()
|
||||
else:
|
||||
self.storage_uri = file_cache_dir()
|
||||
logdata = dict({'cache_file': self.storage_uri})
|
||||
log('D20', logdata)
|
||||
self.samba_context = smbc.Context(use_kerberos=1)
|
||||
#, debug=10)
|
||||
|
||||
def store(self, uri, destfile = None):
|
||||
try:
|
||||
uri_path = UNCPath(uri)
|
||||
if not destfile:
|
||||
file_name = os.path.basename(uri_path.get_path())
|
||||
file_path = os.path.dirname(uri_path.get_path())
|
||||
destdir = Path('{}/{}/{}'.format(self.storage_uri,
|
||||
uri_path.get_domain(),
|
||||
file_path))
|
||||
else:
|
||||
destdir = destfile.parent
|
||||
except NotUNCPathError:
|
||||
return None
|
||||
|
||||
except Exception as exc:
|
||||
logdata = dict({'exception': str(exc)})
|
||||
log('D144', logdata)
|
||||
raise exc
|
||||
|
||||
if not destdir.exists():
|
||||
destdir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not destfile:
|
||||
destfile = Path('{}/{}/{}'.format(self.storage_uri,
|
||||
uri_path.get_domain(),
|
||||
uri_path.get_path()))
|
||||
|
||||
try:
|
||||
fd, tmpfile = tempfile.mkstemp('', str(destfile))
|
||||
df = os.fdopen(fd, 'wb')
|
||||
file_handler = self.samba_context.open(str(uri_path), os.O_RDONLY)
|
||||
while True:
|
||||
data = file_handler.read(self.__read_blocksize)
|
||||
if not data:
|
||||
break
|
||||
df.write(data)
|
||||
df.close()
|
||||
os.rename(tmpfile, destfile)
|
||||
os.chmod(destfile, 0o644)
|
||||
except Exception:
|
||||
tmppath = Path(tmpfile)
|
||||
if tmppath.exists():
|
||||
tmppath.unlink()
|
||||
|
||||
def get(self, uri):
|
||||
destfile = uri
|
||||
try:
|
||||
uri_path = UNCPath(uri)
|
||||
file_name = os.path.basename(uri_path.get_path())
|
||||
file_path = os.path.dirname(uri_path.get_path())
|
||||
destfile = Path('{}/{}/{}'.format(self.storage_uri,
|
||||
uri_path.get_domain(),
|
||||
uri_path.get_path()))
|
||||
except NotUNCPathError as exc:
|
||||
logdata = dict({'path': str(exc)})
|
||||
log('D62', logdata)
|
||||
except Exception as exc:
|
||||
logdata = dict({'exception': str(exc)})
|
||||
log('E36', logdata)
|
||||
raise exc
|
||||
|
||||
return str(destfile)
|
||||
|
||||
def get_ls_smbdir(self, uri):
|
||||
type_file_smb = 8
|
||||
try:
|
||||
uri_path = UNCPath(uri)
|
||||
opendir = self.samba_context.opendir(str(uri_path))
|
||||
ls_obj = opendir.getdents()
|
||||
ls = [obj.name for obj in ls_obj if obj.smbc_type == type_file_smb]
|
||||
return ls
|
||||
except Exception as exc:
|
||||
if Path(uri).exists():
|
||||
return None
|
||||
logdata = dict({'exception': str(exc)})
|
||||
log('W12', logdata)
|
||||
return None
|
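fs_file_cache.store() and get() map a UNC uri onto a local path of the form '{cache_root}/{domain}/{path}'. A simplified sketch of that mapping; the cache root below is an assumption (it stands in for whatever file_cache_dir() returns) and the UNC parsing is deliberately naive compared to UNCPath:

# Hedged sketch of the UNC-to-cache-path mapping used by fs_file_cache.
cache_root = '/var/cache/gpupdate'   # assumption: whatever file_cache_dir() returns
uri = r'\\example.org\SysVol\example.org\Policies\{GUID}\GPT.INI'
parts = uri.strip('\\').split('\\')
domain, path = parts[0], '/'.join(parts[1:])
print('{}/{}/{}'.format(cache_root, domain, path))
# /var/cache/gpupdate/example.org/SysVol/example.org/Policies/{GUID}/GPT.INI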
@ -20,290 +20,40 @@ class samba_preg(object):
|
||||
'''
|
||||
Object mapping representing HKLM entry (registry key without SID)
|
||||
'''
|
||||
def __init__(self, preg_obj, policy_name):
|
||||
self.policy_name = policy_name
|
||||
self.keyname = preg_obj.keyname
|
||||
self.valuename = preg_obj.valuename
|
||||
self.hive_key = '{}\\{}'.format(self.keyname, self.valuename)
|
||||
def __init__(self, preg_obj):
|
||||
self.hive_key = '{}\\{}'.format(preg_obj.keyname, preg_obj.valuename)
|
||||
self.type = preg_obj.type
|
||||
self.data = preg_obj.data
|
||||
|
||||
def update_fields(self):
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['type'] = self.type
|
||||
fields['data'] = self.data
|
||||
|
||||
return fields
|
||||
|
||||
class samba_hkcu_preg(object):
|
||||
'''
|
||||
Object mapping representing HKCU entry (registry key with SID)
|
||||
'''
|
||||
def __init__(self, sid, preg_obj, policy_name):
|
||||
def __init__(self, sid, preg_obj):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.keyname = preg_obj.keyname
|
||||
self.valuename = preg_obj.valuename
|
||||
self.hive_key = '{}\\{}'.format(self.keyname, self.valuename)
|
||||
self.hive_key = '{}\\{}'.format(preg_obj.keyname, preg_obj.valuename)
|
||||
self.type = preg_obj.type
|
||||
self.data = preg_obj.data
|
||||
|
||||
def update_fields(self):
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['type'] = self.type
|
||||
fields['data'] = self.data
|
||||
|
||||
return fields
|
||||
|
||||
class ad_shortcut(object):
|
||||
'''
|
||||
Object mapping representing Windows shortcut.
|
||||
'''
|
||||
def __init__(self, sid, sc, policy_name):
|
||||
def __init__(self, sid, sc):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.path = sc.dest
|
||||
self.shortcut = sc.to_json()
|
||||
|
||||
def update_fields(self):
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['path'] = self.path
|
||||
fields['shortcut'] = self.shortcut
|
||||
|
||||
return fields
|
||||
|
||||
class info_entry(object):
|
||||
def __init__(self, name, value):
|
||||
self.name = name
|
||||
self.value = value
|
||||
|
||||
def update_fields(self):
|
||||
fields = dict()
|
||||
fields['value'] = self.value
|
||||
|
||||
return fields
|
||||
|
||||
class printer_entry(object):
|
||||
'''
|
||||
Object mapping representing Windows printer of some type.
|
||||
'''
|
||||
def __init__(self, sid, pobj, policy_name):
|
||||
def __init__(self, sid, pobj):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.name = pobj.name
|
||||
self.printer = pobj.to_json()
|
||||
|
||||
def update_fields(self):
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['name'] = self.name
|
||||
fields['printer'] = self.printer.to_json()
|
||||
|
||||
return fields
|
||||
|
||||
class drive_entry(object):
|
||||
'''
|
||||
Object mapping representing Samba share bound to drive letter
|
||||
'''
|
||||
def __init__(self, sid, dobj, policy_name):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.login = dobj.login
|
||||
self.password = dobj.password
|
||||
self.dir = dobj.dir
|
||||
self.path = dobj.path
|
||||
self.action = dobj.action
|
||||
self.thisDrive = dobj.thisDrive
|
||||
self.allDrives = dobj.allDrives
|
||||
self.label = dobj.label
|
||||
self.persistent = dobj.persistent
|
||||
self.useLetter = dobj.useLetter
|
||||
|
||||
|
||||
def update_fields(self):
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['login'] = self.login
|
||||
fields['password'] = self.password
|
||||
fields['dir'] = self.dir
|
||||
fields['path'] = self.path
|
||||
fields['action'] = self.action
|
||||
fields['thisDrive'] = self.thisDrive
|
||||
fields['allDrives'] = self.allDrives
|
||||
fields['label'] = self.label
|
||||
fields['persistent'] = self.persistent
|
||||
fields['useLetter'] = self.useLetter
|
||||
|
||||
return fields
|
||||
|
||||
class folder_entry(object):
|
||||
'''
|
||||
Object mapping representing file system directory
|
||||
'''
|
||||
def __init__(self, sid, fobj, policy_name):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.path = fobj.path
|
||||
self.action = fobj.action.value
|
||||
self.delete_folder = str(fobj.delete_folder)
|
||||
self.delete_sub_folders = str(fobj.delete_sub_folders)
|
||||
self.delete_files = str(fobj.delete_files)
|
||||
self.hidden_folder = str(fobj.hidden_folder)
|
||||
|
||||
def update_fields(self):
|
||||
'''
|
||||
Return list of fields to update
|
||||
'''
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['action'] = self.action
|
||||
fields['delete_folder'] = self.delete_folder
|
||||
fields['delete_sub_folders'] = self.delete_sub_folders
|
||||
fields['delete_files'] = self.delete_files
|
||||
fields['hidden_folder'] = self.hidden_folder
|
||||
|
||||
|
||||
return fields
|
||||
|
||||
class envvar_entry(object):
|
||||
'''
|
||||
Object mapping representing environment variables
|
||||
'''
|
||||
def __init__(self, sid, evobj, policy_name):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.name = evobj.name
|
||||
self.value = evobj.value
|
||||
self.action = evobj.action.value
|
||||
|
||||
def update_fields(self):
|
||||
'''
|
||||
Return list of fields to update
|
||||
'''
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['action'] = self.action
|
||||
fields['value'] = self.value
|
||||
|
||||
return fields
|
||||
|
||||
class script_entry(object):
|
||||
'''
|
||||
Object mapping representing scripts.ini
|
||||
'''
|
||||
def __init__(self, sid, scrobj, policy_name):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.action = scrobj.action
|
||||
self.number = scrobj.number
|
||||
self.path = scrobj.path
|
||||
self.arg = scrobj.args
|
||||
|
||||
def update_fields(self):
|
||||
'''
|
||||
Return list of fields to update
|
||||
'''
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['action'] = self.action
|
||||
fields['number'] = self.number
|
||||
fields['path'] = self.path
|
||||
fields['arg'] = self.arg
|
||||
|
||||
return fields
|
||||
|
||||
class file_entry(object):
|
||||
'''
|
||||
Object mapping representing FILES.XML
|
||||
'''
|
||||
def __init__(self, sid, fileobj, policy_name):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.action = fileobj.action
|
||||
self.fromPath = fileobj.fromPath
|
||||
self.targetPath = fileobj.targetPath
|
||||
self.readOnly = fileobj.readOnly
|
||||
self.archive = fileobj.archive
|
||||
self.hidden = fileobj.hidden
|
||||
self.suppress = fileobj.suppress
|
||||
self.executable = fileobj.executable
|
||||
|
||||
def update_fields(self):
|
||||
'''
|
||||
Return list of fields to update
|
||||
'''
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['action'] = self.action
|
||||
fields['fromPath'] = self.fromPath
|
||||
fields['targetPath'] = self.targetPath
|
||||
fields['readOnly'] = self.readOnly
|
||||
fields['archive'] = self.archive
|
||||
fields['hidden'] = self.hidden
|
||||
fields['suppress'] = self.suppress
|
||||
fields['executable'] = self.executable
|
||||
|
||||
return fields
|
||||
|
||||
class ini_entry(object):
|
||||
'''
|
||||
Object mapping representing INIFILES.XML
|
||||
'''
|
||||
def __init__(self, sid, iniobj, policy_name):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.action = iniobj.action
|
||||
self.path = iniobj.path
|
||||
self.section = iniobj.section
|
||||
self.property = iniobj.property
|
||||
self.value = iniobj.value
|
||||
|
||||
|
||||
def update_fields(self):
|
||||
'''
|
||||
Return list of fields to update
|
||||
'''
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['action'] = self.action
|
||||
fields['path'] = self.path
|
||||
fields['section'] = self.section
|
||||
fields['property'] = self.property
|
||||
fields['value'] = self.value
|
||||
|
||||
return fields
|
||||
|
||||
class networkshare_entry(object):
|
||||
'''
|
||||
Object mapping representing NETWORKSHARES.XML
|
||||
'''
|
||||
def __init__(self, sid, networkshareobj, policy_name):
|
||||
self.sid = sid
|
||||
self.policy_name = policy_name
|
||||
self.name = networkshareobj.name
|
||||
self.action = networkshareobj.action
|
||||
self.path = networkshareobj.path
|
||||
self.allRegular = networkshareobj.allRegular
|
||||
self.comment = networkshareobj.comment
|
||||
self.limitUsers = networkshareobj.limitUsers
|
||||
self.abe = networkshareobj.abe
|
||||
|
||||
|
||||
def update_fields(self):
|
||||
'''
|
||||
Return list of fields to update
|
||||
'''
|
||||
fields = dict()
|
||||
fields['policy_name'] = self.policy_name
|
||||
fields['name'] = self.name
|
||||
fields['action'] = self.action
|
||||
fields['path'] = self.path
|
||||
fields['allRegular'] = self.allRegular
|
||||
fields['comment'] = self.comment
|
||||
fields['limitUsers'] = self.limitUsers
|
||||
fields['abe'] = self.abe
|
||||
|
||||
return fields
|
||||
|
@ -18,6 +18,7 @@
|
||||
|
||||
from .cache import cache
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from sqlalchemy import (
|
||||
@ -26,12 +27,14 @@ from sqlalchemy import (
|
||||
Column,
|
||||
Integer,
|
||||
String,
|
||||
MetaData
|
||||
)
|
||||
from sqlalchemy.orm import (
|
||||
mapper,
|
||||
sessionmaker
|
||||
)
|
||||
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from .sqlite_registry_compat import sqlite_registry_compat
|
||||
|
||||
from util.logging import log
|
||||
from util.logging import slogm
|
||||
from util.paths import cache_dir
|
||||
|
||||
def mapping_factory(mapper_suffix):
|
||||
@ -50,11 +53,9 @@ class sqlite_cache(cache):
|
||||
self.cache_name = cache_name
|
||||
self.mapper_obj = mapping_factory(self.cache_name)
|
||||
self.storage_uri = os.path.join('sqlite:///{}/{}.sqlite'.format(cache_dir(), self.cache_name))
|
||||
logdata = dict({'cache_file': self.storage_uri})
|
||||
log('D20', logdata)
|
||||
logging.debug(slogm('Initializing cache {}'.format(self.storage_uri)))
|
||||
self.db_cnt = create_engine(self.storage_uri, echo=False)
|
||||
self.__compat = sqlite_registry_compat(self.db_cnt)
|
||||
self.__metadata = self.__compat.metadata()
|
||||
self.__metadata = MetaData(self.db_cnt)
|
||||
self.cache_table = Table(
|
||||
self.cache_name,
|
||||
self.__metadata,
|
||||
@ -66,8 +67,7 @@ class sqlite_cache(cache):
|
||||
self.__metadata.create_all(self.db_cnt)
|
||||
Session = sessionmaker(bind=self.db_cnt)
|
||||
self.db_session = Session()
|
||||
mapper_reg = self.__compat
|
||||
mapper_reg.map_imperatively(self.mapper_obj, self.cache_table)
|
||||
mapper(self.mapper_obj, self.cache_table)
|
||||
|
||||
def store(self, str_id, value):
|
||||
obj = self.mapper_obj(str_id, value)
|
||||
@ -80,9 +80,7 @@ class sqlite_cache(cache):
|
||||
def get_default(self, obj_id, default_value):
|
||||
result = self.get(obj_id)
|
||||
if result is None:
|
||||
logdata = dict()
|
||||
logdata['object'] = obj_id
|
||||
log('D43', logdata)
|
||||
logging.debug(slogm('No value cached for {}'.format(obj_id)))
|
||||
self.store(obj_id, default_value)
|
||||
return str(default_value)
|
||||
return result.value
|
||||
@ -91,11 +89,9 @@ class sqlite_cache(cache):
|
||||
try:
|
||||
self.db_session.add(obj)
|
||||
self.db_session.commit()
|
||||
except Exception as exc:
|
||||
except:
|
||||
self.db_session.rollback()
|
||||
logdata = dict()
|
||||
logdata['msg'] = str(exc)
|
||||
log('D44', logdata)
|
||||
logging.error(slogm('Error inserting value into cache, will update the value'))
|
||||
self.db_session.query(self.mapper_obj).filter(self.mapper_obj.str_id == obj.str_id).update({ 'value': obj.value })
|
||||
self.db_session.commit()
|
||||
|
||||
|
@ -16,6 +16,7 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from sqlalchemy import (
|
||||
@ -24,13 +25,15 @@ from sqlalchemy import (
|
||||
Column,
|
||||
Integer,
|
||||
String,
|
||||
MetaData,
|
||||
UniqueConstraint
|
||||
)
|
||||
from sqlalchemy.orm import (
|
||||
mapper,
|
||||
sessionmaker
|
||||
)
|
||||
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from .sqlite_registry_compat import sqlite_registry_compat
|
||||
|
||||
from util.logging import log
|
||||
from util.logging import slogm
|
||||
from util.paths import cache_dir
|
||||
from .registry import registry
|
||||
from .record_types import (
|
||||
@ -39,13 +42,6 @@ from .record_types import (
|
||||
, ad_shortcut
|
||||
, info_entry
|
||||
, printer_entry
|
||||
, drive_entry
|
||||
, folder_entry
|
||||
, envvar_entry
|
||||
, script_entry
|
||||
, file_entry
|
||||
, ini_entry
|
||||
, networkshare_entry
|
||||
)
|
||||
|
||||
class sqlite_registry(registry):
|
||||
@ -56,8 +52,7 @@ class sqlite_registry(registry):
|
||||
cdir = cache_dir()
|
||||
self.db_path = os.path.join('sqlite:///{}/{}.sqlite'.format(cdir, self.db_name))
|
||||
self.db_cnt = create_engine(self.db_path, echo=False)
|
||||
self.__compat = sqlite_registry_compat(self.db_cnt)
|
||||
self.__metadata = self.__compat.metadata()
|
||||
self.__metadata = MetaData(self.db_cnt)
|
||||
self.__info = Table(
|
||||
'info',
|
||||
self.__metadata,
|
||||
@ -66,167 +61,50 @@ class sqlite_registry(registry):
|
||||
Column('value', String(65536))
|
||||
)
|
||||
self.__hklm = Table(
|
||||
'HKLM'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('hive_key', String(65536, collation='NOCASE'),
|
||||
unique=True)
|
||||
, Column('keyname', String(collation='NOCASE'))
|
||||
, Column('valuename', String(collation='NOCASE'))
|
||||
, Column('policy_name', String)
|
||||
, Column('type', Integer)
|
||||
, Column('data', String)
|
||||
'HKLM',
|
||||
self.__metadata,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('hive_key', String(65536), unique=True),
|
||||
Column('type', Integer),
|
||||
Column('data', String)
|
||||
)
|
||||
self.__hkcu = Table(
|
||||
'HKCU'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('hive_key', String(65536, collation='NOCASE'))
|
||||
, Column('keyname', String(collation='NOCASE'))
|
||||
, Column('valuename', String(collation='NOCASE'))
|
||||
, Column('policy_name', String)
|
||||
, Column('type', Integer)
|
||||
, Column('data', String)
|
||||
, UniqueConstraint('sid', 'hive_key')
|
||||
'HKCU',
|
||||
self.__metadata,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('sid', String),
|
||||
Column('hive_key', String(65536)),
|
||||
Column('type', Integer),
|
||||
Column('data', String),
|
||||
UniqueConstraint('sid', 'hive_key')
|
||||
)
|
||||
self.__shortcuts = Table(
|
||||
'Shortcuts'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('path', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('shortcut', String)
|
||||
, UniqueConstraint('sid', 'path')
|
||||
'Shortcuts',
|
||||
self.__metadata,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('sid', String),
|
||||
Column('path', String),
|
||||
Column('shortcut', String),
|
||||
UniqueConstraint('sid', 'path')
|
||||
)
|
||||
self.__printers = Table(
|
||||
'Printers'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('name', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('printer', String)
|
||||
, UniqueConstraint('sid', 'name')
|
||||
'Printers',
|
||||
self.__metadata,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('sid', String),
|
||||
Column('name', String),
|
||||
Column('printer', String),
|
||||
UniqueConstraint('sid', 'name')
|
||||
)
|
||||
self.__drives = Table(
|
||||
'Drives'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('login', String)
|
||||
, Column('password', String)
|
||||
, Column('dir', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('path', String)
|
||||
, Column('action', String)
|
||||
, Column('thisDrive', String)
|
||||
, Column('allDrives', String)
|
||||
, Column('label', String)
|
||||
, Column('persistent', String)
|
||||
, Column('useLetter', String)
|
||||
, UniqueConstraint('sid', 'dir')
|
||||
)
|
||||
self.__folders = Table(
|
||||
'Folders'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('path', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('action', String)
|
||||
, Column('delete_folder', String)
|
||||
, Column('delete_sub_folders', String)
|
||||
, Column('delete_files', String)
|
||||
, Column('hidden_folder', String)
|
||||
, UniqueConstraint('sid', 'path')
|
||||
)
|
||||
self.__envvars = Table(
|
||||
'Envvars'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('name', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('action', String)
|
||||
, Column('value', String)
|
||||
, UniqueConstraint('sid', 'name')
|
||||
)
|
||||
self.__scripts = Table(
|
||||
'Scripts'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('number', String)
|
||||
, Column('action', String)
|
||||
, Column('path', String)
|
||||
, Column('arg', String)
|
||||
, UniqueConstraint('sid', 'path', 'arg')
|
||||
)
|
||||
self.__files = Table(
|
||||
'Files'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('action', String)
|
||||
, Column('fromPath', String)
|
||||
, Column('targetPath', String)
|
||||
, Column('readOnly', String)
|
||||
, Column('archive', String)
|
||||
, Column('hidden', String)
|
||||
, Column('suppress', String)
|
||||
, Column('executable', String)
|
||||
, UniqueConstraint('sid', 'policy_name', 'targetPath', 'fromPath')
|
||||
)
|
||||
self.__ini = Table(
|
||||
'Ini'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('action', String)
|
||||
, Column('path', String)
|
||||
, Column('section', String)
|
||||
, Column('property', String)
|
||||
, Column('value', String)
|
||||
, UniqueConstraint('sid', 'action', 'path', 'section', 'property', 'value')
|
||||
)
|
||||
self.__networkshare = Table(
|
||||
'Networkshare'
|
||||
, self.__metadata
|
||||
, Column('id', Integer, primary_key=True)
|
||||
, Column('sid', String)
|
||||
, Column('policy_name', String)
|
||||
, Column('name', String)
|
||||
, Column('action', String)
|
||||
, Column('path', String)
|
||||
, Column('allRegular', String)
|
||||
, Column('comment', String)
|
||||
, Column('limitUsers', String)
|
||||
, Column('abe', String)
|
||||
, UniqueConstraint('sid', 'name', 'path')
|
||||
)
|
||||
|
||||
self.__metadata.create_all(self.db_cnt)
|
||||
Session = sessionmaker(bind=self.db_cnt)
|
||||
self.db_session = Session()
|
||||
mapper_reg = self.__compat
|
||||
try:
|
||||
mapper_reg.map_imperatively(info_entry, self.__info)
|
||||
mapper_reg.map_imperatively(samba_preg, self.__hklm)
|
||||
mapper_reg.map_imperatively(samba_hkcu_preg, self.__hkcu)
|
||||
mapper_reg.map_imperatively(ad_shortcut, self.__shortcuts)
|
||||
mapper_reg.map_imperatively(printer_entry, self.__printers)
|
||||
mapper_reg.map_imperatively(drive_entry, self.__drives)
|
||||
mapper_reg.map_imperatively(folder_entry, self.__folders)
|
||||
mapper_reg.map_imperatively(envvar_entry, self.__envvars)
|
||||
mapper_reg.map_imperatively(script_entry, self.__scripts)
|
||||
mapper_reg.map_imperatively(file_entry, self.__files)
|
||||
mapper_reg.map_imperatively(ini_entry, self.__ini)
|
||||
mapper_reg.map_imperatively(networkshare_entry, self.__networkshare)
|
||||
mapper(info_entry, self.__info)
|
||||
mapper(samba_preg, self.__hklm)
mapper(samba_hkcu_preg, self.__hkcu)
mapper(ad_shortcut, self.__shortcuts)
mapper(printer_entry, self.__printers)
except:
pass
#logging.error('Error creating mapper')
@ -243,327 +121,133 @@ class sqlite_registry(registry):
try:
self._add(row)
except:
update_obj = dict({ 'value': row.value })
(self
.db_session.query(info_entry)
.filter(info_entry.name == row.name)
.update(row.update_fields()))
.update(update_obj))
self.db_session.commit()

def _hklm_upsert(self, row):
try:
self._add(row)
except:
update_obj = dict({'type': row.type, 'data': row.data })
(self
.db_session
.query(samba_preg)
.filter(samba_preg.hive_key == row.hive_key)
.update(row.update_fields()))
.update(update_obj))
self.db_session.commit()

def _hkcu_upsert(self, row):
try:
self._add(row)
except Exception as exc:
except:
update_obj = dict({'type': row.type, 'data': row.data })
(self
.db_session
.query(samba_hkcu_preg)
.query(samba_preg)
.filter(samba_hkcu_preg.sid == row.sid)
.filter(samba_hkcu_preg.hive_key == row.hive_key)
.update(row.update_fields()))
.update(update_obj))
self.db_session.commit()

def _shortcut_upsert(self, row):
try:
self._add(row)
except:
update_obj = dict({ 'shortcut': row.shortcut })
(self
.db_session
.query(ad_shortcut)
.filter(ad_shortcut.sid == row.sid)
.filter(ad_shortcut.path == row.path)
.update(row.update_fields()))
.update(update_obj))
self.db_session.commit()

def _printer_upsert(self, row):
try:
self._add(row)
except:
update_obj = dict({ 'printer': row.printer })
(self
.db_session
.query(printer_entry)
.filter(printer_entry.sid == row.sid)
.filter(printer_entry.name == row.name)
.update(row.update_fields()))
self.db_session.commit()

def _drive_upsert(self, row):
try:
self._add(row)
except:
(self
.db_session
.query(drive_entry)
.filter(drive_entry.sid == row.sid)
.filter(drive_entry.dir == row.dir)
.update(row.update_fields()))
.update(update_obj))
self.db_session.commit()

def set_info(self, name, value):
ientry = info_entry(name, value)
logdata = dict()
logdata['varname'] = name
logdata['value'] = value
log('D19', logdata)
logging.debug(slogm('Setting info {}:{}'.format(name, value)))
self._info_upsert(ientry)

def _delete_hklm_keyname(self, keyname):
'''
Delete PReg hive_key from HKEY_LOCAL_MACHINE
'''
logdata = dict({'keyname': keyname})
try:
(self
.db_session
.query(samba_preg)
.filter(samba_preg.keyname == keyname)
.delete(synchronize_session=False))
self.db_session.commit()
log('D65', logdata)
except Exception as exc:
log('D63', logdata)

def add_hklm_entry(self, preg_entry, policy_name):
def add_hklm_entry(self, preg_entry):
'''
Write PReg entry to HKEY_LOCAL_MACHINE
'''
pentry = samba_preg(preg_entry, policy_name)
if not pentry.valuename.startswith('**'):
pentry = samba_preg(preg_entry)
if not pentry.hive_key.rpartition('\\')[2].startswith('**'):
self._hklm_upsert(pentry)
else:
logdata = dict({'key': pentry.hive_key})
if pentry.valuename.lower() == '**delvals.':
self._delete_hklm_keyname(pentry.keyname)
else:
log('D27', logdata)
logging.warning(slogm('Skipping branch deletion key: {}'.format(pentry.hive_key)))

def _delete_hkcu_keyname(self, keyname, sid):
'''
Delete PReg hive_key from HKEY_CURRENT_USER
'''
logdata = dict({'sid': sid, 'keyname': keyname})
try:
(self
.db_session
.query(samba_hkcu_preg)
.filter(samba_hkcu_preg.sid == sid)
.filter(samba_hkcu_preg.keyname == keyname)
.delete(synchronize_session=False))
self.db_session.commit()
log('D66', logdata)
except:
log('D64', logdata)

def add_hkcu_entry(self, preg_entry, sid, policy_name):
def add_hkcu_entry(self, preg_entry, sid):
'''
Write PReg entry to HKEY_CURRENT_USER
'''
hkcu_pentry = samba_hkcu_preg(sid, preg_entry, policy_name)
logdata = dict({'sid': sid, 'policy': policy_name, 'key': hkcu_pentry.hive_key})
if not hkcu_pentry.valuename.startswith('**'):
log('D26', logdata)
hkcu_pentry = samba_hkcu_preg(sid, preg_entry)
if not hkcu_pentry.hive_key.rpartition('\\')[2].startswith('**'):
logging.debug(slogm('Adding HKCU entry for {}'.format(sid)))
self._hkcu_upsert(hkcu_pentry)
else:
if hkcu_pentry.valuename.lower() == '**delvals.':
self._delete_hkcu_keyname(hkcu_pentry.keyname, sid)
else:
log('D51', logdata)
logging.warning(slogm('Skipping branch deletion key: {}'.format(hkcu_pentry.hive_key)))

def add_shortcut(self, sid, sc_obj, policy_name):
def add_shortcut(self, sid, sc_obj):
'''
Store shortcut information in the database
'''
sc_entry = ad_shortcut(sid, sc_obj, policy_name)
logdata = dict()
logdata['link'] = sc_entry.path
logdata['sid'] = sid
log('D41', logdata)
sc_entry = ad_shortcut(sid, sc_obj)
logging.debug(slogm('Saving info about {} link for {}'.format(sc_entry.path, sid)))
self._shortcut_upsert(sc_entry)

def add_printer(self, sid, pobj, policy_name):
def add_printer(self, sid, pobj):
'''
Store printer configuration in the database
'''
prn_entry = printer_entry(sid, pobj, policy_name)
logdata = dict()
logdata['printer'] = prn_entry.name
logdata['sid'] = sid
log('D40', logdata)
prn_entry = printer_entry(sid, pobj)
logging.debug(slogm('Saving info about printer {} for {}'.format(prn_entry.name, sid)))
self._printer_upsert(prn_entry)

def add_drive(self, sid, dobj, policy_name):
drv_entry = drive_entry(sid, dobj, policy_name)
logdata = dict()
logdata['uri'] = drv_entry.path
logdata['sid'] = sid
log('D39', logdata)
self._drive_upsert(drv_entry)

def add_folder(self, sid, fobj, policy_name):
fld_entry = folder_entry(sid, fobj, policy_name)
logdata = dict()
logdata['folder'] = fld_entry.path
logdata['sid'] = sid
log('D42', logdata)
try:
self._add(fld_entry)
except Exception as exc:
(self
._filter_sid_obj(folder_entry, sid)
.filter(folder_entry.path == fld_entry.path)
.update(fld_entry.update_fields()))
self.db_session.commit()

def add_envvar(self, sid, evobj, policy_name):
ev_entry = envvar_entry(sid, evobj, policy_name)
logdata = dict()
logdata['envvar'] = ev_entry.name
logdata['sid'] = sid
log('D53', logdata)
try:
self._add(ev_entry)
except Exception as exc:
(self
._filter_sid_obj(envvar_entry, sid)
.filter(envvar_entry.name == ev_entry.name)
.update(ev_entry.update_fields()))
self.db_session.commit()
def add_script(self, sid, scrobj, policy_name):
scr_entry = script_entry(sid, scrobj, policy_name)
logdata = dict()
logdata['script path'] = scrobj.path
logdata['sid'] = sid
log('D153', logdata)
try:
self._add(scr_entry)
except Exception as exc:
(self
._filter_sid_obj(script_entry, sid)
.filter(script_entry.path == scr_entry.path)
.update(scr_entry.update_fields()))
self.db_session.commit()

def add_file(self, sid, fileobj, policy_name):
f_entry = file_entry(sid, fileobj, policy_name)
logdata = dict()
logdata['targetPath'] = f_entry.targetPath
logdata['fromPath'] = f_entry.fromPath
log('D162', logdata)
try:
self._add(f_entry)
except Exception as exc:
(self
._filter_sid_obj(file_entry, sid)
.filter(file_entry.targetPath == f_entry.targetPath)
.update(f_entry.update_fields()))
self.db_session.commit()


def add_ini(self, sid, iniobj, policy_name):
inientry = ini_entry(sid, iniobj, policy_name)
logdata = dict()
logdata['path'] = inientry.path
logdata['action'] = inientry.action
log('D177', logdata)
try:
self._add(inientry)
except Exception as exc:
(self
._filter_sid_obj(ini_entry, sid)
.filter(ini_entry.path == inientry.path)
.update(inientry.update_fields()))
self.db_session.commit()

def add_networkshare(self, sid, networkshareobj, policy_name):
networkshareentry = networkshare_entry(sid, networkshareobj, policy_name)
logdata = dict()
logdata['name'] = networkshareentry.name
logdata['path'] = networkshareentry.path
logdata['action'] = networkshareentry.action
log('D186', logdata)
try:
self._add(networkshareentry)
except Exception as exc:
(self
._filter_sid_obj(networkshare_entry, sid)
.filter(networkshare_entry.path == networkshareentry.path)
.update(networkshareentry.update_fields()))
self.db_session.commit()


def _filter_sid_obj(self, row_object, sid):
def get_shortcuts(self, sid):
res = (self
.db_session
.query(row_object)
.filter(row_object.sid == sid))
return res

def _filter_sid_list(self, row_object, sid):
res = (self
.db_session
.query(row_object)
.filter(row_object.sid == sid)
.order_by(row_object.id)
.query(ad_shortcut)
.filter(ad_shortcut.sid == sid)
.all())
return res

def get_shortcuts(self, sid):
return self._filter_sid_list(ad_shortcut, sid)

def get_printers(self, sid):
return self._filter_sid_list(printer_entry, sid)

def get_drives(self, sid):
return self._filter_sid_list(drive_entry, sid)

def get_folders(self, sid):
return self._filter_sid_list(folder_entry, sid)

def get_envvars(self, sid):
return self._filter_sid_list(envvar_entry, sid)

def _filter_scripts_list(self, row_object, sid, action):
res = (self
.db_session
.query(row_object)
.filter(row_object.sid == sid)
.filter(row_object.action == action)
.order_by(row_object.id)
.query(printer_entry)
.filter(printer_entry.sid == sid)
.all())
return res

def get_scripts(self, sid, action):
return self._filter_scripts_list(script_entry, sid, action)

def get_files(self, sid):
return self._filter_sid_list(file_entry, sid)

def get_networkshare(self, sid):
return self._filter_sid_list(networkshare_entry, sid)

def get_ini(self, sid):
return self._filter_sid_list(ini_entry, sid)

def get_hkcu_entry(self, sid, hive_key):
res = (self
.db_session
.query(samba_hkcu_preg)
.query(samba_preg)
.filter(samba_hkcu_preg.sid == sid)
.filter(samba_hkcu_preg.hive_key == hive_key)
.first())
# Try to get the value from machine SID as a default if no option is set.
if not res:
machine_sid = self.get_info('machine_sid')
res = self.db_session.query(samba_hkcu_preg).filter(samba_hkcu_preg.sid == machine_sid).filter(samba_hkcu_preg.hive_key == hive_key).first()
res = self.db_session.query(samba_preg).filter(samba_hkcu_preg.sid == machine_sid).filter(samba_hkcu_preg.hive_key == hive_key).first()
return res

def filter_hkcu_entries(self, sid, startswith):
@ -598,20 +282,31 @@ class sqlite_registry(registry):
return res

def wipe_user(self, sid):
self._wipe_sid(samba_hkcu_preg, sid)
self._wipe_sid(ad_shortcut, sid)
self._wipe_sid(printer_entry, sid)
self._wipe_sid(drive_entry, sid)
self._wipe_sid(script_entry, sid)
self._wipe_sid(file_entry, sid)
self._wipe_sid(ini_entry, sid)
self._wipe_sid(networkshare_entry, sid)
self.wipe_hkcu(sid)
self.wipe_shortcuts(sid)
self.wipe_printers(sid)

def _wipe_sid(self, row_object, sid):
def wipe_shortcuts(self, sid):
(self
.db_session
.query(row_object)
.filter(row_object.sid == sid)
.query(ad_shortcut)
.filter(ad_shortcut.sid == sid)
.delete())
self.db_session.commit()

def wipe_printers(self, sid):
(self
.db_session
.query(printer_entry)
.filter(printer_entry.sid == sid)
.delete())
self.db_session.commit()

def wipe_hkcu(self, sid):
(self
.db_session
.query(samba_hkcu_preg)
.filter(samba_hkcu_preg.sid == sid)
.delete())
self.db_session.commit()

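The _info_upsert/_hklm_upsert/_hkcu_upsert helpers above all share one insert-then-update pattern: try a plain INSERT via self._add(row) and, if that fails because the key already exists, fall back to an UPDATE of the matching row. Below is a minimal, self-contained sketch of that pattern with SQLAlchemy; the kv_entry model, its columns and the sample keys are illustrative only (not the project's real schema), and the explicit rollback() is added so the standalone snippet stays usable after the failed INSERT.

# Sketch only: illustrative model, not the gpupdate schema.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class kv_entry(Base):
    __tablename__ = 'kv_entry'
    id = Column(Integer, primary_key=True)
    hive_key = Column(String, unique=True)
    data = Column(String)

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

def upsert(session, hive_key, data):
    # Try a plain INSERT first; on a constraint violation fall back to UPDATE,
    # mirroring the try: self._add(row) / except: ... .update(update_obj) flow above.
    try:
        session.add(kv_entry(hive_key=hive_key, data=data))
        session.commit()
    except Exception:
        session.rollback()
        (session
         .query(kv_entry)
         .filter(kv_entry.hive_key == hive_key)
         .update({'data': data}))
        session.commit()

upsert(session, 'Software\\BaseALT\\Policies\\Example', 'first write')
upsert(session, 'Software\\BaseALT\\Policies\\Example', 'second write updates the same row')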
@ -1,45 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2024 BaseALT Ltd.
# Copyright (C) 2024 Evgeny SInelnikov <sin@altlinux.org>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

__compat__ = False

from sqlalchemy import MetaData

try:
from sqlalchemy.orm import registry
except:
from sqlalchemy.orm import mapper
__compat__ = True

class sqlite_registry_compat:
def __init__(self, db_cnt):
if not __compat__:
self.__registry = registry()
self.__metadata = MetaData()
else:
self.__metadata = MetaData(db_cnt)

def metadata(self):
return self.__metadata

def map_imperatively(self, obj, table):
if __compat__:
mapper(obj, table)
else:
self.__registry.map_imperatively(obj, table)
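The removed sqlite_registry_compat shim above bridges the SQLAlchemy API change in which the classical mapper() call was superseded by registry().map_imperatively() in 1.4+. A self-contained sketch of the modern half of that choice, with an illustrative table and class (hosts and HostRow are made-up names, not part of gpupdate):

# Sketch of imperative mapping on SQLAlchemy 1.4+; names are illustrative.
from sqlalchemy import Column, Integer, String, MetaData, Table, create_engine
from sqlalchemy.orm import registry, sessionmaker

mapper_registry = registry()
metadata = MetaData()

hosts = Table('hosts', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String))

class HostRow:
    def __init__(self, name):
        self.name = name

# This is the call the shim's map_imperatively() forwards to when the
# 'from sqlalchemy.orm import registry' import succeeds.
mapper_registry.map_imperatively(HostRow, hosts)

engine = create_engine('sqlite:///:memory:')
metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(HostRow('dc0.example.org'))
session.commit()
print(session.query(HostRow).first().name)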
@ -1,63 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}

{% if No|length %}
polkit.addRule(function (action, subject) {
if ({% for res in No -%}
action.id == "{{res}}"{% if No|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.NO;
}
});
{% endif %}{% if Yes|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Yes -%}
action.id == "{{res}}"{% if Yes|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.YES;
}
});
{% endif %}{% if Auth_self|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_self -%}
action.id == "{{res}}"{% if Auth_self|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.AUTH_SELF;
}
});
{% endif %}{% if Auth_admin|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_admin -%}
action.id == "{{res}}"{% if Auth_admin|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.AUTH_ADMIN;
}
});
{% endif %}{% if Auth_self_keep|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_self_keep -%}
action.id == "{{res}}"{% if Auth_self_keep|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.AUTH_SELF_KEEP;
}
});
{% endif %}{% if Auth_admin_keep|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_admin_keep -%}
action.id == "{{res}}"{% if Auth_admin_keep|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.AUTH_ADMIN_KEEP;
}
});

{% endif %}
@ -1,63 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}

{% if No|length %}
polkit.addRule(function (action, subject) {
if ({% for res in No -%}
action.id == "{{res}}" {% if No|length == loop.index %}&&{% else %}||{% endif %}
{% endfor %}subject.user == "{{User}}") {
return polkit.Result.NO;
}
});{% endif %}{% if Yes|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Yes -%}
action.id == "{{res}}" {% if Yes|length == loop.index %}&&{% else %}||{% endif %}
{% endfor %}subject.user == "{{User}}") {
return polkit.Result.YES;
}
});{% endif %}{% if Auth_self|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_self -%}
action.id == "{{res}}" {% if Auth_self|length == loop.index %}&&{% else %}||{% endif %}
{% endfor %}subject.user == "{{User}}") {
return polkit.Result.AUTH_SELF;
}
});{% endif %}{% if Auth_admin|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_admin -%}
action.id == "{{res}}" {% if Auth_admin|length == loop.index %}&&{% else %}||{% endif %}
{% endfor %}subject.user == "{{User}}") {
return polkit.Result.AUTH_ADMIN;
}
});{% endif %}{% if Auth_self_keep|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_self_keep -%}
action.id == "{{res}}" {% if Auth_self_keep|length == loop.index %}&&{% else %}||{% endif %}
{% endfor %}subject.user == "{{User}}") {
return polkit.Result.AUTH_SELF_KEEP;
}
});{% endif %}{% if Auth_admin_keep|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_admin_keep -%}
action.id == "{{res}}" {% if Auth_admin_keep|length == loop.index %}&&{% else %}||{% endif %}
{% endfor %}subject.user == "{{User}}") {
return polkit.Result.AUTH_ADMIN_KEEP;
}
});
{% endif %}
@ -1,29 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}

{% if Deny_All == 1 %}
polkit.addRule(function (action, subject) {
if ((action.id == "org.freedesktop.udisks2.filesystem-mount" ||
action.id == "org.freedesktop.udisks2.filesystem-mount-system" ||
action.id == "org.freedesktop.udisks2.filesystem-mount-other-seat") &&
subject.user == "{{User}}" ) {
return polkit.Result.NO;
}
});
{% endif %}
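The removed .rules files above are Jinja2 templates that expand into JavaScript polkit rules. A hedged rendering sketch for the udisks2 deny-all template just above; the template text is inlined here for self-containment and the user name is invented, whereas the real code loads the templates from files on disk.

# Sketch: rendering a polkit rules template with Jinja2; values are illustrative.
from jinja2 import Template

rules_template = Template('''{% if Deny_All == 1 %}
polkit.addRule(function (action, subject) {
    if ((action.id == "org.freedesktop.udisks2.filesystem-mount" ||
         action.id == "org.freedesktop.udisks2.filesystem-mount-system" ||
         action.id == "org.freedesktop.udisks2.filesystem-mount-other-seat") &&
         subject.user == "{{ User }}" ) {
        return polkit.Result.NO;
    }
});
{% endif %}''')

print(rules_template.render(Deny_All=1, User='testuser'))  # emits a JavaScript polkit rule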
@ -1,63 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}

{% if No|length %}
polkit.addRule(function (action, subject) {
if ({% for res in No -%}
action.id == "{{res}}"{% if No|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.NO;
}
});
{% endif %}{% if Yes|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Yes -%}
action.id == "{{res}}"{% if Yes|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.YES;
}
});
{% endif %}{% if Auth_self|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_self -%}
action.id == "{{res}}"{% if Auth_self|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.AUTH_SELF;
}
});
{% endif %}{% if Auth_admin|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_admin -%}
action.id == "{{res}}"{% if Auth_admin|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.AUTH_ADMIN;
}
});
{% endif %}{% if Auth_self_keep|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_self_keep -%}
action.id == "{{res}}"{% if Auth_self_keep|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.AUTH_SELF_KEEP;
}
});
{% endif %}{% if Auth_admin_keep|length %}
polkit.addRule(function (action, subject) {
if ({% for res in Auth_admin_keep -%}
action.id == "{{res}}"{% if Auth_admin_keep|length == loop.index %}){ {% else %} ||{% endif %}
{% endfor %} return polkit.Result.AUTH_ADMIN_KEEP;
}
});

{% endif %}
@ -17,7 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}

{% if Deny_All == 1 %}
{% if Deny_All == '1' %}
polkit.addRule(function (action, subject) {
if (action.id == "org.freedesktop.udisks2.filesystem-mount" ||
action.id == "org.freedesktop.udisks2.filesystem-mount-system" ||
@ -1,20 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}
{{ home_dir }}/{{mntTarget}} {{ mount_file }} -t 120 --browse

@ -1,20 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}
{{ home_dir }}/.{{mntTarget}} {{ mount_file }} -t 120

@ -1,25 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}
{% if login %}
username={{ login }}
{% endif %}
{% if password %}
password={{ password }}
{% endif %}

@ -1,27 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}
{%- for drv in drives %}
{% if (drv.thisDrive != 'HIDE') %}
{% if drv.label %}
"{{ drv.label }}" -fstype=cifs,cruid=$USER,sec=krb5,noperm,cifsacl :{{ drv.path }}
{% else %}
"{{ drv.dir }}" -fstype=cifs,cruid=$USER,sec=krb5,noperm,cifsacl :{{ drv.path }}
{% endif %}
{% endif %}
{% endfor %}
@ -1,27 +0,0 @@
{#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2022 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#}
{%- for drv in drives %}
{% if (drv.thisDrive == 'HIDE') %}
{% if drv.label %}
"{{ drv.label }}" -fstype=cifs,cruid=$USER,sec=krb5,noperm,cifsacl :{{ drv.path }}
{% else %}
"{{ drv.dir }}" -fstype=cifs,cruid=$USER,sec=krb5,noperm,cifsacl :{{ drv.path }}
{% endif %}
{% endif %}
{% endfor %}
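The two autofs map templates above emit one CIFS mount line per visible (or hidden) drive object. A hedged sketch of how such a template renders, using a simplified single-branch template and invented drive values:

# Sketch: rendering a simplified autofs map template; drive values are invented.
from types import SimpleNamespace
from jinja2 import Template

autofs_template = Template(
    '{%- for drv in drives %}\n'
    '{% if (drv.thisDrive != "HIDE") %}'
    '"{{ drv.dir }}" -fstype=cifs,cruid=$USER,sec=krb5,noperm,cifsacl :{{ drv.path }}\n'
    '{% endif %}'
    '{% endfor %}')

drives = [
    SimpleNamespace(thisDrive='SHOW', dir='share', path='//dc0.example.org/share'),
    SimpleNamespace(thisDrive='HIDE', dir='hidden', path='//dc0.example.org/hidden'),
]

print(autofs_template.render(drives=drives))  # only the non-hidden drive produces a line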
@ -1,39 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import unittest

from frontend.appliers.rpm import rpm

class PackageTestCase(unittest.TestCase):
'''
Semi-integrational tests for packages installation/removing
'''
def test_package_not_exist(self):
packages_for_install = 'dummy1 dummy2'
packages_for_remove = 'dummy3'

test_rpm = rpm(packages_for_install, packages_for_remove)
test_rpm.apply()

def test_install_remove_same_package(self):
packages_for_install = 'gotop'
packages_for_remove = 'gotop'

test_rpm = rpm(packages_for_install, packages_for_remove)
test_rpm.apply()
@ -1,3 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Printers clsid="{1F577D12-3D1B-471e-A1B7-060317597B9C}"><PortPrinter clsid="{C3A739D2-4A44-401e-9F9D-88E5E77DFB3E}" name="10.64.128.250" status="10.64.128.250" image="0" changed="2020-01-23 11:48:07" uid="{88D998C2-9875-4278-A607-EC828839EFCE}" userContext="1" bypassErrors="1"><Properties lprQueue="" snmpCommunity="public" protocol="PROTOCOL_RAWTCP_TYPE" portNumber="9100" doubleSpool="0" snmpEnabled="0" snmpDevIndex="1" ipAddress="10.64.128.250" action="C" location="" localName="printer" comment="" default="1" skipLocal="0" useDNS="0" path="\\prnt" deleteAll="0"/><Filters><FilterGroup bool="AND" not="0" name="DOMAIN\Domain Users" sid="S-1-5-21-3359553909-270469630-9462315-513" userContext="1" primaryGroup="0" localGroup="0"/></Filters></PortPrinter>
</Printers>
Binary file not shown.
Binary file not shown.
@ -1,3 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<ScheduledTasks clsid="{CC63F200-7309-4ba0-B154-A71CD118DBCC}"><TaskV2 clsid="{D8896631-B747-47a7-84A6-C155337F3BC8}" name="mytask" image="2" changed="2020-01-24 13:06:25" uid="{0DBF3CAA-3DCF-4AAA-A52F-82B010B35380}"><Properties action="U" name="mytask" runAs="%LogonDomain%\%LogonUser%" logonType="InteractiveToken"><Task version="1.3"><RegistrationInfo><Author>DOMAIN\samba</Author><Description></Description></RegistrationInfo><Principals><Principal id="Author"><UserId>%LogonDomain%\%LogonUser%</UserId><LogonType>InteractiveToken</LogonType><RunLevel>HighestAvailable</RunLevel></Principal></Principals><Settings><IdleSettings><Duration>PT10M</Duration><WaitTimeout>PT1H</WaitTimeout><StopOnIdleEnd>true</StopOnIdleEnd><RestartOnIdle>false</RestartOnIdle></IdleSettings><MultipleInstancesPolicy>IgnoreNew</MultipleInstancesPolicy><DisallowStartIfOnBatteries>true</DisallowStartIfOnBatteries><StopIfGoingOnBatteries>true</StopIfGoingOnBatteries><AllowHardTerminate>true</AllowHardTerminate><AllowStartOnDemand>true</AllowStartOnDemand><Enabled>true</Enabled><Hidden>false</Hidden><ExecutionTimeLimit>P3D</ExecutionTimeLimit><Priority>7</Priority></Settings><Triggers><CalendarTrigger><StartBoundary>2020-01-24T14:59:48</StartBoundary><Enabled>true</Enabled><ScheduleByDay><DaysInterval>1</DaysInterval></ScheduleByDay></CalendarTrigger></Triggers><Actions><Exec><Command>C:\Program Files (x86)\Google\Chrome\Application\chrome.exe</Command></Exec></Actions></Task></Properties></TaskV2>
</ScheduledTasks>
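Printers.xml and ScheduledTasks.xml above are Group Policy Preferences fixtures used by the tests. A hedged sketch of pulling the printer attributes out of such a file with the standard library; the fixture path is an assumption about the repository layout:

# Sketch: reading printer properties from a GPP Printers.xml fixture.
from xml.etree import ElementTree

tree = ElementTree.parse('test/gpt/data/Printers.xml')  # assumed fixture location
for printer in tree.getroot().findall('PortPrinter'):
    props = printer.find('Properties')
    print(printer.get('name'), props.get('ipAddress'), props.get('localName'))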
@ -1,42 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import unittest
import unittest.mock

import os

import util.paths
import json


class GptDrivesTestCase(unittest.TestCase):
@unittest.mock.patch('util.paths.cache_dir')
def test_drive_reader(self, cdir_mock):
'''
Test functionality to read objects from Shortcuts.xml
'''
cdir_mock.return_value = '/var/cache/gpupdate'

import gpt.drives
testdata_path = '{}/test/gpt/data/Drives.xml'.format(os.getcwd())
drvs = gpt.drives.read_drives(testdata_path)

json_obj = json.loads(drvs[0].to_json())
self.assertIsNotNone(json_obj['drive'])

@ -1,32 +0,0 @@
#
# GPOA - GPO Applier for Linux
#
# Copyright (C) 2019-2020 BaseALT Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


import unittest

from util.xdg import (
xdg_get_desktop_user
)

class XDGTestCase(unittest.TestCase):
def test_get_desktop_dir(self):
print('Machine desktop:')
print(xdg_get_desktop_user(None))
print('Users desktop:')
print(xdg_get_desktop_user('nir'))

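The deleted test modules are plain unittest cases, so they can be collected with the standard loader; the start directory and file pattern below are assumptions about the repository layout, not settings taken from the project:

# Sketch: discovering and running the unittest-based tests; paths are assumed.
import unittest

suite = unittest.defaultTestLoader.discover(start_dir='gpoa/test', pattern='*.py')
unittest.TextTestRunner(verbosity=2).run(suite)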
Some files were not shown because too many files have changed in this diff.