
lvmdbusd: Make lvm shell read more robust

Make sure JSON is correct before we stop trying to read.
Tony Asleson 2016-11-29 11:07:21 -06:00
parent b0bda09005
commit 25b5413f89
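
The pattern the patch introduces, shown as a minimal standalone sketch: keep accumulating report output and only stop once the buffer parses as valid JSON, giving up after a bounded number of extra passes. The function name read_report, the chunks argument, and the sample data below are illustrative only; the real code reads the lvm shell's report file descriptor via select() and also waits for the shell prompt.

import json

def read_report(chunks, extra_passes=3):
	report = ""
	for chunk in chunks:
		report += chunk
		try:
			# Stop as soon as the accumulated text is complete JSON.
			return json.loads(report)
		except ValueError:
			# Partial JSON so far; spend one of the extra passes and retry.
			extra_passes -= 1
			if extra_passes <= 0:
				break
	if report:
		raise ValueError("Invalid json: %s" % report)
	raise ValueError("lvm returned no JSON output!")

# The report may arrive split across several reads and still parse:
print(read_report(['{"log": [{"log_ret', '_code": "1"}]}']))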


@@ -42,18 +42,22 @@ def _quote_arg(arg):
 
 class LVMShellProxy(object):
 
+	# Read until we get prompt back and a result
+	# @param: no_output Caller expects no output to report FD
+	# Returns stdout, report, stderr (report is JSON!)
 	def _read_until_prompt(self, no_output=False):
 		stdout = ""
 		report = ""
 		stderr = ""
 		keep_reading = True
-		extra_passes = 2
+		extra_passes = 3
+		report_json = {}
+		prev_report_len = 0
 
 		# Try reading from all FDs to prevent one from filling up and causing
-		# a hang. We were assuming that we won't get the lvm prompt back
-		# until we have already received all the output from stderr and the
-		# report descriptor too, this is an incorrect assumption. Lvm will
-		# return the prompt before we get the report!
+		# a hang. Keep reading until we get the prompt back and the report
+		# FD does not contain valid JSON
 		while keep_reading:
 			try:
 				rd_fd = [
@@ -87,26 +91,39 @@ class LVMShellProxy(object):
 					raise Exception(self.lvm_shell.returncode, "%s" % stderr)
 
 				if stdout.endswith(SHELL_PROMPT):
+					# It appears that lvm doesn't write the report and flush
+					# that before it writes the shell prompt as occasionally
+					# we get the prompt with no report.
 					if no_output:
 						keep_reading = False
 					else:
-						# Most of the time we have data, if we have none lets
-						# take another spin and hope we get it.
-						if len(report) != 0:
-							keep_reading = False
-						else:
-							log_error("RACE!", 'bg_black', 'fg_light_red')
-							extra_passes -= 1
-							if extra_passes <= 0:
-								keep_reading = False
+						cur_report_len = len(report)
+						if cur_report_len != 0:
+							# Only bother to parse if we have more data
+							if prev_report_len != cur_report_len:
+								prev_report_len = cur_report_len
+								# Parse the JSON if it's good we are done,
+								# if not we will try to read some more.
+								try:
+									report_json = json.loads(report)
+									keep_reading = False
+								except ValueError:
+									pass
+
+						if keep_reading:
+							extra_passes -= 1
+							if extra_passes <= 0:
+								if len(report):
+									raise ValueError("Invalid json: %s" %
+										report)
+								else:
+									raise ValueError(
+										"lvm returned no JSON output!")
 
 			except IOError as ioe:
 				log_debug(str(ioe))
 				pass
 
-		return stdout, report, stderr
+		return stdout, report_json, stderr
 
 	def _write_cmd(self, cmd):
 		cmd_bytes = bytes(cmd, "utf-8")
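
A side effect of the hunk above: _read_until_prompt() now returns the parsed report as a dict (report_json) and raises ValueError when the retry budget runs out, so the callers changed in the hunks below index the result directly instead of calling json.loads() themselves. A small caller-side illustration (the sample dict is invented):

# The second return value is already a dict, so the last log entry's
# return code can be tested directly.
report_json = {"log": [{"log_ret_code": "1"}]}

rc = 1
if 'log' in report_json:
	if report_json['log'][-1:][0]['log_ret_code'] == '1':
		rc = 0
print(rc)  # -> 0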
@@ -169,15 +186,11 @@ class LVMShellProxy(object):
 		self._write_cmd('lastlog\n')
 
 		# read everything from the STDOUT to the next prompt
-		stdout, report, stderr = self._read_until_prompt()
-		try:
-			log = json.loads(report)
-			if 'log' in log:
+		stdout, report_json, stderr = self._read_until_prompt()
+
+		if 'log' in report_json:
 			error_msg = ""
 			# Walk the entire log array and build an error string
-			for log_entry in log['log']:
+			for log_entry in report_json['log']:
 				if log_entry['log_type'] == "error":
 					if error_msg:
 						error_msg += ', ' + log_entry['log_message']
@@ -187,15 +200,10 @@ class LVMShellProxy(object):
 				return error_msg
 
 		return 'No error reason provided! (missing "log" section)'
-		except ValueError:
-			log_error("Invalid JSON returned from LVM")
-			log_error("BEGIN>>\n%s\n<<END" % report)
-			return "Invalid JSON returned from LVM when retrieving exit code"
 
 	def call_lvm(self, argv, debug=False):
 		rc = 1
 		error_msg = ""
-		json_result = ""
 
 		if self.lvm_shell.poll():
 			raise Exception(
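
For reference, the error-string assembly that get_error_msg() now performs directly on the parsed report, pulled out as a self-contained sketch. The sample report dict is invented, and the else branch that seeds error_msg is assumed (it falls outside the hunks shown above):

report_json = {
	"log": [
		{"log_type": "error", "log_message": "Failed to find device"},
		{"log_type": "error", "log_message": "Command failed"},
	]
}

error_msg = ""
for log_entry in report_json['log']:
	if log_entry['log_type'] == "error":
		if error_msg:
			error_msg += ', ' + log_entry['log_message']
		else:
			error_msg = log_entry['log_message']

print(error_msg)  # -> Failed to find device, Command failed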
@@ -210,27 +218,21 @@ class LVMShellProxy(object):
 		self._write_cmd(cmd)
 
 		# read everything from the STDOUT to the next prompt
-		stdout, report, stderr = self._read_until_prompt()
+		stdout, report_json, stderr = self._read_until_prompt()
 
 		# Parse the report to see what happened
-		if report and len(report):
-			try:
-				json_result = json.loads(report)
-				if 'log' in json_result:
-					if json_result['log'][-1:][0]['log_ret_code'] == '1':
-						rc = 0
-					else:
-						error_msg = self.get_error_msg()
-			except ValueError:
-				# Bubble up the invalid json.
-				error_msg = "Invalid json %s" % report
+		if 'log' in report_json:
+			if report_json['log'][-1:][0]['log_ret_code'] == '1':
+				rc = 0
+			else:
+				error_msg = self.get_error_msg()
 
 		if debug or rc != 0:
 			log_error(('CMD: %s' % cmd))
 			log_error(("EC = %d" % rc))
 			log_error(("ERROR_MSG=\n %s\n" % error_msg))
 
-		return rc, json_result, error_msg
+		return rc, report_json, error_msg
 
 	def exit_shell(self):
 		try: