diff --git a/api/api_helpers.py b/api/api_helpers.py
index c8d5848b2..6d97ea62e 100644
--- a/api/api_helpers.py
+++ b/api/api_helpers.py
@@ -331,7 +331,7 @@ def get_phase_stats(ids):
     query = """
             SELECT
                 a.phase, a.metric, a.detail_name, a.value, a.type, a.max_value, a.min_value, a.unit,
-                b.uri, c.description, b.filename, b.commit_hash, b.branch
+                b.uri, c.description, b.filename, b.commit_hash, b.branch, b.monitor_run
             FROM phase_stats as a
             LEFT JOIN runs as b on b.id = a.run_id
             LEFT JOIN machines as c on c.id = b.machine_id
@@ -452,12 +452,14 @@ def get_phase_stats_object(phase_stats, case):
     for phase_stat in phase_stats:
         [
             phase, metric_name, detail_name, value, metric_type, max_value, min_value, unit,
-            repo, machine_description, filename, commit_hash, branch
+            repo, machine_description, filename, commit_hash, branch, monitor_run
        ] = phase_stat # unpack

        phase = phase.split('_', maxsplit=1)[1] # remove the 001_ prepended stuff again, which is only for ordering

-        if case == 'Repository':
+        if monitor_run:
+            key = 'monitor'
+        elif case == 'Repository':
             key = repo # Case D : RequirementsEngineering Case
         elif case == 'Branch':
             key = branch # Case C_3 : SoftwareDeveloper Case
diff --git a/docker/structure.sql b/docker/structure.sql
index c8dedcbff..91d8072b7 100644
--- a/docker/structure.sql
+++ b/docker/structure.sql
@@ -67,6 +67,7 @@ CREATE TABLE runs (
     logs text,
     invalid_run text,
     failed boolean DEFAULT false,
+    monitor_run boolean DEFAULT false,
     created_at timestamp with time zone DEFAULT now(),
     updated_at timestamp with time zone
 );
diff --git a/metric_providers/base.py b/metric_providers/base.py
index d2fec7aab..1e29db3b8 100644
--- a/metric_providers/base.py
+++ b/metric_providers/base.py
@@ -24,6 +24,8 @@ def __init__(
         sudo=False,
         disable_buffer=True,
         skip_check=False,
+        rootless=False,
+        monitor=False,
     ):
         self._metric_name = metric_name
         self._metrics = metrics
@@ -34,7 +36,8 @@ def __init__(
         self._sudo = sudo
         self._has_started = False
         self._disable_buffer = disable_buffer
-        self._rootless = None
+        self._monitor = monitor
+        self._rootless = rootless
         self._skip_check = skip_check

         self._tmp_folder = '/tmp/green-metrics-tool'
@@ -159,14 +162,18 @@ def start_profiling(self, containers=None):
             call_string += ' ' # space at start
             call_string += ' '.join(self._extra_switches)

+        if self._monitor is True:
+            call_string += ' --monitor '
         # This needs refactoring see https://github.com/green-coding-berlin/green-metrics-tool/issues/45
-        if (self._metrics.get('container_id') is not None) and (containers is not None):
+        elif (self._metrics.get('container_id') is not None) and (containers is not None):
             call_string += ' -s '
             call_string += ','.join(containers.keys())

         if self._rootless is True:
             call_string += ' --rootless '
+
+        call_string += f" > {self._filename}"

         if platform.system() == "Linux":
diff --git a/metric_providers/cpu/time/cgroup/container/provider.py b/metric_providers/cpu/time/cgroup/container/provider.py
index f377f73bc..09c6e336a 100644
--- a/metric_providers/cpu/time/cgroup/container/provider.py
+++ b/metric_providers/cpu/time/cgroup/container/provider.py
@@ -3,7 +3,7 @@ from metric_providers.base import BaseMetricProvider

 class CpuTimeCgroupContainerProvider(BaseMetricProvider):
-    def __init__(self, resolution, rootless=False, skip_check=False):
+    def __init__(self, resolution, rootless=False, skip_check=False, monitor=False):
         super().__init__(
             metric_name='cpu_time_cgroup_container',
             metrics={'time': int, 'value': int, 'container_id': str},
@@ -11,5 +11,6 @@ def __init__(self, resolution, rootless=False, skip_check=False):
             unit='us',
             current_dir=os.path.dirname(os.path.abspath(__file__)),
             skip_check=skip_check,
+            rootless=rootless,
+            monitor=monitor,
         )
-        self._rootless = rootless
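A note on the start_profiling() change in metric_providers/base.py above: the switches now sit in an if/elif, so monitor mode deliberately suppresses the per-container -s list. A condensed sketch of the resulting command-line composition (the standalone function and binary name here are illustrative, not part of the patch):

    # Illustrative only: shows which switches end up on the provider command line.
    def build_call_string(binary, monitor=False, containers=None, rootless=False, outfile='/tmp/out'):
        call_string = binary
        if monitor:
            call_string += ' --monitor '                          # monitor mode needs no container list
        elif containers is not None:
            call_string += ' -s ' + ','.join(containers.keys())   # classic mode passes explicit ids
        if rootless:
            call_string += ' --rootless '
        return call_string + f" > {outfile}"

    # build_call_string('metric-provider-binary', monitor=True, rootless=True)
    # => "metric-provider-binary --monitor  --rootless  > /tmp/out"

With --monitor on the command line, container discovery is left to the provider binary itself, which is what the scan_directory() change below implements.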
diff --git a/metric_providers/cpu/utilization/cgroup/container/provider.py b/metric_providers/cpu/utilization/cgroup/container/provider.py
index 068756f30..3a2fec6fe 100644
--- a/metric_providers/cpu/utilization/cgroup/container/provider.py
+++ b/metric_providers/cpu/utilization/cgroup/container/provider.py
@@ -3,13 +3,14 @@ from metric_providers.base import BaseMetricProvider

 class CpuUtilizationCgroupContainerProvider(BaseMetricProvider):
-    def __init__(self, resolution, rootless=False, skip_check=False):
+    def __init__(self, resolution, rootless=False, skip_check=False, monitor=False):
         super().__init__(
             metric_name='cpu_utilization_cgroup_container',
             metrics={'time': int, 'value': int, 'container_id': str},
             resolution=resolution,
             unit='Ratio',
             current_dir=os.path.dirname(os.path.abspath(__file__)),
-            skip_check = skip_check,
+            skip_check=skip_check,
+            rootless=rootless,
+            monitor=monitor,
         )
-        self._rootless = rootless
diff --git a/metric_providers/cpu/utilization/cgroup/container/source.c b/metric_providers/cpu/utilization/cgroup/container/source.c
index f30f15452..95c5b1680 100644
--- a/metric_providers/cpu/utilization/cgroup/container/source.c
+++ b/metric_providers/cpu/utilization/cgroup/container/source.c
@@ -6,10 +6,12 @@
 #include
 #include <string.h> // for strtok
 #include
+#include <dirent.h>

 typedef struct container_t { // struct is a specification and this static makes no sense here
     char path[BUFSIZ];
     char *id;
+    int active;
 } container_t;

 // All variables are made static, because we believe that this will
@@ -40,27 +42,57 @@ static long int read_cpu_cgroup(FILE *fd) {
     return cpu_usage;
 }

-static long int get_cpu_stat(char* filename, int mode) {
-    FILE* fd = NULL;
-    long int result=-1;
+static int scan_directory(container_t** containers, int rootless_mode) {
+    struct dirent* entry;
+    size_t docker_prefix_len = strlen("docker-");
+    size_t scope_suffix_len = strlen(".scope");
+    int length = 0;
+    DIR* dir = NULL;
+    char my_path[BUFSIZ] = "";

-    fd = fopen(filename, "r");
-    if ( fd == NULL) {
-        fprintf(stderr, "Error - Could not open path for reading: %s. Maybe the container is not running anymore? Are you using --rootless mode? Errno: %d\n", filename, errno);
-        exit(1);
-    }
-    if(mode == 1) {
-        result = read_cpu_cgroup(fd);
-        // printf("Got cgroup: %ld", result);
+    if(rootless_mode) {
+        sprintf(my_path, "/sys/fs/cgroup/user.slice/user-%d.slice/user@%d.service/user.slice/", user_id, user_id);
     } else {
-        result = read_cpu_proc(fd);
-        // printf("Got /proc/stat: %ld", result);
+        sprintf(my_path, "/sys/fs/cgroup/system.slice/");
     }
-    fclose(fd);
-    return result;
-}

+    dir = opendir(my_path);
+    if (!dir) {
+        fprintf(stderr, "Unable to scan directory for containers. Could not find folder: %s\n", my_path);
+        exit(-1);
+    }
+
+    *containers = malloc(sizeof(container_t));
+    //printf("old length: %d\n", length);
+
+    while ((entry = readdir(dir)) != NULL) {
+        // Check if the entry is a directory and matches the format
+        if (entry->d_type == DT_DIR &&
+            strstr(entry->d_name, "docker-") == entry->d_name &&
+            strstr(entry->d_name + docker_prefix_len, ".scope") != NULL &&
+            strcmp(entry->d_name + strlen(entry->d_name) - scope_suffix_len, ".scope") == 0) {
+            // printf("Entry %s\n", entry->d_name);
+            length++;
+            *containers = realloc(*containers, length * sizeof(container_t));
+            (*containers)[length-1].id = strdup(entry->d_name);
+            (*containers)[length-1].active = 1;
+            if(rootless_mode) {
+                sprintf((*containers)[length-1].path,
+                    "/sys/fs/cgroup/user.slice/user-%d.slice/user@%d.service/user.slice/%s/cpu.stat",
+                    user_id, user_id, entry->d_name);
+            } else {
+                sprintf((*containers)[length-1].path,
+                    "/sys/fs/cgroup/system.slice/%s/cpu.stat",
+                    entry->d_name);
+            }
+        }
+    }
+    //printf("Found new length: %d\n", length);
+
+    closedir(dir);
+    return length;
+}

 static int output_stats(container_t* containers, int length) {
@@ -69,28 +101,58 @@ static int output_stats(container_t* containers, int length) {
     long int cpu_readings_after[length];
     long int container_reading;

+    FILE *fd = NULL;
+    struct timeval now;
     int i;

-    // Get Energy Readings, set timestamp mark
     gettimeofday(&now, NULL);
+    for(i=0; i<length; i++) {

[...] > 0:
            raise MetricProviderConfigurationError('Another instance of powermetrics is already running on the system!\n'
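In Python terms, the new scan_directory() above does roughly the following (a readability sketch, not part of the patch; it assumes the same cgroup v2 paths the C code uses):

    import os

    def scan_containers(rootless_mode, user_id):
        # Mirror of scan_directory(): collect docker-*.scope cgroups and their cpu.stat paths
        if rootless_mode:
            base = f"/sys/fs/cgroup/user.slice/user-{user_id}.slice/user@{user_id}.service/user.slice/"
        else:
            base = "/sys/fs/cgroup/system.slice/"
        containers = []
        for entry in os.listdir(base):
            full = os.path.join(base, entry)
            if os.path.isdir(full) and entry.startswith("docker-") and entry.endswith(".scope"):
                containers.append({"id": entry, "path": os.path.join(full, "cpu.stat"), "active": True})
        return containers

This dynamic discovery is what makes --monitor possible: instead of being handed container ids via -s, the provider finds every running docker-*.scope cgroup on its own.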
diff --git a/migrations/2024_05_15_monitor_mode.sql b/migrations/2024_05_15_monitor_mode.sql
new file mode 100644
index 000000000..4f06e55c9
--- /dev/null
+++ b/migrations/2024_05_15_monitor_mode.sql
@@ -0,0 +1 @@
+ALTER TABLE "runs" ADD COLUMN "monitor_run" boolean DEFAULT false;
diff --git a/monitor.py b/monitor.py
new file mode 100644
index 000000000..ec02c6770
--- /dev/null
+++ b/monitor.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import faulthandler
+faulthandler.enable() # will catch segfaults and write to stderr
+
+from lib.venv_checker import check_venv
+check_venv() # this check must even run before __main__ as imports might not get resolved
+
+import subprocess
+import os
+import sys
+import time
+from pathlib import Path
+import shutil
+
+CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+from lib import error_helpers
+from lib.terminal_colors import TerminalColors
+from lib.global_config import GlobalConfig
+
+from runner import Runner
+
+class Monitor(Runner):
+
+    def __init__(self,
+        name, debug_mode=False,
+        skip_system_checks=False,
+        verbose_provider_boot=False,
+    ):
+
+        super().__init__(name=name, uri='http://metrics.green-coding.internal:9142/', uri_type=None, filename='not-set', branch='not-set',
+            debug_mode=debug_mode, skip_system_checks=skip_system_checks, verbose_provider_boot=verbose_provider_boot
+        )
+
+    def monitor(self):
+        '''
+        The monitor method is just a wrapper for the intended sequential flow of a GMT monitoring run.
+        It is mainly designed so the steps can be called individually for testing, but also
+        in case the flow ever needs to repeat certain blocks.
+
+        The runner is to be thought of as a state machine.
+
+        Methods will thus behave differently depending on the arguments the runner was instantiated with.
+        '''
+        runtime_phase_started = False
+        try:
+            config = GlobalConfig().config
+            self.check_system('start')
+            # self.initialize_folder(self._tmp_folder)
+            # self.checkout_repository()
+            self.initialize_run()
+            #self.initial_parse()
+            self.import_metric_providers(monitor=True)
+            #self.populate_image_names()
+            self.prepare_docker()
+            # self.check_running_containers()
+            # self.remove_docker_images()
+            # self.download_dependencies()
+            self.register_machine_id()
+            self.update_and_insert_specs()
+            if self._debugger.active:
+                self._debugger.pause('Initial load complete. Waiting to start metric providers')
+
+            self.start_metric_providers(allow_other=True, allow_container=False)
+            if self._debugger.active:
+                self._debugger.pause('metric-providers (non-container) start complete. Waiting to start measurement')
+
+            self.custom_sleep(config['measurement']['idle-time-start'])
+
+            self.start_measurement()
+
+            self.start_metric_providers(allow_container=True, allow_other=False)
+
+            self.start_phase('[RUNTIME]', transition=False)
+            runtime_phase_started = True
+            # TODO: Trigger
+
+            print('Monitoring active ... press CTRL+C to stop and save data.')
+            while True:
+                time.sleep(3600)
+
+        except KeyboardInterrupt as exc:
+            raise exc
+        except BaseException as exc:
+            self.add_to_log(exc.__class__.__name__, str(exc))
+            self.set_run_failed()
+            raise exc
+        finally:
+            try:
+                if runtime_phase_started:
+                    self.end_phase('[RUNTIME]')
+                self.end_measurement()
+                self.store_phases()
+                self.update_start_and_end_times()
+            except BaseException as exc:
+                self.add_to_log(exc.__class__.__name__, str(exc))
+                raise exc
+            finally:
+                self._handle_except()
+
+        return self._run_id
+
+if __name__ == '__main__':
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--name', type=str, help='A name which will be stored to the database to discern this run from others')
+    parser.add_argument('--config-override', type=str, help='Override the configuration file with the passed in yml file. Must be located in the same directory as the regular configuration file. Pass in only the name.')
+    parser.add_argument('--file-cleanup', action='store_true', help='Delete all temporary files that the runner produced')
+    parser.add_argument('--debug', action='store_true', help='Activate steppable debug mode')
+    parser.add_argument('--print-logs', action='store_true', help='Prints the container and process logs to stdout')
+    parser.add_argument('--skip-system-checks', action='store_true', help='Skip checking the system if the GMT can run')
+    parser.add_argument('--verbose-provider-boot', action='store_true', help='Boot metric providers gradually')
+
+    args = parser.parse_args()
+
+    if args.name is None:
+        parser.print_help()
+        error_helpers.log_error('Please supply --name')
+        sys.exit(1)
+
+    if args.config_override is not None:
+        if args.config_override[-4:] != '.yml':
+            parser.print_help()
+            error_helpers.log_error('Config override file must be a yml file')
+            sys.exit(1)
+        if not Path(f"{CURRENT_DIR}/{args.config_override}").is_file():
+            parser.print_help()
+            error_helpers.log_error(f"Could not find config override file on local system. Please double check: {CURRENT_DIR}/{args.config_override}")
+            sys.exit(1)
+        GlobalConfig(config_name=args.config_override)
+
+    runner = Monitor(
+        args.name,
+        debug_mode=args.debug,
+        skip_system_checks=args.skip_system_checks,
+    )
+
+    # Using a very broad exception makes sense in this case as we have excepted all the specific ones before
+    #pylint: disable=broad-except
+    try:
+        runner.monitor() # Start main code
+
+        # this code should live at a different position.
+        # From a user perspective it makes perfect sense to run both jobs directly after each other
+        # In a cloud setup it however makes sense to free the measurement machine as soon as possible
+        # So this code should be individually callable, separate from the monitor
+
+    except KeyboardInterrupt:
+        print(TerminalColors.HEADER, '\nCalculating and storing phases data. This can take a couple of seconds ...', TerminalColors.ENDC)
+
+        # get all the metrics from the measurements table grouped by metric
+        # loop over them issuing separate queries to the DB
+        from tools.phase_stats import build_and_store_phase_stats
+
+        print("Run id is", runner._run_id)
+        print("Aggregating and uploading phase_stats. This can take a while for longer runs ...")
+        build_and_store_phase_stats(runner._run_id, runner._sci)
+
+        if not runner._dev_no_optimizations:
+            import optimization_providers.base
+            print(TerminalColors.HEADER, '\nImporting optimization reporters ...', TerminalColors.ENDC)
+            optimization_providers.base.import_reporters()
+
+            print(TerminalColors.HEADER, '\nRunning optimization reporters ...', TerminalColors.ENDC)
+
+            optimization_providers.base.run_reporters(runner._run_id, runner._tmp_folder, runner.get_optimizations_ignore())
+
+        if args.file_cleanup:
+            shutil.rmtree(runner._tmp_folder)
+
+        print(TerminalColors.OKGREEN, '\n\n####################################################################################')
+        print(f"Please access your report on the URL {GlobalConfig().config['cluster']['metrics_url']}/stats.html?id={runner._run_id}")
+        print('####################################################################################\n\n', TerminalColors.ENDC)
+
+    except FileNotFoundError as e:
+        error_helpers.log_error('File or executable not found', exception=e, run_id=runner._run_id)
+    except subprocess.CalledProcessError as e:
+        error_helpers.log_error('Command failed', stdout=e.stdout, stderr=e.stderr, run_id=runner._run_id)
+    except RuntimeError as e:
+        error_helpers.log_error('RuntimeError occurred in monitor.py', exception=e, run_id=runner._run_id)
+    except BaseException as e:
+        error_helpers.log_error('Base exception occurred in monitor.py', exception=e, run_id=runner._run_id)
+    finally:
+        if args.print_logs:
+            for container_id_outer, std_out in runner.get_logs().items():
+                print(f"Container logs of '{container_id_outer}':")
+                print(std_out)
+                print('\n-----------------------------\n')
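Assuming the argument parser above, a monitoring session would be started along these lines (the run name is illustrative):

    python3 monitor.py --name "idle-baseline-office-pc"

Stopping it with CTRL+C lands in the KeyboardInterrupt branch, which aggregates the phase_stats, optionally runs the optimization reporters, and prints the stats.html URL of the stored run.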
diff --git a/optimization_providers/base.py b/optimization_providers/base.py
index ef41472a0..0e7ef14f9 100644
--- a/optimization_providers/base.py
+++ b/optimization_providers/base.py
@@ -83,7 +83,7 @@ async def fetch_url(session, url):
     run, measurements, network, notes, phase_stats = await asyncio.gather(*tasks)

     #pylint: disable=no-member
-    return orjson.loads(run)['data'], orjson.loads(measurements)['data'], orjson.loads(network)['data'], orjson.loads(notes)['data'], orjson.loads(phase_stats)['data']
+    return orjson.loads(run)['data'], orjson.loads(measurements)['data'], orjson.loads(network).get('data', {}), orjson.loads(notes).get('data', {}), orjson.loads(phase_stats).get('data', {})

 #pylint: disable=dangerous-default-value
 def run_reporters(run_id, repo_path, optimizations_ignore=[]):
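The change from ['data'] to .get('data', {}) matters because a monitor run may legitimately produce API responses without network or notes data. In isolation (a two-line illustration, not from the patch):

    payload = {}                     # e.g. an API body that carries no 'data' key
    # payload['data']                # would raise KeyError and abort all reporters
    print(payload.get('data', {}))  # prints {} so downstream .get() chains keep working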
diff --git a/optimization_providers/durations/container.py b/optimization_providers/durations/container.py
index 3d1fd0a62..f456d344c 100644
--- a/optimization_providers/durations/container.py
+++ b/optimization_providers/durations/container.py
@@ -1,5 +1,4 @@
 from optimization_providers.base import Criticality, register_reporter
-from lib import error_helpers

 REPORTER_NAME = 'container-timings'
 REPORTER_ICON = 'clock'
@@ -11,10 +10,13 @@
 @register_reporter('container-build-time', Criticality.INFO, REPORTER_NAME, REPORTER_ICON, req_providers =[])
 def container_build_time(self, run, measurements, repo_path, network, notes, phases):
+    if len(run['phases']) < 2 or run['phases'][1]['name'] != '[INSTALLATION]':
+        self.add_optimization(
+            'Container build duration could not be analyzed',
+            'INSTALLATION phase was not present'
+        )
+        return

     installation_phase = run['phases'][1]
-    if installation_phase['name'] != '[INSTALLATION]':
-        error_helpers.log_error('Phase mapping in optimizations was not as expected', phases=run['phases'], run_id=run['id'])
-        raise RuntimeError('Phase mapping in optimizations was not as expected')

     duration = (installation_phase['end'] - installation_phase['start'])/1_000_000 # time is in microseconds
@@ -28,10 +30,13 @@
 @register_reporter('container-boot-time', Criticality.INFO, REPORTER_NAME, REPORTER_ICON, req_providers =[])
 def container_boot_time(self, run, measurements, repo_path, network, notes, phases):
+    if len(run['phases']) < 3 or run['phases'][2]['name'] != '[BOOT]':
+        self.add_optimization(
+            'Container boot duration could not be analyzed',
+            'BOOT phase was not present'
+        )
+        return

     boot_phase = run['phases'][2]
-    if boot_phase['name'] != '[BOOT]':
-        error_helpers.log_error('Phase mapping in optimizations was not as expected', phases=run['phases'], run_id=run['id'])
-        raise RuntimeError('Phase mapping in optimizations was not as expected')

     duration = (boot_phase['end'] - boot_phase['start'])/1_000_000 # time is in microseconds
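For reference, these reporters assume run['phases'] is an ordered list of dicts with name/start/end keys and microsecond timestamps, which is what the duration computation above relies on (values below are made up):

    phases = [
        {'name': '[BASELINE]',     'start': 1_700_000_000_000_000, 'end': 1_700_000_060_000_000},
        {'name': '[INSTALLATION]', 'start': 1_700_000_060_000_000, 'end': 1_700_000_090_000_000},
        {'name': '[BOOT]',         'start': 1_700_000_090_000_000, 'end': 1_700_000_095_000_000},
    ]
    duration = (phases[1]['end'] - phases[1]['start']) / 1_000_000  # 30.0 seconds

A monitor run only ever opens [RUNTIME], so the index-based lookups can no longer assume that INSTALLATION and BOOT exist; hence the early return with an informational message instead of a hard RuntimeError. Note that the new guards must compare run['phases'][n]['name'] against the phase label, as the original code did, since the list entries are dicts rather than strings.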
diff --git a/optimization_providers/resources/utilization.py b/optimization_providers/resources/utilization.py
index ab4f8b3ca..8ca8fac92 100644
--- a/optimization_providers/resources/utilization.py
+++ b/optimization_providers/resources/utilization.py
@@ -38,7 +38,7 @@ def container_memory_utilization(self, run, measurements, repo_path, network, no
         if x := d.get('deploy', {}).get('resources', {}).get('limits', {}).get('memory', None):
             mem[s] = memory_to_bytes(x)

-    for service, measurement_stats in phases.get('data').get('[RUNTIME]').get('memory_total_cgroup_container').get('data').items():
+    for service, measurement_stats in phases.get('data', {}).get('[RUNTIME]', {}).get('memory_total_cgroup_container', {}).get('data', {}).items():
         if not service in mem:
             self.add_optimization(
                 f"You are not using Memory limits definitions on {service}",
@@ -46,7 +46,7 @@ def container_memory_utilization(self, run, measurements, repo_path, network, no
             )
             continue

-        data = measurement_stats.get('data')
+        data = measurement_stats.get('data', {})
         first_item = next(iter(data))
         actual_mem_max = data[first_item].get('max', None)
         if not actual_mem_max:
@@ -67,12 +67,12 @@ def container_memory_utilization(self, run, measurements, repo_path, network, no
 def container_cpu_utilization(self, run, measurements, repo_path, network, notes, phases):
     cpus = {}

-    for s, d in run.get('usage_scenario').get('services').items():
+    for s, d in run.get('usage_scenario', {}).get('services', {}).items():
         if x := d.get('deploy', {}).get('resources', {}).get('limits', {}).get('cpus', None):
             cpus[s] = x

-    for service, measurement_stats in phases.get('data').get('[RUNTIME]').get('cpu_utilization_cgroup_container').get('data').items():
+    for service, measurement_stats in phases.get('data', {}).get('[RUNTIME]', {}).get('cpu_utilization_cgroup_container', {}).get('data', {}).items():
         if not service in cpus:
             self.add_optimization(
                 f"You are not using CPU limits definitions on {service}",
@@ -80,7 +80,7 @@ def container_cpu_utilization(self, run, measurements, repo_path, network, notes
             )
             continue

-        data = measurement_stats.get('data')
+        data = measurement_stats.get('data', {})
         first_item = next(iter(data))
         actual_cpu_mean = data[first_item].get('mean', None)
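The chained defaults guard the nested phase-stats layout these loops traverse. Roughly, with the keys from the code above and illustrative values:

    phases = {'data': {'[RUNTIME]': {'cpu_utilization_cgroup_container': {'data': {
        'web': {'data': {'web_detail': {'mean': 42, 'max': 97}}},
    }}}}}

    # Every missing level now degrades to {}, so the loop simply iterates nothing
    # for runs (such as monitor runs) where a metric or phase is absent:
    services = (phases.get('data', {})
                      .get('[RUNTIME]', {})
                      .get('cpu_utilization_cgroup_container', {})
                      .get('data', {}))
    for service, measurement_stats in services.items():
        data = measurement_stats.get('data', {})

One caveat remains in the reporter itself: next(iter(data)) still raises StopIteration if data ends up empty, so the inner default only helps when at least one detail entry is present.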
diff --git a/runner.py b/runner.py
index 7cfcbe517..f7ca2f03a 100755
--- a/runner.py
+++ b/runner.py
@@ -157,12 +157,15 @@ def custom_sleep(self, sleep_time):

     def initialize_run(self):
         # We issue a fetch_one() instead of a query() here, cause we want to get the RUN_ID
+        monitor_run = self.__class__.__name__ == 'Monitor'

         # we also update the branch here again, as this might not be main in case of local filesystem
         self._run_id = DB().fetch_one("""
-                INSERT INTO runs (job_id, name, uri, email, branch, filename, commit_hash, commit_timestamp, runner_arguments, created_at)
-                VALUES (%s, %s, %s, 'manual', %s, %s, %s, %s, %s, NOW())
+                INSERT INTO runs (job_id, name, uri, email, branch, filename, commit_hash, commit_timestamp, runner_arguments, monitor_run, created_at)
+                VALUES (%s, %s, %s, 'manual', %s, %s, %s, %s, %s, %s, NOW())
                 RETURNING id
-                """, params=(self._job_id, self._name, self._uri, self._branch, self._original_filename, self._commit_hash, self._commit_timestamp, json.dumps(self._arguments)))[0]
+                """,
+                params=(self._job_id, self._name, self._uri, self._branch, self._original_filename, self._commit_hash, self._commit_timestamp, json.dumps(self._arguments), monitor_run),
+            )[0]
         return self._run_id

     def get_optimizations_ignore(self):
@@ -459,7 +462,7 @@ def update_and_insert_specs(self):
                 self._run_id)
         )

-    def import_metric_providers(self):
+    def import_metric_providers(self, monitor=False):
         if self._dev_no_metrics:
             print(TerminalColors.HEADER, '\nSkipping import of metric providers', TerminalColors.ENDC)
             return
@@ -487,6 +490,8 @@ def import_metric_providers(self):

             if rootless and '.cgroup.' in module_path:
                 conf['rootless'] = True
+            if monitor and '.cgroup.' in module_path:
+                conf['monitor'] = True

             if self._skip_system_checks:
                 conf['skip_check'] = True
@@ -1040,8 +1045,9 @@ def start_metric_providers(self, allow_container=True, allow_other=True):
                 raise RuntimeError(f"Stderr on {metric_provider.__class__.__name__} was NOT empty: {stderr_read}")

-    def start_phase(self, phase, transition = True):
+    def start_phase(self, phase, transition=True):
         config = GlobalConfig().config
+
         print(TerminalColors.HEADER, f"\nStarting phase {phase}.", TerminalColors.ENDC)

         if transition:
@@ -1448,6 +1454,39 @@ def cleanup(self, continue_measurement=False):

         print(TerminalColors.OKBLUE, '-Cleanup gracefully completed', TerminalColors.ENDC)

+    def _handle_except(self):
+        try:
+            self.read_container_logs()
+        except BaseException as exc:
+            self.add_to_log(exc.__class__.__name__, str(exc))
+            raise exc
+        finally:
+            try:
+                self.read_and_cleanup_processes()
+            except BaseException as exc:
+                self.add_to_log(exc.__class__.__name__, str(exc))
+                raise exc
+            finally:
+                try:
+                    self.save_notes_runner()
+                except BaseException as exc:
+                    self.add_to_log(exc.__class__.__name__, str(exc))
+                    raise exc
+                finally:
+                    try:
+                        self.stop_metric_providers()
+                    except BaseException as exc:
+                        self.add_to_log(exc.__class__.__name__, str(exc))
+                        raise exc
+                    finally:
+                        try:
+                            self.save_stdout_logs()
+                        except BaseException as exc:
+                            self.add_to_log(exc.__class__.__name__, str(exc))
+                            raise exc
+                        finally:
+                            self.cleanup() # always run cleanup automatically after each run
+
     def run(self):
         '''
         The run method is just a wrapper for the intended sequential flow of a GMT run.
@@ -1544,37 +1583,7 @@ def run(self):
             self.set_run_failed()
             raise exc
         finally:
-            try:
-                self.read_container_logs()
-            except BaseException as exc:
-                self.add_to_log(exc.__class__.__name__, str(exc))
-                raise exc
-            finally:
-                try:
-                    self.read_and_cleanup_processes()
-                except BaseException as exc:
-                    self.add_to_log(exc.__class__.__name__, str(exc))
-                    raise exc
-                finally:
-                    try:
-                        self.save_notes_runner()
-                    except BaseException as exc:
-                        self.add_to_log(exc.__class__.__name__, str(exc))
-                        raise exc
-                    finally:
-                        try:
-                            self.stop_metric_providers()
-                        except BaseException as exc:
-                            self.add_to_log(exc.__class__.__name__, str(exc))
-                            raise exc
-                        finally:
-                            try:
-                                self.save_stdout_logs()
-                            except BaseException as exc:
-                                self.add_to_log(exc.__class__.__name__, str(exc))
-                                raise exc
-                            finally:
-                                self.cleanup() # always run cleanup automatically after each run
+            self._handle_except()

         print(TerminalColors.OKGREEN, arrows('MEASUREMENT SUCCESSFULLY COMPLETED'), TerminalColors.ENDC)
@@ -1682,6 +1691,8 @@ def run(self):
         # loop over them issuing separate queries to the DB
         from tools.phase_stats import build_and_store_phase_stats

+        print("Run id is", runner._run_id)
+        print("Aggregating and uploading phase_stats. This can take a while for longer runs ...")
         build_and_store_phase_stats(runner._run_id, runner._sci)

         # We need to import this here as we need the correct config file
diff --git a/tests/lib/test_diff.py b/tests/lib/test_diff.py
index 86b27ce9c..904a18f43 100644
--- a/tests/lib/test_diff.py
+++ b/tests/lib/test_diff.py
@@ -8,10 +8,13 @@

 # For the diffing to work as expected it is important that we include a known set of columns
 # It might happen that at some point a dev adds a column to the table, but forgets to also add it
-# to the diffing. To prevent this, this Unit test checks if the table column signature is unchanged
+# to the diffing.
+# To prevent this, this unit test checks that the table column signature is unchanged.
+#
+# If this test fails and the additional column should be diffed, please add it to lib/diff.py::get_diffable_row()
 def test_run_signature():
-    expected_signature = 'id,job_id,name,uri,branch,commit_hash,commit_timestamp,email,categories,usage_scenario,filename,machine_specs,runner_arguments,machine_id,gmt_hash,measurement_config,start_measurement,end_measurement,phases,logs,invalid_run,failed,created_at,updated_at'
+    expected_signature = 'id,job_id,name,uri,branch,commit_hash,commit_timestamp,email,categories,usage_scenario,filename,machine_specs,runner_arguments,machine_id,gmt_hash,measurement_config,start_measurement,end_measurement,phases,logs,invalid_run,failed,monitor_run,created_at,updated_at'

     current_signature = DB().fetch_all("SELECT column_name FROM information_schema.columns WHERE table_name = 'runs' ORDER BY ordinal_position;")
     current_signature = ",".join([x[0] for x in current_signature])
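With the schema change, the migration, and the signature test in sync, monitor runs can be told apart from regular measurement runs directly in the database. A quick sketch using the same DB helper the test exercises (the import path is assumed; the test above only shows DB() in use):

    from lib.db import DB  # assumed import path

    monitor_runs = DB().fetch_all(
        "SELECT id, name, created_at FROM runs WHERE monitor_run = true ORDER BY created_at DESC;"
    )
    for run_id, name, created_at in monitor_runs:
        print(run_id, name, created_at)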