Merge pull request #46 from george0st/changes
Tune output for human readability and add the ability to change output settings
george0st authored Oct 2, 2024
2 parents 5c1118d + eeff27a commit 6273cbc
Showing 5 changed files with 90 additions and 27 deletions.
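As a quick orientation (a usage sketch inferred from the tests added in this commit, not part of the diff itself), the new OutputSetup singleton lets callers tune the human-readable output before running an executor:

from qgate_perf.output_setup import OutputSetup

# OutputSetup is a singleton, so these settings apply to all subsequent output
OutputSetup().human_precision = 7                    # decimal places for rounded values (default 4)
OutputSetup().human_json_separator = (' - ', '::')   # (item, key) separators for the compact JSON form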
34 changes: 34 additions & 0 deletions qgate_perf/output_setup.py
@@ -0,0 +1,34 @@
import datetime


class Singleton (type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]

class OutputSetup(metaclass=Singleton):

HUMAN_PRECISION = 4
HUMAN_JSON_SEPARATOR = (', ', ':')

def __init__(self):
self._human_precision = OutputSetup.HUMAN_PRECISION
self._human_json_separator = OutputSetup.HUMAN_JSON_SEPARATOR

@property
def human_precision(self):
return self._human_precision

@human_precision.setter
def human_precision(self, value):
self._human_precision = value

@property
def human_json_separator(self):
return self._human_json_separator

@human_json_separator.setter
def human_json_separator(self, value):
self._human_json_separator = value
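A brief illustration (not part of the commit) of the Singleton metaclass above: the first call to OutputSetup() creates and caches the instance, and every later call returns the same object, which is what makes these settings global.

from qgate_perf.output_setup import OutputSetup

first = OutputSetup()
second = OutputSetup()
assert first is second               # same cached instance

first.human_precision = 7
assert second.human_precision == 7   # the change is visible through any reference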
15 changes: 8 additions & 7 deletions qgate_perf/parallel_executor.py
@@ -14,6 +14,7 @@
from platform import python_version
from packaging import version
from contextlib import suppress
from qgate_perf.output_setup import OutputSetup


def _executor_wrapper(func, run_return: RunReturn, run_setup: RunSetup):
@@ -151,7 +152,7 @@ def _print_header(self, file, run_setup: RunSetup=None):
FileFormat.HR_PRF_HDR_MEMORY: f"{total}/{free}"
}

self._print(file, json.dumps(out), json.dumps(readable_out))
self._print(file, json.dumps(out), json.dumps(readable_out, separators = OutputSetup().human_json_separator))

def _memory(self):

@@ -246,8 +247,8 @@ def _print_detail(self, file, run_setup: RunSetup, return_dict, processes, threa
count += 1
if (self._detail_output == True):
self._print(file,
f" {str(parallel_ret) if parallel_ret else ParallelProbe.dump_error('SYSTEM overloaded')}",
f" {parallel_ret.readable_str() if parallel_ret else ParallelProbe.dump_error('SYSTEM overloaded')}")
f" {str(parallel_ret) if parallel_ret else ParallelProbe.dump_error('SYSTEM overloaded')}",
f" {parallel_ret.readable_str() if parallel_ret else ParallelProbe.readable_dump_error('SYSTEM overloaded')}")

if (count > 0):
total_call_per_sec=0 if (sum_time / count) == 0 else (1 / (sum_time / count)) * count * run_setup._bulk_row
@@ -268,13 +269,13 @@
FileFormat.HM_PRF_CORE_REAL_EXECUTOR: count,
FileFormat.HM_PRF_CORE_GROUP: group,
FileFormat.HM_PRF_CORE_TOTAL_CALL: sum_call,
FileFormat.HM_PRF_CORE_TOTAL_CALL_PER_SEC: round(total_call_per_sec, ParallelProbe.HUMAN_PRECISION),
FileFormat.HM_PRF_CORE_AVRG_TIME: 0 if count==0 else round(sum_time / count, ParallelProbe.HUMAN_PRECISION),
FileFormat.HM_PRF_CORE_STD_DEVIATION: 0 if count==0 else round (sum_deviation / count, ParallelProbe.HUMAN_PRECISION)
FileFormat.HM_PRF_CORE_TOTAL_CALL_PER_SEC: round(total_call_per_sec, OutputSetup().human_precision),
FileFormat.HM_PRF_CORE_AVRG_TIME: 0 if count==0 else round(sum_time / count, OutputSetup().human_precision),
FileFormat.HM_PRF_CORE_STD_DEVIATION: 0 if count==0 else round (sum_deviation / count, OutputSetup().human_precision)
}
self._print(file,
f" {json.dumps(out)}",
f" {json.dumps(readable_out)}")
f" {json.dumps(readable_out, separators = OutputSetup().human_json_separator)}")

def _open_output(self):
dirname = os.path.dirname(self._output_file)
25 changes: 16 additions & 9 deletions qgate_perf/parallel_probe.py
@@ -5,12 +5,12 @@
from qgate_perf.standard_deviation import StandardDeviation
from qgate_perf.file_format import FileFormat
from qgate_perf.run_setup import RunSetup
from qgate_perf.output_setup import OutputSetup
from math import nan


class ParallelProbe:
""" Provider probe for parallel test tuning """
HUMAN_PRECISION = 4

def __init__(self, run_setup: RunSetup, exception=None):
"""
@@ -122,20 +122,20 @@ def __str__(self):
else:
return ParallelProbe.dump_error(self.exception, self.pid, self.counter)

def readable_str(self):
def readable_str(self, compact_form = True):
"""Provide view to return value in readable and shorter form (for human check)"""

if self.exception is None:
return json.dumps({
FileFormat.HR_PRF_DETAIL_CALLS: self.counter,
FileFormat.HR_PRF_DETAIL_AVRG: nan if self.counter == 0 else round(self.total_duration / self.counter, ParallelProbe.HUMAN_PRECISION),
FileFormat.PRF_DETAIL_MIN: round(self.min_duration, ParallelProbe.HUMAN_PRECISION),
FileFormat.PRF_DETAIL_MAX: round(self.max_duration, ParallelProbe.HUMAN_PRECISION),
FileFormat.HR_PRF_DETAIL_STDEV: round(self.standard_deviation, ParallelProbe.HUMAN_PRECISION),
FileFormat.PRF_DETAIL_TOTAL: round(self.total_duration, ParallelProbe.HUMAN_PRECISION)
})
FileFormat.HR_PRF_DETAIL_AVRG: nan if self.counter == 0 else round(self.total_duration / self.counter, OutputSetup().human_precision),
FileFormat.PRF_DETAIL_MIN: round(self.min_duration, OutputSetup().human_precision),
FileFormat.PRF_DETAIL_MAX: round(self.max_duration, OutputSetup().human_precision),
FileFormat.HR_PRF_DETAIL_STDEV: round(self.standard_deviation, OutputSetup().human_precision),
FileFormat.PRF_DETAIL_TOTAL: round(self.total_duration, OutputSetup().human_precision)
}, separators = OutputSetup().human_json_separator if compact_form else (', ', ': '))
else:
return ParallelProbe.dump_error(self.exception, self.pid, self.counter)
return ParallelProbe.readable_dump_error(self.exception, self.pid, self.counter)

@staticmethod
def dump_error(exception, pid=0, counter=0):
@@ -145,3 +145,10 @@ def dump_error(exception, pid=0, counter=0):
FileFormat.PRF_DETAIL_CALLS: counter,
FileFormat.PRF_DETAIL_ERR: str(exception)
})

@staticmethod
def readable_dump_error(exception, pid=0, counter=0):
return json.dumps({
FileFormat.PRF_DETAIL_CALLS: counter,
FileFormat.PRF_DETAIL_ERR: str(exception)
}, separators = OutputSetup().human_json_separator)
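For context, a small standalone example (illustrative, not from the commit) of what the separators argument of json.dumps controls — the first element joins items, the second joins keys and values — which is how readable_str now switches between the compact and expanded forms:

import json

stats = {"calls": 120, "avrg": 0.0123}

print(json.dumps(stats, separators=(', ', ': ')))   # expanded form: {"calls": 120, "avrg": 0.0123}
print(json.dumps(stats, separators=(', ', ':')))    # compact form:  {"calls":120, "avrg":0.0123}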
20 changes: 20 additions & 0 deletions tests/test_perf.py
@@ -8,6 +8,7 @@
from qgate_perf.run_return import RunReturn
from qgate_perf.bundle_helper import BundleHelper
from qgate_perf.executor_helper import ExecutorHelper
from qgate_perf.output_setup import OutputSetup
import time
from os import path
import shutil
@@ -261,6 +262,25 @@ def test_general_exception2(self):
setup=RunSetup(duration_second=0, start_delay=0)
self.assertFalse(generator.run_executor([[1,1]], setup))

def test_different_output_precision(self):
generator = ParallelExecutor(prf_gil_impact,
label="GIL_impact",
detail_output=True,
output_file=path.join(self.OUTPUT_ADR, "perf_gil_impact_test.txt"))

setup=RunSetup(duration_second=4, start_delay=4)
OutputSetup().human_precision = 7
self.assertTrue(generator.run(2, 2, setup))

def test_different_output_json_separator(self):
generator = ParallelExecutor(prf_gil_impact,
label="GIL_impact",
detail_output=True,
output_file=path.join(self.OUTPUT_ADR, "perf_gil_impact_test.txt"))

setup = RunSetup(duration_second=4, start_delay=4)
OutputSetup().human_json_separator = (' - ', '::')
self.assertTrue(generator.run(2, 2, setup))

# if __name__ == '__main__':
# unittest.main()
23 changes: 12 additions & 11 deletions tests/test_simulation.py
@@ -3,6 +3,7 @@
import time
from qgate_perf.parallel_probe import ParallelProbe
from qgate_perf.run_setup import RunSetup
from qgate_perf.output_setup import OutputSetup
import numpy as np
from numpy import random

@@ -100,21 +101,21 @@ def _check(self, simulate, sequence):
"""Check value from ParallelProbe vs calc from Numpy"""
expected ={}
expected['call'] = len(sequence)
expected['avr'] = float(round(np.average(sequence), ParallelProbe.HUMAN_PRECISION))
expected['min'] = float(round(np.min(sequence), ParallelProbe.HUMAN_PRECISION))
expected['max'] = float(round(np.max(sequence), ParallelProbe.HUMAN_PRECISION))
expected['std'] = float(round(np.std(sequence), ParallelProbe.HUMAN_PRECISION))
expected['total'] = float(round(np.sum(sequence),ParallelProbe.HUMAN_PRECISION))
expected['avr'] = float(round(np.average(sequence), OutputSetup().human_precision))
expected['min'] = float(round(np.min(sequence), OutputSetup().human_precision))
expected['max'] = float(round(np.max(sequence), OutputSetup().human_precision))
expected['std'] = float(round(np.std(sequence), OutputSetup().human_precision))
expected['total'] = float(round(np.sum(sequence), OutputSetup().human_precision))

print("Parallel probe :", simulate.readable_str())
print("Parallel probe :", simulate.readable_str(False))
print("Numpy calculation :", str(expected))

self.assertTrue(simulate.counter == expected['call'])
self.assertTrue(round(simulate.total_duration / simulate.counter, ParallelProbe.HUMAN_PRECISION) == expected['avr'])
self.assertTrue(round(simulate.min_duration, ParallelProbe.HUMAN_PRECISION) == expected['min'])
self.assertTrue(round(simulate.max_duration, ParallelProbe.HUMAN_PRECISION) == expected['max'])
self.assertTrue(round(simulate.standard_deviation,ParallelProbe.HUMAN_PRECISION) == expected['std'])
self.assertTrue(round(simulate.total_duration, ParallelProbe.HUMAN_PRECISION) == expected['total'])
self.assertTrue(round(simulate.total_duration / simulate.counter, OutputSetup().human_precision) == expected['avr'])
self.assertTrue(round(simulate.min_duration, OutputSetup().human_precision) == expected['min'])
self.assertTrue(round(simulate.max_duration, OutputSetup().human_precision) == expected['max'])
self.assertTrue(round(simulate.standard_deviation, OutputSetup().human_precision) == expected['std'])
self.assertTrue(round(simulate.total_duration, OutputSetup().human_precision) == expected['total'])

def test_basic_statistic1(self):
sequence = [0.24, 0.21, 0.34, 0.33]
