Skip to content

Commit

Permalink
Merge pull request #64 from george0st/changes
Browse files Browse the repository at this point in the history
Init PercentileSummary
  • Loading branch information
george0st authored Oct 10, 2024
2 parents 6b38fba + cb8a9dc commit 1349874
Show file tree
Hide file tree
Showing 3 changed files with 81 additions and 29 deletions.
13 changes: 9 additions & 4 deletions qgate_perf/parallel_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,7 @@ def _print_detail(self, file, run_setup: RunSetup, return_dict, processes, threa
sum_avrg_time, sum_deviation, sum_call, executors = 0, 0, 0, 0
total_call_per_sec_raw, total_call_per_sec = 0, 0

# iteration cross results from all executors
for return_key in return_dict:
parallel_ret = return_dict[return_key]
if parallel_ret:
Expand All @@ -211,26 +212,30 @@ def _print_detail(self, file, run_setup: RunSetup, return_dict, processes, threa
f" {parallel_ret.readable_str() if parallel_ret else ParallelProbe.readable_dump_error('SYSTEM overloaded')}")

if (executors > 0):
# Calc clarification:
# Calc clarification (for better understanding):
# sum_avrg_time / count = average time for one executor (average is cross all calls and executors)
# 1 / (sum_avrg_time/count) = average amount of calls per one second (cross executors)
total_call_per_sec_raw = 0 if (sum_avrg_time / executors) == 0 else (1 / (sum_avrg_time / executors)) * executors
total_call_per_sec = total_call_per_sec_raw * run_setup._bulk_row

# A2A form
out = {
FileFormat.PRF_TYPE: FileFormat.PRF_CORE_TYPE,
FileFormat.PRF_CORE_PLAN_EXECUTOR_ALL: processes * threads,
FileFormat.PRF_CORE_PLAN_EXECUTOR: [processes, threads],
FileFormat.PRF_CORE_REAL_EXECUTOR: executors,
FileFormat.PRF_CORE_GROUP: group,

FileFormat.PRF_CORE_TOTAL_CALL: sum_call, # ok
FileFormat.PRF_CORE_TOTAL_CALL_PER_SEC_RAW: total_call_per_sec_raw, # ok
FileFormat.PRF_CORE_TOTAL_CALL_PER_SEC: total_call_per_sec, # ok
FileFormat.PRF_CORE_AVRG_TIME: 0 if executors == 0 else sum_avrg_time / executors, # ok
FileFormat.PRF_CORE_STD_DEVIATION: 0 if executors == 0 else sum_deviation / executors, # ok

FileFormat.PRF_CORE_TIME_END: datetime.utcnow().isoformat(' ')
}

# human readable form
if total_call_per_sec_raw == total_call_per_sec:
total_call_readable = f"{round(total_call_per_sec_raw, OutputSetup().human_precision)}"
else:
Expand All @@ -239,11 +244,14 @@ def _print_detail(self, file, run_setup: RunSetup, return_dict, processes, threa
FileFormat.HM_PRF_CORE_PLAN_EXECUTOR_ALL: f"{processes * threads} [{processes},{threads}]",
FileFormat.HM_PRF_CORE_REAL_EXECUTOR: executors,
FileFormat.HM_PRF_CORE_GROUP: group,

FileFormat.HM_PRF_CORE_TOTAL_CALL: sum_call,
FileFormat.HM_PRF_CORE_TOTAL_CALL_PER_SEC: total_call_readable,
FileFormat.HM_PRF_CORE_AVRG_TIME: 0 if executors == 0 else round(sum_avrg_time / executors, OutputSetup().human_precision),
FileFormat.HM_PRF_CORE_STD_DEVIATION: 0 if executors == 0 else round (sum_deviation / executors, OutputSetup().human_precision)
}

# final dump
self._print(file,
f" {json.dumps(out, separators = OutputSetup().json_separator)}",
f" {json.dumps(readable_out, separators = OutputSetup().human_json_separator)}")
Expand All @@ -259,10 +267,7 @@ def _open_output(self):

def _executeCore(self, run_setup: RunSetup, return_dict, processes=2, threads=2):

#from qgate_perf.run_return import RunReturn

proc = []

# define synch time for run of all executors
run_setup.set_start_time()

Expand Down
96 changes: 71 additions & 25 deletions qgate_perf/parallel_probe.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,26 @@


class PercentileItem:
    """Detail data from one executor, split based on percentile.

    Holds the raw measurements that a single executor collected for one
    percentile bucket (call count, accumulated duration, spread).
    """

    def __init__(self, percentile, count, total_duration, std, min, max):
        # NOTE: 'min'/'max' shadow the builtins, but the names are kept
        # unchanged for backward compatibility with keyword callers.
        self.percentile = percentile        # bucket, e.g. 0.99; 1 covers all calls

        self.count = count                  # amount of calls in this bucket
        self.total_duration = total_duration
        self.min = min                      # shortest observed call duration
        self.max = max                      # longest observed call duration
        self.std = std                      # standard deviation of durations

    def __str__(self):
        # BUGFIX: the original body was 'pass', so str(item) returned None
        # and raised "TypeError: __str__ returned non-string (type NoneType)".
        return (f"percentile: {self.percentile}, count: {self.count}, "
                f"total_duration: {self.total_duration}, std: {self.std}, "
                f"min: {self.min}, max: {self.max}")
class PercentileSummary:
    """Summary data from all executors, split based on percentile."""

    def __init__(self, percentile, count, call_per_sec_raw, call_per_sec, avrg, std):
        # percentile bucket this summary belongs to (e.g. 0.99; 1 covers all calls)
        self.percentile = percentile

        # aggregated metrics across every executor in the run
        self.count, self.std, self.avrg = count, std, avrg
        self.call_per_sec_raw, self.call_per_sec = call_per_sec_raw, call_per_sec

class ParallelProbe:
""" Provider probe for parallel test tuning """
Expand Down Expand Up @@ -151,19 +160,38 @@ def __str__(self):

# TODO: return all percentile, not only 100 percentile
if self.exception is None:
return json.dumps({
FileFormat.PRF_TYPE: FileFormat.PRF_DETAIL_TYPE,
FileFormat.PRF_DETAIL_PROCESSID: self.pid, # info
FileFormat.PRF_DETAIL_CALLS: self.counter, # for perf graph
FileFormat.PRF_DETAIL_AVRG: nan if self.counter == 0 else self.total_duration / self.counter,
FileFormat.PRF_DETAIL_MIN: self.min_duration, # info
FileFormat.PRF_DETAIL_MAX: self.max_duration, # info
FileFormat.PRF_DETAIL_STDEV: self.standard_deviation, # for perf graph
FileFormat.PRF_DETAIL_TOTAL: self.total_duration, # for perf graph
FileFormat.PRF_DETAIL_TIME_INIT: self.track_init.isoformat(' '), # for executor graph
FileFormat.PRF_DETAIL_TIME_START: self.track_start.isoformat(' '), # for executor graph
FileFormat.PRF_DETAIL_TIME_END: self.track_end.isoformat(' ') # for executor graph
}, separators = OutputSetup().json_separator)
data = {}
data[FileFormat.PRF_TYPE] = FileFormat.PRF_DETAIL_TYPE
data[FileFormat.PRF_DETAIL_PROCESSID] = self.pid # info

for result in self.percentile_results:
suffix = f"_{result.percentile*100}" if result.percentile < 1 else ""
data[FileFormat.PRF_DETAIL_CALLS + suffix] = result.count # for perf graph
data[FileFormat.PRF_DETAIL_AVRG + suffix] = nan if result.count == 0 else result.total_duration / result.count
data[FileFormat.PRF_DETAIL_MIN + suffix] = result.min # info
data[FileFormat.PRF_DETAIL_MAX + suffix] = result.max # info
data[FileFormat.PRF_DETAIL_STDEV + suffix] = result.std # for perf graph
data[FileFormat.PRF_DETAIL_TOTAL + suffix] = result.total_duration # for perf graph

data[FileFormat.PRF_DETAIL_TIME_INIT] = self.track_init.isoformat(' ') # for executor graph
data[FileFormat.PRF_DETAIL_TIME_START] = self.track_start.isoformat(' ') # for executor graph
data[FileFormat.PRF_DETAIL_TIME_END] = self.track_end.isoformat(' ') # for executor graph

return json.dumps(data, separators = OutputSetup().json_separator)
# return json.dumps({
# FileFormat.PRF_TYPE: FileFormat.PRF_DETAIL_TYPE,
# FileFormat.PRF_DETAIL_PROCESSID: self.pid, # info
#
# FileFormat.PRF_DETAIL_CALLS: self.counter, # for perf graph
# FileFormat.PRF_DETAIL_AVRG: nan if self.counter == 0 else self.total_duration / self.counter,
# FileFormat.PRF_DETAIL_MIN: self.min_duration, # info
# FileFormat.PRF_DETAIL_MAX: self.max_duration, # info
# FileFormat.PRF_DETAIL_STDEV: self.standard_deviation, # for perf graph
# FileFormat.PRF_DETAIL_TOTAL: self.total_duration, # for perf graph
# FileFormat.PRF_DETAIL_TIME_INIT: self.track_init.isoformat(' '), # for executor graph
# FileFormat.PRF_DETAIL_TIME_START: self.track_start.isoformat(' '), # for executor graph
# FileFormat.PRF_DETAIL_TIME_END: self.track_end.isoformat(' ') # for executor graph
# }, separators = OutputSetup().json_separator)
else:
return ParallelProbe.dump_error(self.exception, self.pid, self.counter)

Expand All @@ -172,15 +200,33 @@ def readable_str(self, compact_form = True):

# TODO: return all percentile, not only 100 percentile
if self.exception is None:
if self.s
return json.dumps({
FileFormat.HR_PRF_DETAIL_CALLS: self.counter,
FileFormat.HR_PRF_DETAIL_AVRG: nan if self.counter == 0 else round(self.total_duration / self.counter, OutputSetup().human_precision),
FileFormat.PRF_DETAIL_MIN: round(self.min_duration, OutputSetup().human_precision),
FileFormat.PRF_DETAIL_MAX: round(self.max_duration, OutputSetup().human_precision),
FileFormat.HR_PRF_DETAIL_STDEV: round(self.standard_deviation, OutputSetup().human_precision),
FileFormat.HR_PRF_DETAIL_TOTAL: round(self.total_duration, OutputSetup().human_precision)
}, separators = OutputSetup().human_json_separator if compact_form else (', ', ': '))
data = {}
# data[FileFormat.PRF_TYPE] = FileFormat.PRF_DETAIL_TYPE
# data[FileFormat.PRF_DETAIL_PROCESSID] = self.pid # info

for result in self.percentile_results:
suffix = f"_{result.percentile*100}" if result.percentile < 1 else ""
data[FileFormat.HR_PRF_DETAIL_CALLS + suffix] = result.count # for perf graph
data[FileFormat.HR_PRF_DETAIL_CALLS + suffix] = nan if result.count == 0 else result.total_duration / result.count
data[FileFormat.PRF_DETAIL_MIN + suffix] = result.min # info
data[FileFormat.PRF_DETAIL_MAX + suffix] = result.max # info
data[FileFormat.HR_PRF_DETAIL_STDEV + suffix] = result.std # for perf graph
data[FileFormat.HR_PRF_DETAIL_TOTAL + suffix] = result.total_duration # for perf graph

# data[FileFormat.PRF_DETAIL_TIME_INIT] = self.track_init.isoformat(' ') # for executor graph
# data[FileFormat.PRF_DETAIL_TIME_START] = self.track_start.isoformat(' ') # for executor graph
# data[FileFormat.PRF_DETAIL_TIME_END] = self.track_end.isoformat(' ') # for executor graph

return json.dumps(data, separators = OutputSetup().human_json_separator if compact_form else (', ', ': '))

# return json.dumps({
# FileFormat.HR_PRF_DETAIL_CALLS: self.counter,
# FileFormat.HR_PRF_DETAIL_CALLS: nan if self.counter == 0 else round(self.total_duration / self.counter, OutputSetup().human_precision),
# FileFormat.PRF_DETAIL_MIN: round(self.min_duration, OutputSetup().human_precision),
# FileFormat.PRF_DETAIL_MAX: round(self.max_duration, OutputSetup().human_precision),
# FileFormat.HR_PRF_DETAIL_STDEV: round(self.standard_deviation, OutputSetup().human_precision),
# FileFormat.HR_PRF_DETAIL_TOTAL: round(self.total_duration, OutputSetup().human_precision)
# }, separators = OutputSetup().human_json_separator if compact_form else (', ', ': '))
else:
return ParallelProbe.readable_dump_error(self.exception, self.pid, self.counter)

Expand Down
1 change: 1 addition & 0 deletions tests/test_perf.py
Original file line number Diff line number Diff line change
Expand Up @@ -281,6 +281,7 @@ def test_output_json_separator(self):
setup = RunSetup(duration_second=4, start_delay=4)
OutputSetup().human_json_separator = (' - ', '::')
self.assertTrue(generator.run(2, 2, setup))
OutputSetup().human_json_separator = OutputSetup.HUMAN_JSON_SEPARATOR

# if __name__ == '__main__':
# unittest.main()

0 comments on commit 1349874

Please sign in to comment.