Skip to content

Commit

Permalink
wp.processor: Automatically convert metrics to analysis
Browse files Browse the repository at this point in the history
  • Loading branch information
mrkajetanp committed Aug 10, 2023
1 parent 6122146 commit 190941c
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 38 deletions.
1 change: 0 additions & 1 deletion wp/cli.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import os
import argparse
import logging as log
import confuse
Expand Down
58 changes: 21 additions & 37 deletions wp/processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,31 +74,15 @@ def __init__(self, output_path, config=None):
self.label = self.wa_output.jobs[0].label

def run_metrics(self, metrics):
METRIC_TO_ANALYSIS = {
'power': self.trace_pixel6_emeter_analysis,
'idle': self.trace_cpu_idle_analysis,
'idle-miss': self.trace_cpu_idle_miss_analysis,
'freq': self.trace_frequency_analysis,
'overutil': self.trace_overutilized_analysis,
'pelt': self.trace_sched_pelt_cfs_analysis,
'capacity': self.trace_capacity_analysis,
'tasks-residency': self.trace_tasks_residency_time_analysis,
'tasks-activations': self.trace_tasks_activations_analysis,
'adpf': self.adpf_analysis,
'thermal': self.thermal_analysis,
'perf-trace-event': self.trace_perf_event_analysis,
'uclamp': self.trace_uclamp_analysis,
'energy-estimate': self.trace_energy_estimate_analysis,
'cgroup-attach': self.trace_cgroup_attach_task_analysis,
'wakeup-latency': self.trace_wakeup_latency_analysis,
'wakeup-latency-cgroup': self.trace_wakeup_latency_cgroup_analysis,
'tasks-residency-cgroup': self.trace_tasks_residency_cgroup_analysis,
}
# Analysis methods must be named after their corresponding metric,
# with '-' replaced by '_' and the suffix '_analysis' appended.
def metric_to_analysis(metric: str):
return getattr(self, f"{metric.replace('-', '_')}_analysis")

for metric in metrics:
try:
analysis_start = time.time()
METRIC_TO_ANALYSIS[metric]()
metric_to_analysis(metric)()
log.debug(f"{metric} analysis complete, took {round(time.time() - analysis_start, 2)}s")
except (MissingTraceEventError, HostError, WPMetricFailedError) as e:
log.error(e)
Expand Down Expand Up @@ -151,7 +135,7 @@ def validate_traces(self):
trace.start
log.info('Traces validated successfully')

def trace_pixel6_emeter_analysis(self):
def power_analysis(self):
log.info('Collecting data from pixel6_emeter')
power = self.analysis.apply(self.analysis.trace_pixel6_emeter_df)
power.write_parquet(os.path.join(self.analysis_path, 'pixel6_emeter.pqt'))
Expand All @@ -160,7 +144,7 @@ def trace_pixel6_emeter_analysis(self):
power_mean.write_parquet(os.path.join(self.analysis_path, 'pixel6_emeter_mean.pqt'))
print(power_mean)

def trace_energy_estimate_analysis(self):
def energy_estimate_analysis(self):
log.info('Computing energy estimates')
df = self.analysis.apply(self.analysis.trace_energy_estimate_df)
df.write_parquet(os.path.join(self.analysis_path, 'energy_estimate.pqt'))
Expand All @@ -172,7 +156,7 @@ def trace_energy_estimate_analysis(self):
df.write_parquet(os.path.join(self.analysis_path, 'energy_estimate_mean.pqt'))
print(df)

def trace_cpu_idle_analysis(self):
def idle_analysis(self):
log.info('Collecting cpu_idle events')
idle = self.analysis.apply(self.analysis.trace_cpu_idle_df)
idle.write_parquet(os.path.join(self.analysis_path, 'cpu_idle.pqt'))
Expand All @@ -183,7 +167,7 @@ def trace_cpu_idle_analysis(self):
idle_res.write_parquet(os.path.join(self.analysis_path, 'idle_residency.pqt'))
print(idle_res)

def trace_cpu_idle_miss_analysis(self):
def idle_miss_analysis(self):
log.info('Collecting cpu_idle_miss events')
idle_miss = self.analysis.apply(self.analysis.trace_cpu_idle_miss_df)

Expand All @@ -196,7 +180,7 @@ def trace_cpu_idle_miss_analysis(self):
idle_miss.write_parquet(os.path.join(self.analysis_path, 'cpu_idle_miss_counts.pqt'))
print(idle_miss)

def trace_frequency_analysis(self):
def freq_analysis(self):
log.info('Collecting frequency data')
freq = self.analysis.apply(self.analysis.trace_frequency_df)
freq.write_parquet(os.path.join(self.analysis_path, 'freqs.pqt'))
Expand All @@ -217,7 +201,7 @@ def trace_frequency_analysis(self):
freq_mean.write_parquet(os.path.join(self.analysis_path, 'freqs_residency.pqt'))
print(freq_res)

def trace_overutilized_analysis(self):
def overutil_analysis(self):
log.info('Collecting overutilized data')
overutil = self.analysis.apply(self.analysis.trace_overutilized_df)
overutil.write_parquet(os.path.join(self.analysis_path, 'overutilized.pqt'))
Expand All @@ -233,7 +217,7 @@ def trace_overutilized_analysis(self):
overutil.write_parquet(os.path.join(self.analysis_path, 'overutilized_mean.pqt'))
print(overutil)

def trace_sched_pelt_cfs_analysis(self):
def pelt_analysis(self):
log.info('Collecting sched_pelt_cfs data')
pelt = self.analysis.apply(self.analysis.trace_sched_pelt_cfs_df)
pelt.write_parquet(os.path.join(self.analysis_path, 'sched_pelt_cfs.pqt'))
Expand All @@ -248,7 +232,7 @@ def trace_sched_pelt_cfs_analysis(self):
pelt.write_parquet(os.path.join(self.analysis_path, 'sched_pelt_cfs_mean.pqt'))
print(pelt)

def trace_tasks_residency_time_analysis(self):
def tasks_residency_analysis(self):
log.info('Collecting task residency data')
tasks = self.analysis.apply(self.analysis.trace_tasks_residency_time_df)
tasks.write_parquet(os.path.join(self.analysis_path, 'tasks_residency.pqt'))
Expand Down Expand Up @@ -302,7 +286,7 @@ def thermal_analysis(self):
thermals.write_parquet(os.path.join(self.analysis_path, 'thermal.pqt'))
print(thermals)

def trace_wakeup_latency_analysis(self):
def wakeup_latency_analysis(self):
log.info('Collecting task wakeup latencies')

if self.label not in SUPPORTED_WORKLOADS:
Expand All @@ -320,7 +304,7 @@ def trace_wakeup_latency_analysis(self):
df.write_parquet(os.path.join(self.analysis_path, 'wakeup_latency_mean.pqt'))
print(df)

def trace_tasks_activations_analysis(self):
def tasks_activations_analysis(self):
log.info('Collecting task activations')

if self.label not in SUPPORTED_WORKLOADS:
Expand Down Expand Up @@ -348,13 +332,13 @@ def trace_tasks_activations_analysis(self):
df.write_parquet(os.path.join(self.analysis_path, 'task_activations_stats_cluster.pqt'))
print(df)

def trace_cgroup_attach_task_analysis(self):
def cgroup_attach_analysis(self):
    """Collect cgroup_attach_task trace events and persist them.

    Applies the trace_cgroup_attach_task_df analysis across the traces,
    writes the resulting table to 'cgroup_attach_task.pqt' under the
    analysis output directory, and echoes it to stdout.
    """
    log.info('Collecting cgroup_attach_task events')
    attach_events = self.analysis.apply(self.analysis.trace_cgroup_attach_task_df)
    # Keep the raw event table alongside the other analysis artifacts.
    output_file = os.path.join(self.analysis_path, 'cgroup_attach_task.pqt')
    attach_events.write_parquet(output_file)
    print(attach_events)

def trace_wakeup_latency_cgroup_analysis(self):
def wakeup_latency_cgroup_analysis(self):
log.info('Collecting per-cgroup task wakeup latency')
df = self.analysis.apply(self.analysis.trace_wakeup_latency_cgroup_df)
df.write_parquet(os.path.join(self.analysis_path, 'wakeup_latency_cgroup.pqt'))
Expand All @@ -367,7 +351,7 @@ def trace_wakeup_latency_cgroup_analysis(self):
df.write_parquet(os.path.join(self.analysis_path, 'wakeup_latency_cgroup_mean.pqt'))
print(df)

def trace_tasks_residency_cgroup_analysis(self):
def tasks_residency_cgroup_analysis(self):
log.info('Collecting per-cgroup tasks residency')
df = self.analysis.apply(self.analysis.trace_tasks_residency_cgroup_df)
df.write_parquet(os.path.join(self.analysis_path, 'tasks_residency_cgroup.pqt'))
Expand All @@ -380,22 +364,22 @@ def trace_tasks_residency_cgroup_analysis(self):
df.write_parquet(os.path.join(self.analysis_path, 'tasks_residency_cgroup_total.pqt'))
print(df)

def trace_uclamp_analysis(self):
def uclamp_analysis(self):
    """Collect uclamp update data and persist it.

    Applies the trace_uclamp_df analysis across the traces, writes the
    result to 'uclamp_updates.pqt' under the analysis output directory,
    and echoes it to stdout.
    """
    log.info('Collecting uclamp data')
    uclamp_updates = self.analysis.apply(self.analysis.trace_uclamp_df)
    # Persist the update table for later plotting/aggregation.
    destination = os.path.join(self.analysis_path, 'uclamp_updates.pqt')
    uclamp_updates.write_parquet(destination)
    print(uclamp_updates)

def trace_perf_event_analysis(self):
def perf_trace_event_analysis(self):
    """Collect perf counter event data and persist it.

    Applies the trace_perf_counters_df analysis across the traces,
    writes the result to 'perf_counters.pqt' under the analysis output
    directory, and echoes it to stdout.
    """
    log.info('Collecting perf counter event data')
    perf_counters = self.analysis.apply(self.analysis.trace_perf_counters_df)

    log.debug('Saving the perf counter event analysis file')
    target_path = os.path.join(self.analysis_path, 'perf_counters.pqt')
    perf_counters.write_parquet(target_path)
    print(perf_counters)

def trace_capacity_analysis(self):
def capacity_analysis(self):
log.info('Collecting capacity data')
df = self.analysis.apply(self.analysis.trace_capacity_df)

Expand Down

0 comments on commit 190941c

Please sign in to comment.