startop: Parse camera metrics from logcat for app_startup_runner

Parse ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED_ms,ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED_ms
from logcat when running Camera app.

Also updates the scripts to support multiple metrics output (previously
only one allowed). Adding future per-app metrics is also possible by
just adding 1 file with 1 function call with a regex pattern.

Bug: 132649131
Change-Id: Ibfc620bb156f733b69faa7654b06ef35f1a34de3
This commit is contained in:
Igor Murashkin
2019-05-13 16:31:25 -07:00
parent 2cd5a231f9
commit ab37e6ec2d
9 changed files with 748 additions and 46 deletions

View File

@@ -192,7 +192,206 @@ def generate_group_run_combinations(run_combinations: Iterable[NamedTuple], dst_
(group_key_value, args_it) = args_list_it
yield (group_key_value, args_it)
def parse_run_script_csv_file(csv_file: TextIO) -> List[int]:
class DataFrame:
    """Table-like class for storing a 2D cells table with named columns."""

    def __init__(self, data: Dict[str, List[object]] = None):
        """Create a new DataFrame from a dictionary (keys = headers, values = columns).

        Columns may have different lengths; short columns simply have no cell
        in the later rows (those cells read back as None via data_row_at /
        data_table).
        """
        # BUGFIX(style): use None instead of a shared mutable {} default argument.
        if data is None:
            data = {}
        self._headers = [i for i in data.keys()]
        self._rows = []

        def get_data_row(idx):
            # The idx-th cell of every column that is long enough to have one.
            r = {}
            for header, header_data in data.items():
                if not len(header_data) > idx:
                    continue
                r[header] = header_data[idx]
            return r

        # Append rows until every column is exhausted.
        row_num = 0
        while True:
            row_dict = get_data_row(row_num)
            if len(row_dict) == 0:
                break
            self._append_row(row_dict.keys(), row_dict.values())
            row_num = row_num + 1

    def concat_rows(self, other: 'DataFrame') -> None:
        """In-place concatenate rows of other into the rows of this DataFrame.

        None is added in pre-existing cells if new headers are introduced.
        """
        other_headers = other.headers
        for other_row in other._data_only():
            self._append_row(other_headers, other_row)

    def _append_row(self, headers: List[str], data: List[object]):
        # Rows are stored sparsely as header->value dicts; blank cells are
        # simply absent keys.
        new_row = {k: v for k, v in zip(headers, data)}
        self._rows.append(new_row)
        # Track any never-before-seen headers, preserving insertion order.
        for header in headers:
            if header not in self._headers:
                self._headers.append(header)

    def __repr__(self):
        """Fixed-width table rendering: a header row followed by data rows."""
        header_list = self._headers_only()
        # Right-align every column to its header width + 1.
        row_format = u""
        for header in header_list:
            row_format = row_format + u"{:>%d}" % (len(header) + 1)
        out = row_format.format(*header_list) + "\n"
        for v in self._data_only():
            out = out + row_format.format(*v) + "\n"
        return out

    def __eq__(self, other):
        # Equal iff headers and the materialized data table match.
        if isinstance(other, self.__class__):
            return self.headers == other.headers and self.data_table == other.data_table
        else:
            print("wrong instance", other.__class__)
            return False

    @property
    def headers(self) -> List[str]:
        """Column names, in first-seen order (fresh list each call)."""
        return [i for i in self._headers_only()]

    @property
    def data_table(self) -> List[List[object]]:
        """All rows as lists; missing cells are None."""
        return list(self._data_only())

    @property
    def data_table_transposed(self) -> List[List[object]]:
        """All columns as tuples (transpose of data_table)."""
        return list(self._transposed_data())

    @property
    def data_row_len(self) -> int:
        """Number of data rows."""
        return len(self._rows)

    def data_row_at(self, idx) -> List[object]:
        """Return a single data row at the specified index (0th based).

        Accepts negative indices, e.g. -1 is last row.
        """
        row_dict = self._rows[idx]
        l = []
        for h in self._headers_only():
            l.append(row_dict.get(h))  # Adds None in blank spots.
        return l

    def copy(self) -> 'DataFrame':
        """Shallow copy of this DataFrame."""
        # BUGFIX: was repeat(count=0), which repeats every row ZERO times and
        # therefore returned an empty DataFrame instead of a copy.
        return self.repeat(count=1)

    def repeat(self, count: int) -> 'DataFrame':
        """Return a new DataFrame where each row is repeated count times.

        A repeat of a row is adjacent to other repeats of that same row.
        """
        df = DataFrame()
        df._headers = self._headers.copy()
        rows = []
        for row in self._rows:
            for _ in range(count):
                rows.append(row.copy())
        df._rows = rows
        return df

    def merge_data_columns(self, other: 'DataFrame'):
        """Merge self and another DataFrame by adding other's data column-wise.

        For any headers that are the same, data from 'other' is preferred.
        Extra rows in 'other' are appended at the bottom.
        """
        for h in other._headers:
            if h not in self._headers:
                self._headers.append(h)
        append_rows = []
        for self_dict, other_dict in itertools.zip_longest(self._rows, other._rows):
            if not self_dict:
                # 'other' is longer: this row is brand new.
                d = {}
                append_rows.append(d)
            else:
                d = self_dict
            if other_dict:
                for k, v in other_dict.items():
                    d[k] = v  # other wins on shared headers.
        for r in append_rows:
            self._rows.append(r)

    def data_row_reduce(self, fnc) -> 'DataFrame':
        """Reduce the data row-wise by applying fnc to each column.

        fnc(Iterable[object]) -> object is applied over every non-empty cell
        in that column (descending row-wise).
        NOTE: any falsy cell (including 0) is skipped, not just blanks --
        TODO confirm that is intended for all-zero columns.

        Example:
          DataFrame({'a':[1,2,3]}).data_row_reduce(sum) == DataFrame({'a':[6]})

        Returns a new single-row DataFrame.
        """
        df = DataFrame()
        df._headers = self._headers.copy()

        def yield_by_column(header_key):
            for row_dict in self._rows:
                val = row_dict.get(header_key)
                if val:
                    yield val

        new_row_dict = {}
        for h in df._headers:
            new_row_dict[h] = fnc(yield_by_column(h))
        df._rows = [new_row_dict]
        return df

    def _headers_only(self):
        return self._headers

    def _data_only(self):
        # Materialize each sparse row dict into a dense list (None-padded).
        row_len = len(self._rows)
        for i in range(row_len):
            yield self.data_row_at(i)

    def _transposed_data(self):
        return zip(*self._data_only())
def parse_run_script_csv_file_flat(csv_file: TextIO) -> List[int]:
"""Parse a CSV file full of integers into a flat int list."""
csv_reader = csv.reader(csv_file)
arr = []
@@ -202,6 +401,38 @@ def parse_run_script_csv_file(csv_file: TextIO) -> List[int]:
arr.append(int(i))
return arr
def parse_run_script_csv_file(csv_file: TextIO) -> DataFrame:
    """Parse a CSV file full of integers into a DataFrame."""
    reader = csv.reader(csv_file)
    # First row is the header; an empty file has no table at all.
    header_row = next(reader, None)
    if not header_row:
        return None

    column_names = list(header_row)
    columns = {}
    for data_row in reader:
        for cell_idx, cell in enumerate(data_row):
            # Blank cells stay as-is (empty string); everything else is an int.
            cell_value = int(cell) if cell else cell
            column_key = column_names[cell_idx]
            columns.setdefault(column_key, []).append(cell_value)
    return DataFrame(columns)
def make_script_command_with_temp_output(script: str, args: List[str], **kwargs)\
-> Tuple[str, TextIO]:
"""
@@ -338,27 +569,49 @@ def execute_run_combos(grouped_run_combos: Iterable[Tuple[CollectorPackageInfo,
cmd, tmp_output_file = make_script_command_with_temp_output(_RUN_SCRIPT, args, count=loop_count, input=collector_tmp_output_file.name)
with tmp_output_file:
(passed, script_output) = execute_arbitrary_command(cmd, simulate, timeout)
parsed_output = simulate and [1,2,3] or parse_run_script_csv_file(tmp_output_file)
parsed_output = simulate and DataFrame({'fake_ms':[1,2,3]}) or parse_run_script_csv_file(tmp_output_file)
yield (passed, script_output, parsed_output)
def gather_results(commands: Iterable[Tuple[bool, str, DataFrame]], key_list: List[str], value_list: List[Tuple[str, ...]]):
    """Pair each command's measured results with its run-combination labels.

    For every passing command, yields one DataFrame: the label columns (taken
    from the run-combination named tuple in value_list) on the left, repeated
    to match the number of result rows, with the metric columns from the
    command's DataFrame merged on the right. Failed commands are skipped.
    """
    _debug_print("gather_results: key_list = ", key_list)
    for ((passed, script_output, run_result_list), values) in itertools.zip_longest(commands, value_list):
        _debug_print("run_result_list = ", run_result_list)
        _debug_print("values = ", values)
        if not passed:
            continue
        # RunCommandArgs(package='com.whatever', readahead='warm', compiler_filter=None)
        # -> {'package':['com.whatever'], 'readahead':['warm'], 'compiler_filter':[None]}
        values_dict = {k: [v] for k, v in values._asdict().items()}
        values_df = DataFrame(values_dict)
        # Project 'values_df' to the same number of rows as run_result_list.
        values_df = values_df.repeat(run_result_list.data_row_len)
        # The results are added as right-hand-side columns onto the labels.
        values_df.merge_data_columns(run_result_list)
        yield values_df
def eval_and_save_to_csv(output, annotated_result_values):
    """Write a stream of DataFrames to 'output' as one CSV table.

    The header row is taken from the first DataFrame; subsequent frames only
    contribute data rows. Flushes after every frame so results are visible
    while later runs are still executing.
    """
    printed_header = False
    csv_writer = csv.writer(output)
    for data_frame in annotated_result_values:
        if not printed_header:
            # BUGFIX: a stray csv_writer.writerow(row) here (left over from the
            # old flat-list format) serialized the DataFrame object itself.
            csv_writer.writerow(data_frame.headers)
            printed_header = True
            # TODO: what about when headers change between frames?
        for data_row in data_frame.data_table:
            csv_writer.writerow(data_row)
        output.flush()  # See the output live.
def main():

View File

@@ -192,19 +192,140 @@ def test_make_script_command_with_temp_output():
with tmp_file:
assert cmd_str == ["fake_script", "a", "b", "--count", "2", "--output", tmp_file.name]
def test_parse_run_script_csv_file_flat():
    """parse_run_script_csv_file_flat: headerless CSV of ints -> flat list."""
    # Empty file -> empty list.
    f = io.StringIO("")
    assert asr.parse_run_script_csv_file_flat(f) == []
    # Common case.
    f = io.StringIO("1,2,3")
    assert asr.parse_run_script_csv_file_flat(f) == [1,2,3]
    # Ignore trailing comma.
    f = io.StringIO("1,2,3,4,5,")
    assert asr.parse_run_script_csv_file_flat(f) == [1,2,3,4,5]
def test_data_frame():
    """Exercise DataFrame construction, concat, repeat, reduce and merge."""
    # Trivial empty data frame.
    df = asr.DataFrame()
    assert df.headers == []
    assert df.data_table == []
    assert df.data_table_transposed == []

    # Common case, same number of values in each place.
    df = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
    assert df.headers == ['TotalTime_ms', 'Displayed_ms']
    assert df.data_table == [[1, 4], [2, 5], [3, 6]]
    assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)]

    # Varying num values.
    df = asr.DataFrame({'many':[1,2], 'none': []})
    assert df.headers == ['many', 'none']
    assert df.data_table == [[1, None], [2, None]]
    assert df.data_table_transposed == [(1, 2), (None, None)]

    df = asr.DataFrame({'many':[], 'none': [1,2]})
    assert df.headers == ['many', 'none']
    assert df.data_table == [[None, 1], [None, 2]]
    assert df.data_table_transposed == [(None, None), (1, 2)]

    # Merge multiple data frames (concat_rows).
    df = asr.DataFrame()
    df.concat_rows(asr.DataFrame())
    assert df.headers == []
    assert df.data_table == []
    assert df.data_table_transposed == []

    df = asr.DataFrame()
    df2 = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
    df.concat_rows(df2)
    assert df.headers == ['TotalTime_ms', 'Displayed_ms']
    assert df.data_table == [[1, 4], [2, 5], [3, 6]]
    assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)]

    df = asr.DataFrame({'TotalTime_ms':[1,2]})
    df2 = asr.DataFrame({'Displayed_ms':[4,5]})
    df.concat_rows(df2)
    assert df.headers == ['TotalTime_ms', 'Displayed_ms']
    assert df.data_table == [[1, None], [2, None], [None, 4], [None, 5]]

    df = asr.DataFrame({'TotalTime_ms':[1,2]})
    df2 = asr.DataFrame({'TotalTime_ms': [3, 4], 'Displayed_ms':[5, 6]})
    df.concat_rows(df2)
    assert df.headers == ['TotalTime_ms', 'Displayed_ms']
    assert df.data_table == [[1, None], [2, None], [3, 5], [4, 6]]

    # data_row_at
    df = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
    assert df.data_row_at(-1) == [3,6]
    assert df.data_row_at(2) == [3,6]
    assert df.data_row_at(1) == [2,5]

    # repeat
    df = asr.DataFrame({'TotalTime_ms':[1], 'Displayed_ms':[4]})
    df2 = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
    assert df.repeat(3) == df2

    # data_row_len
    # (was mislabeled '# repeat'; an exact duplicate of this section removed.)
    df = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
    assert df.data_row_len == 3
    df = asr.DataFrame({'TotalTime_ms':[1,1]})
    assert df.data_row_len == 2

    # data_row_reduce
    df = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
    df_sum = asr.DataFrame({'TotalTime_ms':[3], 'Displayed_ms':[12]})
    assert df.data_row_reduce(sum) == df_sum

    # merge_data_columns
    df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
    df2 = asr.DataFrame({'Displayed_ms':[3,4,5,6]})
    df.merge_data_columns(df2)
    assert df == asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[3,4,5,6]})

    df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
    df2 = asr.DataFrame({'Displayed_ms':[3,4]})
    df.merge_data_columns(df2)
    assert df == asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[3,4]})

    df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
    df2 = asr.DataFrame({'TotalTime_ms':[10,11]})
    df.merge_data_columns(df2)
    assert df == asr.DataFrame({'TotalTime_ms':[10,11,3]})

    df = asr.DataFrame({'TotalTime_ms':[]})
    df2 = asr.DataFrame({'TotalTime_ms':[10,11]})
    df.merge_data_columns(df2)
    assert df == asr.DataFrame({'TotalTime_ms':[10,11]})
def test_parse_run_script_csv_file():
    """parse_run_script_csv_file: CSV with a header row -> DataFrame."""
    # Empty file -> None (no DataFrame at all, not an empty one).
    f = io.StringIO("")
    assert asr.parse_run_script_csv_file(f) == None
    # Common case: first row is the header, remaining rows are int data.
    f = io.StringIO("TotalTime_ms,Displayed_ms\n1,2")
    df = asr.DataFrame({'TotalTime_ms': [1], 'Displayed_ms': [2]})
    pf = asr.parse_run_script_csv_file(f)
    assert pf == df
if __name__ == '__main__':
pytest.main()

View File

@@ -39,22 +39,9 @@ launch_application() {
# if the $ is not escaped, adb shell will try to evaluate $HomeActivity to a variable.
activity=${activity//\$/\\$}
local am_output="$(adb shell am start -S -W "$package"/"$activity")"
verbose_print adb shell am start -S -W "$package"/"$activity"
if [[ $? -ne 0 ]]; then
echo "am start failed" >&2
adb shell am start -S -W "$package"/"$activity"
return 1
fi
# for everything else use the am start "TotalTime" output.
verbose_print "$am_output"
local total_time="$(echo "$am_output" | grep 'TotalTime:' | sed 's/TotalTime: //g')"
verbose_print "total time: $total_time"
# TODO: Extract alternative metrics such as the #reportFullyDrawn.
echo "$total_time"
# pipe this into 'parse_metrics' to parse the output.
}
if [[ $# -lt 2 ]]; then

View File

@@ -83,17 +83,7 @@ logcat_save_timestamp() {
#
# First use 'logcat_save_timestamp'
# Then do whatever action you want.
# Then use 'logcat_from_timestamp $timestamp'
# DEPRECATED foreground logcat reader, deliberately guarded so nothing can
# call it: it would block forever in the foreground. Use
# logcat_from_timestamp_bg instead. NOTE: 'exit 1' terminates the whole
# sourcing script, not just this function.
logcat_from_timestamp() {
  local timestamp="$1"
  shift # drop timestamp from args.
  echo "DONT CALL THIS FUNCTION" >&2
  exit 1
  # Unreachable: kept only as a record of the original invocation.
  verbose_print adb logcat -T \"$timestamp\" \"$@\"
  adb logcat -T "$timestamp" "$@"
}
# Then use 'logcat_from_timestamp_bg $timestamp'
logcat_from_timestamp_bg() {
local timestamp="$1"
shift # drop timestamp from args.
@@ -104,9 +94,10 @@ logcat_from_timestamp_bg() {
# Starting at timestamp $2, wait until we see pattern $3,
# or until a timeout happens in $1 seconds.
# If successful, also echo the line that matched the pattern.
#
# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse.
logcat_wait_for_pattern() {
logcat_select_pattern() {
local timeout="$1"
local timestamp="$2"
local pattern="$3"
@@ -143,6 +134,7 @@ logcat_wait_for_pattern() {
if [[ "$logcat_output:" == *"$pattern"* ]]; then
verbose_print "LOGCAT: " "$logcat_output"
verbose_print "WE DID SEE PATTERN" '<<' "$pattern" '>>.'
echo "$logcat_output"
return_code=0
break
fi
@@ -162,3 +154,45 @@ logcat_wait_for_pattern() {
return $return_code
}
# Starting at timestamp $2, wait until we see pattern $3,
# or until a timeout happens in $1 seconds.
#
# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse.
logcat_wait_for_pattern() {
  # Same as logcat_select_pattern but discards the matched line; callers
  # only care about the exit status (0 = pattern was seen before timeout).
  logcat_select_pattern "$@" > /dev/null
}
# Starting at timestamp $2, wait until we see pattern $3,
# or until a timeout happens in $1 seconds.
# If successful, extract the value with the regular expression pattern in $4
# and return the first capture group.
#
# Set VERBOSE_LOGCAT=1 to debug every line of logcat it tries to parse.
logcat_extract_pattern() {
  local timeout="$1"
  local timestamp="$2"
  local pattern="$3"
  local re_pattern="$4"
  local result
  local exit_code
  # Forward all args; logcat_select_pattern only reads the first three,
  # so the extra $4 is presumably ignored by it -- TODO confirm.
  result="$(logcat_select_pattern "$@")"
  exit_code=$?
  if [[ $exit_code -ne 0 ]]; then
    # Pattern never appeared (or timed out): propagate without printing.
    return $exit_code
  fi
  # Reduce the matched line to the first capture group of re_pattern
  # (sed BRE syntax: \(...\) group, \1 backreference).
  echo "$result" | sed 's/'"$re_pattern"'/\1/g'
}
# Join array elements into one line separated by a delimiter.
# NOTE(review): "$*" joins with the FIRST character of IFS only, so a
# multi-character $1 delimiter would be truncated -- callers pass ','.
# FOO=(a b c)
# join_by , "${FOO[@]}" #a,b,c
join_by() {
  local IFS="$1"
  shift
  echo "$*"
}

View File

@@ -0,0 +1,25 @@
#!/bin/bash
#
# Copyright 2019, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Per-package extra metrics for the Camera app. This file is sourced by
# parse_metrics, so parse_metric_from_logcat, $timeout and $timestamp are
# already in scope here.

# 05-06 14:51:17.688 29691 29897 I CAM_Timing: CameraActivity: START -> ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED: 385ms
pattern="CAM_Timing: CameraActivity: START -> ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED:"
re_pattern='.*ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED: \([[:digit:]]\+\)ms'
parse_metric_from_logcat "ACTIVITY_FIRST_PREVIEW_FRAME_RECEIVED_ms" "$pattern" "$re_pattern"

# 05-06 14:51:17.724 29691 29691 I CAM_Timing: CameraActivity: START -> ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED: 421ms
pattern="CAM_Timing: CameraActivity: START -> ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED:"
re_pattern='.*ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED: \([[:digit:]]\+\)ms'
parse_metric_from_logcat "ACTIVITY_FIRST_PREVIEW_FRAME_RENDERED_ms" "$pattern" "$re_pattern"

View File

@@ -0,0 +1,181 @@
#!/bin/bash
#
# Copyright 2019, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Print the command-line help text for parse_metrics to stdout.
usage() {
  cat <<EOF
Usage: launch_application package activity | parse_metrics --package <name> --timestamp <timestamp> [OPTIONS]...

Reads from stdin the result of 'am start' metrics. May also parse logcat
for additional metrics.

Output form:

  MetricName_unit=numeric_value
  MetricName2_unit=numeric_value2

This may block until all desired metrics are parsed from logcat.
To get a list of metrics without doing real parsing, use --simulate.

To add package-specific metrics, add a script called 'metrics/\$full_package_name'
that exposes additional metrics in same way as above.

(required)
  -p, --package <name>        package of the app that is being used
  -ts, --timestamp <name>     logcat timestamp [only looks at logcat entries after this timestamp].

(optional)
  -s, --simulate              prints dummy values instead of real metrics
  -a, --activity <name>       activity to use (default: inferred)
  -h, --help                  usage information (this)
  -v, --verbose               enable extra verbose printing
  -t, --timeout <sec>         how many seconds to timeout when trying to wait for logcat to change
EOF
}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "$DIR/lib/common"
package=""
activity=""
timeout=10
simulate="n"
# Parse this script's command-line flags into the globals:
#   package, activity, timeout, timestamp, simulate, verbose.
# Exits 0 on --help and 1 on any unrecognized argument.
parse_arguments() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -h|--help)
        usage
        exit 0
        ;;
      -p|--package)
        package="$2"
        shift  # consume the flag's value.
        ;;
      -a|--activity)
        activity="$2"
        shift
        ;;
      -v|--verbose)
        # Exported so child scripts (e.g. lib/common helpers) also print.
        export verbose="y"
        ;;
      -t|--timeout)
        timeout="$2"
        shift
        ;;
      -ts|--timestamp)
        timestamp="$2"
        shift
        ;;
      -s|--simulate)
        simulate="y"
        ;;
      *)
        echo "Invalid argument: $1" >&2
        exit 1
    esac
    shift
  done
}
# Main entry point
if [[ $# -eq 0 ]]; then
usage
exit 1
else
parse_arguments "$@"
# if we do not have a package, exit early with an error
[[ "$package" == "" ]] && echo "--package not specified" 1>&2 && exit 64
# ignore timestamp for --simulate. it's optional.
if [[ $simulate == y ]]; then
timestamp=0
fi
# if we do not have timestamp, exit early with an error
[[ "$timestamp" == "" ]] && echo "--timestamp not specified" 1>&2 && exit 64
if [[ "$activity" == "" ]] && [[ "$simulate" != "y" ]]; then
activity="$(get_activity_name "$package")"
if [[ "$activity" == "" ]]; then
echo "Activity name could not be found, invalid package name?" 1>&2
exit 64
else
verbose_print "Activity name inferred: " "$activity"
fi
fi
fi
# Print one 'metric_name=value' line, extracting the value from logcat.
#   $1 metric name to emit, $2 logcat match pattern, $3 sed extraction regex.
# Uses the script globals $simulate, $timeout and $timestamp.
# Under --simulate a dummy value of -1 is printed instead.
# On timeout/no-match, prints a negative value and returns non-zero.
parse_metric_from_logcat() {
  local metric_name="$1"
  local pattern="$2"
  local re_pattern="$3"
  local retcode

  # parse logcat for 'Displayed...' and that other one...
  # 05-06 14:34:08.854 29460 29481 I ActivityTaskManager: Displayed com.google.android.dialer/.extensions.GoogleDialtactsActivity: +361ms
  verbose_print "parse_metric_from_logcat: $re_pattern"

  # Always emit the metric name so the output stays machine-parseable.
  echo -ne "$metric_name="

  if [[ $simulate == y ]]; then
    echo "-1"
    return 0
  fi

  logcat_extract_pattern "$timeout" "$timestamp" "$pattern" "$re_pattern"
  retcode=$?
  if [[ $retcode -ne 0 ]]; then
    # Timed out before finding the pattern. Could also mean the pattern is wrong.
    # BUGFIX: was 'echo "-$?"', which always printed "-0" because $? had
    # already been reset by the [[ ]] test above.
    echo "-$retcode"
  fi
  return $retcode
}
total_time="-1"
if [[ $simulate != y ]]; then
verbose_print 'logcat timestamp NOW: ' $(logcat_save_timestamp)
# parse stdin for 'am start' result
while read -t "$timeout" -r input_line; do
verbose_print 'stdin:' "$input_line"
if [[ $input_line == *TotalTime:* ]]; then
total_time="$(echo "$input_line" | sed 's/TotalTime: \([[:digit:]]\+\)/\1/g')"
# but keep reading the rest from stdin until <EOF>
fi
done
fi
echo "TotalTime_ms=$total_time"
# parse logcat for 'Displayed...' and that other one...
# 05-06 14:34:08.854 29460 29481 I ActivityTaskManager: Displayed com.google.android.dialer/.extensions.GoogleDialtactsActivity: +361ms
pattern="ActivityTaskManager: Displayed ${package}"
re_pattern='.*Displayed[[:blank:]]\+'"${package}"'[/][^[:blank:]]\+[[:blank:]]+\([[:digit:]]\+\).*'
parse_metric_from_logcat "Displayed_ms" "$pattern" "$re_pattern"
# also call into package-specific scripts if there are additional metrics
if [[ -x "$DIR/metrics/$package" ]]; then
source "$DIR/metrics/$package" "$timestamp"
else
verbose_print parse_metrics: no per-package metrics script found at "$DIR/metrics/$package"
fi

View File

@@ -317,6 +317,74 @@ configure_compiler_filter() {
# No-op if this option was not passed in.
configure_compiler_filter "$compiler_filter" "$package" "$activity" || exit 1
# Convert 'a=b\nc=d\ne=f\n...' (passed as one string) into 'b,d,f,...'
parse_metrics_output_string() {
  # single string with newlines in it.
  local input="$1"
  local metric_name
  local metric_value
  local rest
  local all_lines=()
  local all_values=()
  local i

  # (n1=v1 n2=v2 n3=v3 ...)
  readarray -t all_lines <<< "$input"

  for i in "${all_lines[@]}"
  do
    verbose_print "parse_metrics_output: element '$i'"
    # name=value
    IFS='=' read -r metric_name metric_value rest <<< "$i"
    verbose_print "parse_metrics_output: metric_value '$metric_value'"
    # BUGFIX: collect values into a SEPARATE array. Previously the values were
    # appended back onto the same array being iterated, so the joined output
    # contained both the raw 'name=value' lines and the values.
    # (value1 value2 value3 ...)
    all_values+=(${metric_value})
  done

  # "value1,value2,value3,..."
  join_by ',' "${all_values[@]}"
}
# Convert 'a=b\nc=d\ne=f\n...' (read from stdin) into 'b,d,f,...'
# i.e. emit only the metric VALUES as one CSV row.
parse_metrics_output() {
  local metric_name
  local metric_value
  local rest
  local all_metrics=()
  # Each stdin line is 'name=value'; keep only the value part.
  while IFS='=' read -r metric_name metric_value rest; do
    verbose_print "metric: $metric_name, value: $metric_value; rest: $rest"
    # NOTE(review): intentionally unquoted, so an empty value is dropped
    # from the array -- confirm that is the desired behavior.
    all_metrics+=($metric_value)
  done
  join_by ',' "${all_metrics[@]}"
}
# Convert 'a=b\nc=d\ne=f\n...' (read from stdin) into 'a,c,e,...'
# i.e. emit only the metric NAMES as one CSV header row.
# (The original comment said 'b,d,f,...' but this collects $metric_name.)
parse_metrics_header() {
  local metric_name
  local metric_value
  local rest
  local all_metrics=()
  # Each stdin line is 'name=value'; keep only the name part.
  while IFS='=' read -r metric_name metric_value rest; do
    verbose_print "metric: $metric_name, value: $metric_value; rest: $rest"
    all_metrics+=($metric_name)
  done
  join_by ',' "${all_metrics[@]}"
}
metrics_header="$("$DIR/parse_metrics" --package "$package" --activity "$activity" --simulate | parse_metrics_header)"
# TODO: This loop logic could probably be moved into app_startup_runner.py
for ((i=0;i<count;++i)) do
verbose_print "=========================================="
@@ -335,7 +403,7 @@ for ((i=0;i<count;++i)) do
pre_launch_timestamp="$(logcat_save_timestamp)"
# TODO: multiple metrics output.
total_time="$(timeout $timeout $DIR/launch_application "$package" "$activity")"
total_time="$(timeout $timeout "$DIR/launch_application" "$package" "$activity" | "$DIR/parse_metrics" --package "$package" --activity "$activity" --timestamp "$pre_launch_timestamp" | parse_metrics_output)"
if [[ $? -ne 0 ]]; then
echo "WARNING: Skip bad result, try iteration again." >&2
@@ -353,22 +421,21 @@ for ((i=0;i<count;++i)) do
echo "Iteration $i. Total time was: $total_time"
timings_array+=($total_time)
timings_array+=("$total_time")
done
# drop the first result which is usually garbage.
timings_array=("${timings_array[@]:1}")
# Print the CSV header first.
echo_to_output_file "$metrics_header"
# Print out interactive/debugging timings and averages.
# Other scripts should use the --output flag and parse the CSV.
for tim in "${timings_array[@]}"; do
echo_to_output_file -ne "$tim,"
echo_to_output_file "$tim"
done
echo_to_output_file ""
average_string=$(echo "${timings_array[@]}" | awk '{s+=$0}END{print "Average:",s/NR}' RS=" ")
echo -ne ${average_string}.
if [[ x$output != x ]]; then
echo " Saved results to '$output'"
fi

View File

@@ -263,9 +263,19 @@ launch_application_and_wait_for_trace() {
# Ensure application isn't running already.
remote_pkill "$package"
# The 5-second trace starts immediately, so it captures
# the home screen first.
# There is no way to abort the trace,
# so just wait for it to complete instead.
sleep 30
local time_now="$(logcat_save_timestamp)"
local retcode=0
verbose_print "Drop caches for non-warm start."
# Drop all caches to get cold starts.
adb shell "echo 3 > /proc/sys/vm/drop_caches"
verbose_print "now launching application"
# Launch an application
"$APP_STARTUP_DIR"/launch_application "$package" "$activity"
@@ -317,6 +327,9 @@ collector_main() {
# Wait for perfetto trace to finished writing itself out.
launch_application_and_wait_for_trace "$package" "$activity" || return $?
# Pull the perfetto trace for manual inspection.
iorapd_perfetto_pull_trace_file "$package" "$activity" "perfetto_trace.pb"
# Compile the trace so that the next app run can use prefetching.
iorapd_compiler_for_app_trace "$package" "$activity" "$inodes" || return $?

View File

@@ -88,6 +88,26 @@ iorapd_perfetto_purge_app_trace() {
adb shell "[[ -f '$remote_path' ]] && rm -f '$remote_path' || exit 0"
}
# Pull the remote perfetto trace file into a local file.
#   $1 package, $2 activity, $3 local output path.
# Returns 1 if the trace is missing on the device or the local output
# directory cannot be created; otherwise returns the 'adb pull' status.
iorapd_perfetto_pull_trace_file() {
  local package="$1"
  local activity="$2"
  local output_file="$3" # local path
  # Device-side trace path derived from package+activity.
  local compiled_path="$(_iorapd_path_to_data_file "$package" "$activity" "perfetto_trace.pb")"

  if ! adb shell "[[ -f '$compiled_path' ]]"; then
    echo "Error: Remote path '$compiled_path' invalid" >&2
    return 1
  fi
  if ! mkdir -p "$(dirname "$output_file")"; then
    echo "Error: Fail to make output directory for '$output_file'" >&2
    return 1
  fi
  verbose_print adb pull "$compiled_path" "$output_file"
  adb pull "$compiled_path" "$output_file"
}
# Compile a perfetto trace for a given application.
# This requires the app has run at least once with perfetto tracing enabled.
iorapd_compiler_for_app_trace() {
@@ -114,6 +134,7 @@ iorapd_compiler_for_app_trace() {
# inodes file needs to be on the device for iorap.cmd.compiler to access it
local remote_inodes=/data/local/tmp/prefetch/inodes.txt
adb shell "mkdir -p \"$(dirname "$remote_inodes")\"" || return 1
verbose_print adb push "$inodes" "$remote_inodes"
adb push "$inodes" "$remote_inodes"
verbose_print 'iorapd-compiler: compile app trace in ' "$input_path"
@@ -130,7 +151,7 @@ iorapd_compiler_for_app_trace() {
fi
# Clean up inodes file we just pushed.
adb shell "[[ -f '$remote_inodes' ]] && rm -f '$remote_inodes'"
# adb shell "[[ -f '$remote_inodes' ]] && rm -f '$remote_inodes'"
return $retcode
}