Merge "Translate Android log entries to stats_log proto Test: tested on local device"

This commit is contained in:
TreeHugger Robot
2017-09-19 08:14:55 +00:00
committed by Android (Google) Code Review
12 changed files with 311 additions and 501 deletions

View File

@@ -46,6 +46,7 @@ LOCAL_SRC_FILES := \
src/LogReader.cpp \
src/main.cpp \
src/DropboxWriter.cpp \
src/parse_util.cpp \
src/StatsLogProcessor.cpp \
src/stats_log.proto \
src/statsd_config.proto \

View File

@@ -15,9 +15,7 @@
*/
#include <android/os/DropBoxManager.h>
#include <android-base/file.h>
#include <cutils/log.h>
#include <androidfw/ZipUtils.h>
#include <stdio.h>
#include "DropboxReader.h"
@@ -25,14 +23,14 @@ using android::sp;
using android::String16;
using android::binder::Status;
using android::base::unique_fd;
using android::os::statsd::EventMetricData;
using android::os::DropBoxManager;
using android::os::statsd::StatsLogEntry;
using android::ZipUtils;
using std::vector;
status_t DropboxReader::readStatsLogs(FILE* out, const string& tag, long msec) {
sp<DropBoxManager> dropbox = new DropBoxManager();
StatsLogList logList;
StatsLogReport logReport;
long timestamp = msec;
// instead of while(true), put a hard limit 1000. Dropbox won't have more than 1000 files.
@@ -51,23 +49,23 @@ status_t DropboxReader::readStatsLogs(FILE* out, const string& tag, long msec) {
timestamp = entry.getTimestamp();
if (entry.getFlags() & DropBoxManager::IS_GZIPPED) {
if (!parseFromGzipFile(fd, logList)) {
if (!parseFromGzipFile(fd, logReport)) {
// Failed to parse from the file. Continue to fetch the next entry.
continue;
}
} else {
if (!parseFromFile(fd, logList)) {
if (!parseFromFile(fd, logReport)) {
// Failed to parse from the file. Continue to fetch the next entry.
continue;
}
}
printLog(out, logList);
printLog(out, logReport);
}
return android::OK;
}
bool DropboxReader::parseFromGzipFile(const unique_fd& fd, StatsLogList& list) {
bool DropboxReader::parseFromGzipFile(const unique_fd& fd, StatsLogReport& logReport) {
FILE *file = fdopen(fd, "r");
bool result = false;
bool scanResult;
@@ -80,7 +78,7 @@ bool DropboxReader::parseFromGzipFile(const unique_fd& fd, StatsLogList& list) {
if (scanResult && method == kCompressDeflated) {
vector<uint8_t> buf(uncompressedLen);
if (ZipUtils::inflateToBuffer(file, &buf[0], uncompressedLen, compressedLen)) {
if (list.ParseFromArray(&buf[0], uncompressedLen)) {
if (logReport.ParseFromArray(&buf[0], uncompressedLen)) {
result = true;
}
}
@@ -92,29 +90,30 @@ bool DropboxReader::parseFromGzipFile(const unique_fd& fd, StatsLogList& list) {
}
// parse a non zipped file.
bool DropboxReader::parseFromFile(const unique_fd& fd, StatsLogList& list) {
bool DropboxReader::parseFromFile(const unique_fd& fd, StatsLogReport& logReport) {
string content;
if (!android::base::ReadFdToString(fd, &content)) {
ALOGE("Failed to read file");
return false;
}
if (!list.ParseFromString(content)) {
if (!logReport.ParseFromString(content)) {
ALOGE("failed to parse log entry from data");
return false;
}
return true;
}
void DropboxReader::printLog(FILE* out, const StatsLogList& list) {
for (int i = 0; i < list.stats_log_entry_size(); i++) {
const StatsLogEntry entry = list.stats_log_entry(i);
// TODO: print pretty
fprintf(out, "time_msec=%lld, type=%d, aggregate_type=%d, uid=%d, pid=%d ",
entry.start_report_millis(), entry.type(), entry.aggregate_type(),
entry.uid(), entry.pid());
for (int j = 0; j < entry.pairs_size(); j++) {
fprintf(out, "msg=%s ", entry.pairs(j).value_str().c_str());
void DropboxReader::printLog(FILE* out, const StatsLogReport& logReport) {
fprintf(out, "start_time_msec=%lld, end_time_msec=%lld, ",
logReport.start_report_millis(), logReport.end_report_millis());
for (int i = 0; i < logReport.event_metrics().data_size(); i++) {
EventMetricData eventMetricData = logReport.event_metrics().data(i);
for (int j = 0; j < eventMetricData.key_value_pair_size(); j++) {
fprintf(out, "key=%d, ", eventMetricData.key_value_pair(j).key());
fprintf(out, "value_str=%s ", eventMetricData.key_value_pair(j).value_str().c_str());
fprintf(out, "value_int=%lld ", eventMetricData.key_value_pair(j).value_int());
fprintf(out, "value_float=%f ", eventMetricData.key_value_pair(j).value_float());
}
fprintf(out, "\n");
}
fprintf(out, "\n");
}

View File

@@ -23,7 +23,7 @@
#include <stdio.h>
using android::base::unique_fd;
using android::os::statsd::StatsLogList;
using android::os::statsd::StatsLogReport;
using android::status_t;
using std::string;
@@ -33,13 +33,13 @@ public:
static status_t readStatsLogs(FILE* out, const string& tag, long msec);
private:
static bool parseFromFile(const unique_fd& fd, StatsLogList& list);
static bool parseFromGzipFile(const unique_fd& fd, StatsLogList& list);
static void printLog(FILE* out, const StatsLogList& list);
static bool parseFromFile(const unique_fd& fd, StatsLogReport& logReport);
static bool parseFromGzipFile(const unique_fd& fd, StatsLogReport& logReport);
static void printLog(FILE* out, const StatsLogReport& logReport);
enum {
kCompressStored = 0, // no compression
kCompressDeflated = 8, // standard deflate
};
};
#endif //DROPBOX_READER_H
#endif //DROPBOX_READER_H

View File

@@ -15,7 +15,6 @@
*/
#include <android/os/DropBoxManager.h>
#include <cutils/log.h>
#include "DropboxWriter.h"
@@ -26,36 +25,35 @@ using android::String16;
using std::vector;
DropboxWriter::DropboxWriter(const string& tag)
: mTag(tag), mLogList(), mBufferSize(0) {
: mTag(tag), mLogReport(), mBufferSize(0) {
}
void DropboxWriter::addEntry(const StatsLogEntry& entry) {
flushIfNecessary(entry);
StatsLogEntry* newEntry = mLogList.add_stats_log_entry();
newEntry->CopyFrom(entry);
mBufferSize += entry.ByteSize();
void DropboxWriter::addStatsLogReport(const StatsLogReport& log) {
mLogReport = log;
flushIfNecessary(log);
mBufferSize += log.ByteSize();
}
void DropboxWriter::flushIfNecessary(const StatsLogEntry& entry) {
// The serialized size of the StatsLogList is approximately the sum of the serialized size of
// every StatsLogEntry inside it.
if (entry.ByteSize() + mBufferSize > kMaxSerializedBytes) {
flush();
}
void DropboxWriter::flushIfNecessary(const StatsLogReport& log) {
// TODO: Decide to flush depending on the serialized size of the StatsLogReport.
// if (entry.ByteSize() + mBufferSize > kMaxSerializedBytes) {
// flush();
// }
flush();
}
void DropboxWriter::flush() {
// now we get an exact byte size of the output
const int numBytes = mLogList.ByteSize();
const int numBytes = mLogReport.ByteSize();
vector<uint8_t> buffer(numBytes);
sp<DropBoxManager> dropbox = new DropBoxManager();
mLogList.SerializeToArray(&buffer[0], numBytes);
mLogReport.SerializeToArray(&buffer[0], numBytes);
Status status = dropbox->addData(String16(mTag.c_str()), &buffer[0],
numBytes, 0 /* no flag */);
if (!status.isOk()) {
ALOGE("failed to write to dropbox");
//TODO: What to do if flush fails??
}
mLogList.Clear();
mLogReport.Clear();
mBufferSize = 0;
}

View File

@@ -20,8 +20,7 @@
#include <frameworks/base/cmds/statsd/src/stats_log.pb.h>
using std::string;
using android::os::statsd::StatsLogEntry;
using android::os::statsd::StatsLogList;
using android::os::statsd::StatsLogReport;
class DropboxWriter {
public:
@@ -30,7 +29,7 @@ public:
*/
DropboxWriter(const string& tag);
void addEntry(const StatsLogEntry& entry);
void addStatsLogReport(const StatsLogReport& log);
/* Request a flush to dropbox. */
void flush();
@@ -46,11 +45,11 @@ private:
const string mTag;
/* StatsLogList is a wrapper for storing a list of StatsLogEntry */
StatsLogList mLogList;
/* Data that was captured for a single metric over a given interval of time. */
StatsLogReport mLogReport;
/* Current *serialized* size of the logs kept in memory.
To save computation, we will not calculate the size of the StatsLogList every time when a new
To save computation, we will not calculate the size of the StatsLogReport every time when a new
entry is added, which would recursively call ByteSize() on every log entry. Instead, we keep
the sum of all individual stats log entry sizes. The size of a proto is approximately the sum
of the size of all member protos.
@@ -59,7 +58,7 @@ private:
/* Check if the buffer size exceeds the max buffer size when the new entry is added, and flush
the logs to dropbox if true. */
void flushIfNecessary(const StatsLogEntry& entry);
void flushIfNecessary(const StatsLogReport& log);
};

View File

@@ -16,14 +16,13 @@
#include <StatsLogProcessor.h>
#include <log/event_tag_map.h>
#include <log/logprint.h>
#include <log/log_event_list.h>
#include <utils/Errors.h>
#include <cutils/log.h>
#include <frameworks/base/cmds/statsd/src/stats_log.pb.h>
#include <parse_util.h>
using namespace android;
using android::os::statsd::StatsLogEntry;
using android::os::statsd::EventMetricData;
using android::os::statsd::StatsLogReport;
StatsLogProcessor::StatsLogProcessor() : m_dropbox_writer("all-logs")
{
@@ -57,12 +56,12 @@ StatsLogProcessor::OnLogEvent(const log_msg& msg)
// dump all statsd logs to dropbox for now.
// TODO: Add filtering, aggregation, etc.
if (err == NO_ERROR) {
StatsLogEntry logEntry;
logEntry.set_uid(entry.uid);
logEntry.set_pid(entry.pid);
logEntry.set_start_report_millis(entry.tv_sec / 1000 + entry.tv_nsec / 1000 / 1000);
logEntry.add_pairs()->set_value_str(entry.message, entry.messageLen);
m_dropbox_writer.addEntry(logEntry);
StatsLogReport logReport;
logReport.set_start_report_millis(entry.tv_sec / 1000 + entry.tv_nsec / 1000 / 1000);
EventMetricData *eventMetricData = logReport.mutable_event_metrics()->add_data();
*eventMetricData = parse(msg);
m_dropbox_writer.addStatsLogReport(logReport);
}
}
@@ -71,4 +70,4 @@ StatsLogProcessor::UpdateConfig(const int config_source, StatsdConfig config)
{
m_configs[config_source] = config;
ALOGD("Updated configuration for source %i", config_source);
}
}

View File

@@ -16,12 +16,8 @@
#ifndef STATS_LOG_PROCESSOR_H
#define STATS_LOG_PROCESSOR_H
#include "LogReader.h"
#include "DropboxWriter.h"
#include "parse_util.h"
#include <frameworks/base/cmds/statsd/src/statsd_config.pb.h>
#include <log/logprint.h>
#include <stdio.h>
#include <unordered_map>
using android::os::statsd::StatsdConfig;

View File

@@ -0,0 +1,113 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <parse_util.h>
#include <log/log_event_list.h>
using android::os::statsd::EventMetricData;
using android::os::statsd::KeyId;
using android::os::statsd::KeyId_IsValid;
using android::os::statsd::KeyValuePair;
using android::os::statsd::TagId;
using android::os::statsd::TagId_IsValid;
EventMetricData parse(log_msg msg)
{
// dump all statsd logs to dropbox for now.
// TODO: Add filtering, aggregation, etc.
EventMetricData eventMetricData;
android_log_context context = create_android_log_parser(const_cast<log_msg*>(&msg)->msg()
+ sizeof(uint32_t),
const_cast<log_msg*>(&msg)->len()
- sizeof(uint32_t));
android_log_list_element elem;
if (context) {
memset(&elem, 0, sizeof(elem));
size_t index = 0;
int32_t key = -1;
int32_t tag = -1;
do {
elem = android_log_read_next(context);
switch ((int)elem.type) {
case EVENT_TYPE_INT:
if (index == 0) {
tag = elem.data.int32;
if (TagId_IsValid(tag)) {
eventMetricData.set_tag(static_cast<TagId>(tag));
} else {
break;
}
} else if (index % 2 == 1) {
key = elem.data.int32;
} else if (KeyId_IsValid(key)) {
int32_t val = elem.data.int32;
KeyValuePair *keyValuePair = eventMetricData.add_key_value_pair();
keyValuePair->set_key(static_cast<KeyId>(key));
keyValuePair->set_value_int(val);
} else {
}
index++;
break;
case EVENT_TYPE_FLOAT:
if (index % 2 == 0 && KeyId_IsValid(key)) {
float val = elem.data.float32;
KeyValuePair *keyValuePair = eventMetricData.add_key_value_pair();
keyValuePair->set_key(static_cast<KeyId>(key));
keyValuePair->set_value_float(val);
}
index++;
break;
case EVENT_TYPE_STRING:
if (index % 2 == 0 && KeyId_IsValid(key)) {
char* val = elem.data.string;
KeyValuePair *keyValuePair = eventMetricData.add_key_value_pair();
keyValuePair->set_key(static_cast<KeyId>(key));
keyValuePair->set_value_str(val);
}
index++;
break;
case EVENT_TYPE_LONG:
if (index % 2 == 0 && KeyId_IsValid(key)) {
int64_t val = elem.data.int64;
KeyValuePair *keyValuePair = eventMetricData.add_key_value_pair();
keyValuePair->set_key(static_cast<KeyId>(key));
keyValuePair->set_value_int(val);
}
index++;
break;
case EVENT_TYPE_LIST:
break;
case EVENT_TYPE_LIST_STOP:
break;
case EVENT_TYPE_UNKNOWN:
break;
default:
elem.complete = true;
break;
}
if (elem.complete) {
break;
}
} while ((elem.type != EVENT_TYPE_UNKNOWN) && !elem.complete);
android_log_destroy(&context);
}
return eventMetricData;
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PARSE_UTIL_H
#define PARSE_UTIL_H
#include "LogReader.h"
#include "DropboxWriter.h"
#include <log/logprint.h>
using android::os::statsd::EventMetricData;
EventMetricData parse(const log_msg msg);
#endif // PARSE_UTIL_H

View File

@@ -22,10 +22,17 @@ option optimize_for = LITE_RUNTIME;
option java_package = "com.android.internal.logging";
option java_outer_classname = "StatsConstantsProto";
message StatsConstants {
// Event type.
enum Type {
WAKELOCK = 1;
SCREEN= 2;
}
enum TagId {
WAKELOCK = 1;
SCREEN = 1003;
}
enum KeyId {
STATE = 1;
ANOTHER_STATE = 2;
EVENT_TIMESTAMP = 1001;
PACKAGE_NAME = 1002;
PACKAGE_VERSION = 1003;
PACKAGE_VERSION_STRING = 1004;
ATTRIBUTION_CHAIN = 1005;
}

View File

@@ -25,53 +25,53 @@ option java_outer_classname = "StatsLog";
import "frameworks/base/cmds/statsd/src/statsd_config.proto";
import "frameworks/base/cmds/statsd/src/stats_constants.proto";
// StatsLogEntry is a generic proto holding a single metrics data.
message StatsLogEntry {
// Type of stats.
optional android.os.statsd.StatsConstants.Type type = 1;
message KeyValuePair {
optional KeyId key = 1;
// Aggregation type of the data.
optional android.os.statsd.TrackedAggregateType aggregate_type = 2;
// Start timestamp of the interval. Timestamp for event-type data will have
// equal value for start_report_millis and end_report_millis.
optional int64 start_report_millis = 3;
// End timestamp of the interval.
optional int64 end_report_millis = 4;
// Package information for application-level data.
optional string package_name = 5;
optional int32 package_version = 6;
optional string package_version_string = 7;
// UID associated with the data.
optional int32 uid = 8;
// PID associated with the data.
optional int32 pid = 9;
// Payload contains key value pairs of the data from statsd.
message KeyValuePair {
// Integer representation of data type.
optional int32 key = 1;
oneof value {
string value_str = 2;
int64 value_int = 3;
bool value_bool = 4;
}
oneof value {
string value_str = 2;
int64 value_int = 3;
bool value_bool = 4;
float value_float = 5;
}
repeated KeyValuePair pairs = 10;
// Next tag: 11
}
// Data captured for a given metric during a given period of time.
message StatsLogList {
// Unique ID for this metric.
message EventMetricData {
optional TagId tag = 1;
repeated KeyValuePair key_value_pair = 2;
}
message CountBucketInfo {
optional int64 start_bucket_millis = 1;
optional int64 end_bucket_millis = 2;
optional int64 count = 3;
}
message CountMetricData {
repeated KeyValuePair dimension = 1;
repeated CountBucketInfo bucket_info = 2;
}
message StatsLogReport {
optional int32 metric_id = 1;
// List of stats log entry.
repeated StatsLogEntry stats_log_entry = 2;
optional int64 start_report_millis = 2;
optional int64 end_report_millis = 3;
message EventMetricDataWrapper {
repeated EventMetricData data = 1;
}
message CountMetricDataWrapper {
repeated CountMetricData data = 1;
}
oneof data {
EventMetricDataWrapper event_metrics = 4;
CountMetricDataWrapper count_metrics = 5;
}
}

View File

@@ -1,40 +1,3 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Version 1.
// Important: Update the version line above before copy-pasting this file
// from/to Google3 and Android repository.
// This proto needs to be manually synced between Google3 and Android versions.
/*
* Note about semantics of the buckets:
* In this current proto scheme, the buckets are updated only when an event
* occurs. In the case of durations, this means that we update at the end of a
* duration.
*
* For example, suppose we have buckets at every 10 min:
* 0, 10, 20, 30, 40, etc.
* And then suppose a wakelock is first held starting at min 5 and lasts for 21
* mins. Then the buckets for 0-10 and 10-20 don't contain anything and inside
* the bucket for 20-30, we add the value of 21 minutes.
*
* Also note that buckets are only aligned to wall-clock (no custom time-bases).
*/
syntax = "proto2";
package android.os.statsd;
@@ -43,30 +6,21 @@ option optimize_for = LITE_RUNTIME;
option java_package = "com.android.internal.os";
option java_outer_classname = "StatsdConfigProto";
// KeyMatcher specifies how to match the key.
message KeyMatcher {
oneof contents {
int32 key = 1; // ID of the key to match.
import "frameworks/base/cmds/statsd/src/stats_constants.proto";
// Special matcher for package name. This will match either the package name
// or the UID (statsD will map the UID of the source event to a package
// name). Specify the package name to match in eq_string.
bool use_package = 2;
}
message KeyMatcher {
optional KeyId key = 1;
optional bool as_package_name = 2 [ default = false ];
}
// FieldMatcher allows us to match specific fields/keys in an event.
message FieldMatcher {
message KeyValueMatcher {
optional KeyMatcher key_matcher = 1;
oneof value_matcher {
// Equality matchers
bool eq_bool = 2;
string eq_string = 3;
int32 eq_int32 = 4;
int64 eq_int64 = 5;
// Numeric comparisons;
int32 lt_int32 = 6;
int32 gt_int32 = 7;
int64 lt_int64 = 8;
@@ -76,374 +30,90 @@ message FieldMatcher {
}
}
enum OperationType {
enum LogicalOperation {
AND = 1;
OR = 2;
NOT = 3; // Must have only a single operand when using NOT operator.
NAND = 4; // NAND and NOR as conveniences to avoid NOT+(AND/OR)-layers.
NOT = 3;
NAND = 4;
NOR = 5;
}
enum TrackedAggregateType {
// IS_RUNNING; // whether it is currently running
VALUE_COUNT = 1; // count number of events
VALUE_SUM = 2;
VALUE_MAX = 3;
VALUE_MIN = 4;
DURATION_SUM = 5; // cumulative total time
DURATION_MAX = 6; // longest continuously-on time
DURATION_MIN = 7; // shortest continuously-on time
//DURATION_CURRENT = 6; // current continuously-on time (not bucketed)
message SimpleLogEntryMatcher {
repeated TagId tag = 1;
repeated KeyValueMatcher key_value_matcher = 2;
}
// Assume the events come in with a tag and an array of (key, value) tuples
// where the key must be an int32 and value can be any type.
message LineMatcher {
// For now, we assume that we don't flatten the tags (ie, one tag corresponds
// to screen-on and screen-off events and key 1 represents ON or OFF).
repeated int32 tag = 1; // Must match at least one of the tags.
message LogEntryMatcher {
optional string name = 1;
message Nested {
optional OperationType operation = 1;
repeated LineMatcher matcher = 2;
message Combination {
optional LogicalOperation operation = 1;
repeated LogEntryMatcher matcher = 2;
}
oneof contents {
FieldMatcher requirement = 2;
Nested nested = 3;
SimpleLogEntryMatcher simple_log_entry_matcher = 2;
Combination combination = 3;
}
}
// Defines when an AggregateCounter or EventMatcher applies.
message SimpleCondition {
optional string start = 1;
optional string stop = 2;
optional bool count_nesting = 3 [default = true];
optional string stop_all = 4;
}
message Condition {
message Nested {
optional OperationType operation = 1;
repeated Condition nested_conditions = 2; // operands that are themselves
// conditions (recursively)
}
optional string name = 1;
// Leaf node of condition.
message RangeMatcher {
optional LineMatcher start = 1;
optional LineMatcher stop = 2;
optional bool count_nesting = 3
[default = true]; // true if "start start stop" is still
// considered running
message Combination {
optional LogicalOperation operation = 1;
// Configure which fields define the slices. These fields must be present in
// both the start and stop lines. Note that this can be a subset of all the
// slices defined in the AggregateCounter.
// For example, if the counter slices on both app name and wake lock name,
// we can define that this range only slices on app name.
repeated KeyMatcher slice = 4;
repeated string condition = 2;
}
oneof contents {
RangeMatcher range = 1; // Represents a leaf node.
Nested nested = 2; // Represents a non-leaf node.
SimpleCondition simple_condition = 2;
Combination combination = 3;
}
}
// Emits matching events to statsd event buffer.
message EventMatcher {
// Tracks what configuration led to uploading of this event.
optional int32 metric_id = 1;
// LineMatcher for the event to emit.
optional LineMatcher what = 2;
optional Condition condition = 3;
// TODO: Have a clear use-case for this in P or-else drop this for P.
message Filter {
}
optional Filter filter = 4;
message Bucket {
optional int64 bucket_size_millis = 1;
}
// Hard-code the possible metrics that we can pull.
// For example, NETSTATS_BY_UID would provide network usage per uid.
// We should treat the results like a batch of individual log events, and we
// should process them one-by-one to re-use our LineMatcher logic.
enum PulledMetricSource {
NETSTATS = 1;
message EventMetric {
optional int64 metric_id = 1;
optional string what = 2;
optional string condition = 3;
}
message AggregateCounter { // previously called Timer
// Specifies which fields in the message act as dimensions.
// For both pushed and pulled metrics, we assume every record has all the
// dimensions set.
message Slicer {
repeated KeyMatcher keys = 1;
}
optional Slicer slicer = 1;
message CountMetric {
optional int64 metric_id = 1;
message ValueSource {
message PushedMetric {
// LineMatcher for the event to apply.
// Slicing (which keys act as dimensions) should not be specified here.
optional LineMatcher what = 1;
optional string what = 2;
// Only needed if one key should be treated as the value.
optional int32 value_key = 2;
}
optional string condition = 3;
// The values for pulled metrics are computed and aggregated at the end of
// the condition.
message PulledMetric {
optional bool compute_diff =
1; // If we want the diff (if this
// metric is pulled when condition opens/closes).
optional PulledMetricSource metric = 2;
repeated KeyMatcher dimension = 4;
// We treat the pulled metrics as a batch of log-records that look like
// they came from LogD.
optional LineMatcher what = 3;
optional int32 value_field = 4;
}
oneof value {
PushedMetric pushed_metric = 1;
// Pulled metrics are computed when the duration closes (and are also
// fetched at the open if we need to compute a diff).
// Pulled metrics require a condition being defined.
// These metrics are not pulled at the end of every bucket.
PulledMetric pulled_metric = 2;
// Polled Metrics are pulled at the end of every bucket.
// Since the buckets are only planned to be on wall-clock for Android P,
// condition is NOT supported for polled metrics.
PulledMetric polled_metric = 3;
}
}
optional ValueSource value = 2;
message TrackedAggregate {
// Must be an integer that is uniquely chosen so we can identify the metric
// on server. We will provide a tool on server to help generate this.
optional int32 metric_id = 1;
optional TrackedAggregateType type = 2;
// Alert if the value, when summed over the Counter's number_of_buckets
// most-recent bins, exceeds min_threshold or is below max_threshold. For
// Anomaly Detection.
message Alert {
message IncidentdDetails {
optional string
alert_name = 1; // for humans and incidentd to identify this issue
repeated int32 incidentd_sections = 2; // tells incidentd what to do if
// alert triggers
}
optional IncidentdDetails incidentd_details = 1;
optional int32 number_of_buckets = 2;
// NOTE: that we assume the aggregate is only int.
optional int64 trigger_if_gt = 3; // min threshold
optional int64 trigger_if_lt = 4; // max_threshold;
optional int32 refractory_period_secs = 5; // alarm cannot fire a second
// time until elapsed
}
repeated Alert alerts = 3; // Support diff alert params for same aggregate.
} // end TrackedAggregate
repeated TrackedAggregate tracked_aggregates = 3;
optional Condition condition = 4;
message Bucket {
// TODO: Consider switching to second granularity.
// In practice, this must be chosen from a pre-defined list. So that we have
// flexibility, we don't hard-code this as an enum today.
optional int64 bucket_size_msec = 1;
optional int32 max_number_of_bits = 2; // Max bits per bucket.
}
optional Bucket bucket = 5;
message MiscellaneousEffect {
optional LineMatcher matcher = 1; // When to trigger the effect
enum Effect {
STOP_ALL = 1; // Needed for stop-all events, where nested start value is
// forced to 0.
}
repeated Effect effects = 2;
} // end MiscellaneousEffect
repeated MiscellaneousEffect misc_effects = 6;
} // end Counter
// Alarm configs not tied to a particular Counter.
message GlobalAlertParameters {
// No alarm can fire after any other alarm fired until this many seconds has
// elapsed.
optional int32 global_refractory_period_seconds = 1;
}
// The config defining all metrics to be captured.
message StatsdConfig {
// Event matchers.
repeated EventMatcher event_matchers = 1;
optional int64 config_id = 1;
// Aggregate counters.
repeated AggregateCounter aggregate_counters = 2;
repeated EventMetric event_metric = 2;
repeated CountMetric count_metric = 3;
repeated LogEntryMatcher log_entry_matcher = 4;
repeated Condition condition = 5;
}
/* Sample configurations start here:
----Screen on time----
AggregateCounter <
condition <
range <
start <
tag: SCREEN_ON
requirement <
key_matcher<
key: SCREEN_ON_VALUE
eq_bool: true
stop <
tag: SCREEN_ON
requirement <
key_matcher<
key: SCREEN_ON_VALUE
eq_bool: false
metric_id: # set on server
tracked_aggregates <
DURATION_SUM
(For brevity, omit the bucket options but they can also be set)
----Screen off time----
Should be like above but reversing start and stop
----Log the screen change events----
EventMatcher <
metric_id: # set on server
what <
tag: SCREEN_ON
----Number of crashes (across system)----
AggregateCounter <
metric_id: # set on server
tracked_aggregates <
VALUE_COUNT
value <
pushed_metric <
what <
tag: CRASH_TAG
----Network Usage in bytes Per App While in Background----
AggregateCounter <
metric_id: # set on server
slicer <
keys <
use_package_name: true
tracked_aggregates <
VALUE_SUM
value <
pulled_metric <
compute_diff: true
metric: Enum corresponding to network usage in bytes
condition <
range <
sliced: true
start <
tag: APP_FOREGROUND_TRANSITION (assume false means move to background)
requirement <
key_matcher<
key: APP_FOREGROUND_STATE
eq_bool: false
stop <
tag: APP_FOREGROUND_TRANSITION (assume false means move to background)
requirement <
key_matcher<
key: APP_FOREGROUND_STATE
eq_bool: true
----Wakelock Acquire time per app and wakelock
while unplugged and screen off and in background process state----
AggregateCounter <
metric_id: # set on server
slicer <
keys <
use_package_name: true
keys <
key: Key corresponding to wake_lock ID
tracked_aggregates <
DURATION_SUM
condition <
nested <
operation: AND
nested_conditions <
range <
start <
tag: PLUGGED_IN (assume false means uncharged)
requirement <
key_matcher<
key: PLUGGED_IN_STATE
eq_bool: false
stop <
tag: PLUGGED_IN (assume false means uncharged)
requirement <
key_matcher<
key: PLUGGED_IN_STATE
eq_bool: true
nested_conditions <
range <
start <
tag: SCREEN_ON
requirement <
key_matcher<
key: SCREEN_ON_STATE
eq_bool: false
stop <
tag: SCREEN_ON
requirement <
key_matcher<
key: SCREEN_ON_STATE
eq_bool: true
nested_conditions <
range <
start <
tag: PROCESS_CHANGE
requirement <
key_matcher<
key: PROCESS_STATE_VALUE
eq_int32: BACKGROUND_PROCESS
stop <
tag: PROCESS_CHANGE
nested <
operation: NOT
matcher< (This is an example of using the NOT to define stop)
requirement < (Note this requirement should match the start.)
key_matcher<
key: PROCESS_STATE_VALUE
eq_int32: BACKGROUND_PROCESS
slice<
use_package_name: true
----Number of crashes (per app) ----
AggregateCounter <
metric_id: # set on server
slicer <
keys<
use_package_name: true
tracked_aggregates <
VALUE_COUNT
value <
pushed_metric <
what <
tag: CRASH_TAG
---- Number of transitions to background (per app) ----
AggregateCounter <
metric_id: # set on server
slicer <
keys<
use_package_name: true
tracked_aggregates <
VALUE_COUNT
value <
pushed_metric <
what <
tag: APP_FOREGROUND_TRANSITION
requirement<
key: APP_FOREGROUND_TRANSITION_STATE
eq_bool: false
*/