Merge "Statsd config TTL" into pi-dev
This commit is contained in:
committed by
Android (Google) Code Review
commit
48920ab48d
@@ -209,7 +209,8 @@ LOCAL_SRC_FILES := \
|
||||
tests/e2e/DimensionInCondition_e2e_combination_OR_cond_test.cpp \
|
||||
tests/e2e/DimensionInCondition_e2e_simple_cond_test.cpp \
|
||||
tests/e2e/Anomaly_count_e2e_test.cpp \
|
||||
tests/e2e/Anomaly_duration_sum_e2e_test.cpp
|
||||
tests/e2e/Anomaly_duration_sum_e2e_test.cpp \
|
||||
tests/e2e/ConfigTtl_e2e_test.cpp
|
||||
|
||||
LOCAL_STATIC_LIBRARIES := \
|
||||
$(statsd_common_static_libraries) \
|
||||
|
||||
@@ -366,7 +366,7 @@ sp<StatsLogProcessor> CreateStatsLogProcessor(const long timeBaseSec, const Stat
|
||||
sp<AlarmMonitor> periodicAlarmMonitor;
|
||||
sp<StatsLogProcessor> processor = new StatsLogProcessor(
|
||||
uidMap, anomalyAlarmMonitor, periodicAlarmMonitor, timeBaseSec, [](const ConfigKey&){});
|
||||
processor->OnConfigUpdated(0, key, config);
|
||||
processor->OnConfigUpdated(timeBaseSec * NS_PER_SEC, key, config);
|
||||
return processor;
|
||||
}
|
||||
|
||||
|
||||
@@ -65,6 +65,7 @@ const int FIELD_ID_CURRENT_REPORT_ELAPSED_NANOS = 4;
|
||||
const int FIELD_ID_LAST_REPORT_WALL_CLOCK_NANOS = 5;
|
||||
const int FIELD_ID_CURRENT_REPORT_WALL_CLOCK_NANOS = 6;
|
||||
|
||||
#define NS_PER_HOUR 3600 * NS_PER_SEC
|
||||
|
||||
#define STATS_DATA_DIR "/data/misc/stats-data"
|
||||
|
||||
@@ -85,7 +86,7 @@ StatsLogProcessor::~StatsLogProcessor() {
|
||||
}
|
||||
|
||||
void StatsLogProcessor::onAnomalyAlarmFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet) {
|
||||
std::lock_guard<std::mutex> lock(mMetricsMutex);
|
||||
for (const auto& itr : mMetricsManagers) {
|
||||
@@ -93,7 +94,7 @@ void StatsLogProcessor::onAnomalyAlarmFired(
|
||||
}
|
||||
}
|
||||
void StatsLogProcessor::onPeriodicAlarmFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet) {
|
||||
|
||||
std::lock_guard<std::mutex> lock(mMetricsMutex);
|
||||
@@ -156,10 +157,14 @@ void StatsLogProcessor::onIsolatedUidChangedEventLocked(const LogEvent& event) {
|
||||
|
||||
void StatsLogProcessor::OnLogEvent(LogEvent* event) {
|
||||
std::lock_guard<std::mutex> lock(mMetricsMutex);
|
||||
if (event->GetElapsedTimestampNs() < mLastLogTimestamp) {
|
||||
const int64_t currentTimestampNs = event->GetElapsedTimestampNs();
|
||||
if (currentTimestampNs < mLastLogTimestamp) {
|
||||
return;
|
||||
}
|
||||
mLastLogTimestamp = event->GetElapsedTimestampNs();
|
||||
|
||||
resetIfConfigTtlExpiredLocked(currentTimestampNs);
|
||||
|
||||
mLastLogTimestamp = currentTimestampNs;
|
||||
StatsdStats::getInstance().noteAtomLogged(
|
||||
event->GetTagId(), event->GetElapsedTimestampNs() / NS_PER_SEC);
|
||||
|
||||
@@ -173,12 +178,13 @@ void StatsLogProcessor::OnLogEvent(LogEvent* event) {
|
||||
return;
|
||||
}
|
||||
|
||||
uint64_t curTimeSec = getElapsedRealtimeSec();
|
||||
int64_t curTimeSec = getElapsedRealtimeSec();
|
||||
if (curTimeSec - mLastPullerCacheClearTimeSec > StatsdStats::kPullerCacheClearIntervalSec) {
|
||||
mStatsPullerManager.ClearPullerCacheIfNecessary(curTimeSec * NS_PER_SEC);
|
||||
mLastPullerCacheClearTimeSec = curTimeSec;
|
||||
}
|
||||
|
||||
|
||||
if (event->GetTagId() != android::util::ISOLATED_UID_CHANGED) {
|
||||
// Map the isolated uid to host uid if necessary.
|
||||
mapIsolatedUidToHostUidIfNecessaryLocked(event);
|
||||
@@ -194,6 +200,11 @@ void StatsLogProcessor::OnLogEvent(LogEvent* event) {
|
||||
void StatsLogProcessor::OnConfigUpdated(const int64_t timestampNs, const ConfigKey& key,
|
||||
const StatsdConfig& config) {
|
||||
std::lock_guard<std::mutex> lock(mMetricsMutex);
|
||||
OnConfigUpdatedLocked(timestampNs, key, config);
|
||||
}
|
||||
|
||||
void StatsLogProcessor::OnConfigUpdatedLocked(
|
||||
const int64_t timestampNs, const ConfigKey& key, const StatsdConfig& config) {
|
||||
VLOG("Updated configuration for key %s", key.ToString().c_str());
|
||||
sp<MetricsManager> newMetricsManager =
|
||||
new MetricsManager(key, config, mTimeBaseSec, (timestampNs - 1) / NS_PER_SEC + 1, mUidMap,
|
||||
@@ -206,6 +217,7 @@ void StatsLogProcessor::OnConfigUpdated(const int64_t timestampNs, const ConfigK
|
||||
// not safe to create wp or sp from this pointer inside its constructor.
|
||||
mUidMap->addListener(newMetricsManager.get());
|
||||
}
|
||||
newMetricsManager->refreshTtl(timestampNs);
|
||||
mMetricsManagers[key] = newMetricsManager;
|
||||
VLOG("StatsdConfig valid");
|
||||
} else {
|
||||
@@ -235,7 +247,7 @@ void StatsLogProcessor::dumpStates(FILE* out, bool verbose) {
|
||||
/*
|
||||
* onDumpReport dumps serialized ConfigMetricsReportList into outData.
|
||||
*/
|
||||
void StatsLogProcessor::onDumpReport(const ConfigKey& key, const uint64_t dumpTimeStampNs,
|
||||
void StatsLogProcessor::onDumpReport(const ConfigKey& key, const int64_t dumpTimeStampNs,
|
||||
vector<uint8_t>* outData) {
|
||||
std::lock_guard<std::mutex> lock(mMetricsMutex);
|
||||
|
||||
@@ -290,7 +302,7 @@ void StatsLogProcessor::onDumpReport(const ConfigKey& key, const uint64_t dumpTi
|
||||
* onConfigMetricsReportLocked dumps serialized ConfigMetricsReport into outData.
|
||||
*/
|
||||
void StatsLogProcessor::onConfigMetricsReportLocked(const ConfigKey& key,
|
||||
const uint64_t dumpTimeStampNs,
|
||||
const int64_t dumpTimeStampNs,
|
||||
ProtoOutputStream* proto) {
|
||||
// We already checked whether key exists in mMetricsManagers in
|
||||
// WriteDataToDisk.
|
||||
@@ -317,7 +329,29 @@ void StatsLogProcessor::onConfigMetricsReportLocked(const ConfigKey& key,
|
||||
proto->write(FIELD_TYPE_INT64 | FIELD_ID_CURRENT_REPORT_WALL_CLOCK_NANOS,
|
||||
(long long)getWallClockNs());
|
||||
|
||||
}
|
||||
|
||||
void StatsLogProcessor::resetIfConfigTtlExpiredLocked(const int64_t timestampNs) {
|
||||
std::vector<ConfigKey> configKeysTtlExpired;
|
||||
for (auto it = mMetricsManagers.begin(); it != mMetricsManagers.end(); it++) {
|
||||
if (it->second != nullptr && !it->second->isInTtl(timestampNs)) {
|
||||
configKeysTtlExpired.push_back(it->first);
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto& key : configKeysTtlExpired) {
|
||||
StatsdConfig config;
|
||||
if (StorageManager::readConfigFromDisk(key, &config)) {
|
||||
OnConfigUpdatedLocked(timestampNs, key, config);
|
||||
StatsdStats::getInstance().noteConfigReset(key);
|
||||
} else {
|
||||
ALOGE("Failed to read backup config from disk for : %s", key.ToString().c_str());
|
||||
auto it = mMetricsManagers.find(key);
|
||||
if (it != mMetricsManagers.end()) {
|
||||
it->second->refreshTtl(timestampNs);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void StatsLogProcessor::OnConfigRemoved(const ConfigKey& key) {
|
||||
@@ -337,7 +371,7 @@ void StatsLogProcessor::OnConfigRemoved(const ConfigKey& key) {
|
||||
}
|
||||
|
||||
void StatsLogProcessor::flushIfNecessaryLocked(
|
||||
uint64_t timestampNs, const ConfigKey& key, MetricsManager& metricsManager) {
|
||||
int64_t timestampNs, const ConfigKey& key, MetricsManager& metricsManager) {
|
||||
auto lastCheckTime = mLastByteSizeTimes.find(key);
|
||||
if (lastCheckTime != mLastByteSizeTimes.end()) {
|
||||
if (timestampNs - lastCheckTime->second < StatsdStats::kMinByteSizeCheckPeriodNs) {
|
||||
|
||||
@@ -48,16 +48,16 @@ public:
|
||||
|
||||
size_t GetMetricsSize(const ConfigKey& key) const;
|
||||
|
||||
void onDumpReport(const ConfigKey& key, const uint64_t dumpTimeNs, vector<uint8_t>* outData);
|
||||
void onDumpReport(const ConfigKey& key, const int64_t dumpTimeNs, vector<uint8_t>* outData);
|
||||
|
||||
/* Tells MetricsManager that the alarms in alarmSet have fired. Modifies anomaly alarmSet. */
|
||||
void onAnomalyAlarmFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet);
|
||||
|
||||
/* Tells MetricsManager that the alarms in alarmSet have fired. Modifies periodic alarmSet. */
|
||||
void onPeriodicAlarmFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet);
|
||||
|
||||
/* Flushes data to disk. Data on memory will be gone after written to disk. */
|
||||
@@ -69,6 +69,7 @@ public:
|
||||
|
||||
void dumpStates(FILE* out, bool verbose);
|
||||
|
||||
|
||||
private:
|
||||
// For testing only.
|
||||
inline sp<AlarmMonitor> getAnomalyAlarmMonitor() const {
|
||||
@@ -96,12 +97,17 @@ private:
|
||||
|
||||
sp<AlarmMonitor> mPeriodicAlarmMonitor;
|
||||
|
||||
void onConfigMetricsReportLocked(const ConfigKey& key, const uint64_t dumpTimeStampNs,
|
||||
void resetIfConfigTtlExpiredLocked(const int64_t timestampNs);
|
||||
|
||||
void OnConfigUpdatedLocked(
|
||||
const int64_t currentTimestampNs, const ConfigKey& key, const StatsdConfig& config);
|
||||
|
||||
void onConfigMetricsReportLocked(const ConfigKey& key, const int64_t dumpTimeStampNs,
|
||||
util::ProtoOutputStream* proto);
|
||||
|
||||
/* Check if we should send a broadcast if approaching memory limits and if we're over, we
|
||||
* actually delete the data. */
|
||||
void flushIfNecessaryLocked(uint64_t timestampNs, const ConfigKey& key,
|
||||
void flushIfNecessaryLocked(int64_t timestampNs, const ConfigKey& key,
|
||||
MetricsManager& metricsManager);
|
||||
|
||||
// Maps the isolated uid in the log event to host uid if the log event contains uid fields.
|
||||
@@ -152,7 +158,9 @@ private:
|
||||
FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
|
||||
FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
|
||||
FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
|
||||
|
||||
FRIEND_TEST(AlarmE2eTest, TestMultipleAlarms);
|
||||
FRIEND_TEST(ConfigTtlE2eTest, TestCountMetric);
|
||||
};
|
||||
|
||||
} // namespace statsd
|
||||
|
||||
@@ -677,7 +677,7 @@ Status StatsService::informAnomalyAlarmFired() {
|
||||
"Only system uid can call informAnomalyAlarmFired");
|
||||
}
|
||||
|
||||
uint64_t currentTimeSec = getElapsedRealtimeSec();
|
||||
int64_t currentTimeSec = getElapsedRealtimeSec();
|
||||
std::unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet =
|
||||
mAnomalyAlarmMonitor->popSoonerThan(static_cast<uint32_t>(currentTimeSec));
|
||||
if (alarmSet.size() > 0) {
|
||||
@@ -698,7 +698,7 @@ Status StatsService::informAlarmForSubscriberTriggeringFired() {
|
||||
"Only system uid can call informAlarmForSubscriberTriggeringFired");
|
||||
}
|
||||
|
||||
uint64_t currentTimeSec = getElapsedRealtimeSec();
|
||||
int64_t currentTimeSec = getElapsedRealtimeSec();
|
||||
std::unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet =
|
||||
mPeriodicAlarmMonitor->popSoonerThan(static_cast<uint32_t>(currentTimeSec));
|
||||
if (alarmSet.size() > 0) {
|
||||
|
||||
@@ -30,8 +30,8 @@ namespace android {
|
||||
namespace os {
|
||||
namespace statsd {
|
||||
|
||||
AlarmTracker::AlarmTracker(const uint64_t startMillis,
|
||||
const uint64_t currentMillis,
|
||||
AlarmTracker::AlarmTracker(const int64_t startMillis,
|
||||
const int64_t currentMillis,
|
||||
const Alarm& alarm, const ConfigKey& configKey,
|
||||
const sp<AlarmMonitor>& alarmMonitor)
|
||||
: mAlarmConfig(alarm),
|
||||
@@ -70,7 +70,7 @@ int64_t AlarmTracker::findNextAlarmSec(int64_t currentTimeSec) {
|
||||
}
|
||||
|
||||
void AlarmTracker::informAlarmsFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms) {
|
||||
if (firedAlarms.empty() || mInternalAlarm == nullptr ||
|
||||
firedAlarms.find(mInternalAlarm) == firedAlarms.end()) {
|
||||
|
||||
@@ -34,8 +34,8 @@ namespace statsd {
|
||||
|
||||
class AlarmTracker : public virtual RefBase {
|
||||
public:
|
||||
AlarmTracker(const uint64_t startMillis,
|
||||
const uint64_t currentMillis,
|
||||
AlarmTracker(const int64_t startMillis,
|
||||
const int64_t currentMillis,
|
||||
const Alarm& alarm, const ConfigKey& configKey,
|
||||
const sp<AlarmMonitor>& subscriberAlarmMonitor);
|
||||
|
||||
@@ -45,12 +45,12 @@ public:
|
||||
|
||||
void addSubscription(const Subscription& subscription);
|
||||
|
||||
void informAlarmsFired(const uint64_t& timestampNs,
|
||||
void informAlarmsFired(const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms);
|
||||
|
||||
protected:
|
||||
// For test only. Returns the alarm timestamp in seconds. Otherwise returns 0.
|
||||
inline uint32_t getAlarmTimestampSec() const {
|
||||
inline int32_t getAlarmTimestampSec() const {
|
||||
return mInternalAlarm == nullptr ? 0 : mInternalAlarm->timestampSec;
|
||||
}
|
||||
|
||||
|
||||
@@ -207,7 +207,7 @@ bool AnomalyTracker::detectAnomaly(const int64_t& currentBucketNum,
|
||||
getSumOverPastBuckets(key) + currentBucketValue > mAlert.trigger_if_sum_gt();
|
||||
}
|
||||
|
||||
void AnomalyTracker::declareAnomaly(const uint64_t& timestampNs, const MetricDimensionKey& key) {
|
||||
void AnomalyTracker::declareAnomaly(const int64_t& timestampNs, const MetricDimensionKey& key) {
|
||||
// TODO: Why receive timestamp? RefractoryPeriod should always be based on real time right now.
|
||||
if (isInRefractoryPeriod(timestampNs, key)) {
|
||||
VLOG("Skipping anomaly declaration since within refractory period");
|
||||
@@ -235,7 +235,7 @@ void AnomalyTracker::declareAnomaly(const uint64_t& timestampNs, const MetricDim
|
||||
mConfigKey.GetId(), mAlert.id());
|
||||
}
|
||||
|
||||
void AnomalyTracker::detectAndDeclareAnomaly(const uint64_t& timestampNs,
|
||||
void AnomalyTracker::detectAndDeclareAnomaly(const int64_t& timestampNs,
|
||||
const int64_t& currBucketNum,
|
||||
const MetricDimensionKey& key,
|
||||
const int64_t& currentBucketValue) {
|
||||
@@ -244,11 +244,11 @@ void AnomalyTracker::detectAndDeclareAnomaly(const uint64_t& timestampNs,
|
||||
}
|
||||
}
|
||||
|
||||
bool AnomalyTracker::isInRefractoryPeriod(const uint64_t& timestampNs,
|
||||
bool AnomalyTracker::isInRefractoryPeriod(const int64_t& timestampNs,
|
||||
const MetricDimensionKey& key) const {
|
||||
const auto& it = mRefractoryPeriodEndsSec.find(key);
|
||||
if (it != mRefractoryPeriodEndsSec.end()) {
|
||||
return timestampNs < it->second * NS_PER_SEC;
|
||||
return timestampNs < (it->second * (int64_t)NS_PER_SEC);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -67,13 +67,13 @@ public:
|
||||
const int64_t& currentBucketValue);
|
||||
|
||||
// Informs incidentd about the detected alert.
|
||||
void declareAnomaly(const uint64_t& timestampNs, const MetricDimensionKey& key);
|
||||
void declareAnomaly(const int64_t& timestampNs, const MetricDimensionKey& key);
|
||||
|
||||
// Detects if, based on past buckets plus the new currentBucketValue (which generally
|
||||
// represents the partially-filled current bucket), an anomaly has happened, and if so,
|
||||
// declares an anomaly and informs relevant subscribers.
|
||||
// Also advances to currBucketNum-1.
|
||||
void detectAndDeclareAnomaly(const uint64_t& timestampNs, const int64_t& currBucketNum,
|
||||
void detectAndDeclareAnomaly(const int64_t& timestampNs, const int64_t& currBucketNum,
|
||||
const MetricDimensionKey& key, const int64_t& currentBucketValue);
|
||||
|
||||
// Init the AlarmMonitor which is shared across anomaly trackers.
|
||||
@@ -107,7 +107,7 @@ public:
|
||||
|
||||
// Declares an anomaly for each alarm in firedAlarms that belongs to this AnomalyTracker,
|
||||
// and removes it from firedAlarms. Does NOT remove the alarm from the AlarmMonitor.
|
||||
virtual void informAlarmsFired(const uint64_t& timestampNs,
|
||||
virtual void informAlarmsFired(const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms) {
|
||||
return; // The base AnomalyTracker class doesn't have alarms.
|
||||
}
|
||||
@@ -166,7 +166,7 @@ protected:
|
||||
void subtractValueFromSum(const MetricDimensionKey& key, const int64_t& bucketValue);
|
||||
|
||||
// Returns true if in the refractory period, else false.
|
||||
bool isInRefractoryPeriod(const uint64_t& timestampNs, const MetricDimensionKey& key) const;
|
||||
bool isInRefractoryPeriod(const int64_t& timestampNs, const MetricDimensionKey& key) const;
|
||||
|
||||
// Calculates the corresponding bucket index within the circular array.
|
||||
// Requires bucketNum >= 0.
|
||||
|
||||
@@ -36,7 +36,7 @@ DurationAnomalyTracker::~DurationAnomalyTracker() {
|
||||
}
|
||||
|
||||
void DurationAnomalyTracker::startAlarm(const MetricDimensionKey& dimensionKey,
|
||||
const uint64_t& timestampNs) {
|
||||
const int64_t& timestampNs) {
|
||||
// Alarms are stored in secs. Must round up, since if it fires early, it is ignored completely.
|
||||
uint32_t timestampSec = static_cast<uint32_t>((timestampNs -1) / NS_PER_SEC) + 1; // round up
|
||||
if (isInRefractoryPeriod(timestampNs, dimensionKey)) {
|
||||
@@ -57,14 +57,14 @@ void DurationAnomalyTracker::startAlarm(const MetricDimensionKey& dimensionKey,
|
||||
}
|
||||
|
||||
void DurationAnomalyTracker::stopAlarm(const MetricDimensionKey& dimensionKey,
|
||||
const uint64_t& timestampNs) {
|
||||
const int64_t& timestampNs) {
|
||||
const auto itr = mAlarms.find(dimensionKey);
|
||||
if (itr == mAlarms.end()) {
|
||||
return;
|
||||
}
|
||||
|
||||
// If the alarm is set in the past but hasn't fired yet (due to lag), catch it now.
|
||||
if (itr->second != nullptr && timestampNs >= NS_PER_SEC * itr->second->timestampSec) {
|
||||
if (itr->second != nullptr && timestampNs >= (int64_t)NS_PER_SEC * itr->second->timestampSec) {
|
||||
declareAnomaly(timestampNs, dimensionKey);
|
||||
}
|
||||
if (mAlarmMonitor != nullptr) {
|
||||
@@ -82,7 +82,7 @@ void DurationAnomalyTracker::cancelAllAlarms() {
|
||||
mAlarms.clear();
|
||||
}
|
||||
|
||||
void DurationAnomalyTracker::informAlarmsFired(const uint64_t& timestampNs,
|
||||
void DurationAnomalyTracker::informAlarmsFired(const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms) {
|
||||
|
||||
if (firedAlarms.empty() || mAlarms.empty()) return;
|
||||
|
||||
@@ -34,12 +34,12 @@ public:
|
||||
|
||||
// Sets an alarm for the given timestamp.
|
||||
// Replaces previous alarm if one already exists.
|
||||
void startAlarm(const MetricDimensionKey& dimensionKey, const uint64_t& eventTime);
|
||||
void startAlarm(const MetricDimensionKey& dimensionKey, const int64_t& eventTime);
|
||||
|
||||
// Stops the alarm.
|
||||
// If it should have already fired, but hasn't yet (e.g. because the AlarmManager is delayed),
|
||||
// declare the anomaly now.
|
||||
void stopAlarm(const MetricDimensionKey& dimensionKey, const uint64_t& timestampNs);
|
||||
void stopAlarm(const MetricDimensionKey& dimensionKey, const int64_t& timestampNs);
|
||||
|
||||
// Stop all the alarms owned by this tracker. Does not declare any anomalies.
|
||||
void cancelAllAlarms();
|
||||
@@ -48,7 +48,7 @@ public:
|
||||
// and removes it from firedAlarms. The AlarmMonitor is not informed.
|
||||
// Note that this will generally be called from a different thread from the other functions;
|
||||
// the caller is responsible for thread safety.
|
||||
void informAlarmsFired(const uint64_t& timestampNs,
|
||||
void informAlarmsFired(const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& firedAlarms) override;
|
||||
|
||||
protected:
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
#include "guardrail/StatsdStats.h"
|
||||
#include "stats_log_util.h"
|
||||
#include "stats_util.h"
|
||||
#include "stats_log_util.h"
|
||||
|
||||
#include <android-base/file.h>
|
||||
#include <dirent.h>
|
||||
@@ -112,7 +113,7 @@ void ConfigManager::UpdateConfig(const ConfigKey& key, const StatsdConfig& confi
|
||||
|
||||
const int64_t timestampNs = getElapsedRealtimeNs();
|
||||
// Tell everyone
|
||||
for (sp<ConfigListener> listener:broadcastList) {
|
||||
for (sp<ConfigListener> listener : broadcastList) {
|
||||
listener->OnConfigUpdated(timestampNs, key, config);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,6 +63,7 @@ const int FIELD_ID_LOGGER_STATS_ERROR_CODE = 2;
|
||||
const int FIELD_ID_CONFIG_STATS_UID = 1;
|
||||
const int FIELD_ID_CONFIG_STATS_ID = 2;
|
||||
const int FIELD_ID_CONFIG_STATS_CREATION = 3;
|
||||
const int FIELD_ID_CONFIG_STATS_RESET = 18;
|
||||
const int FIELD_ID_CONFIG_STATS_DELETION = 4;
|
||||
const int FIELD_ID_CONFIG_STATS_METRIC_COUNT = 5;
|
||||
const int FIELD_ID_CONFIG_STATS_CONDITION_COUNT = 6;
|
||||
@@ -165,6 +166,18 @@ void StatsdStats::noteConfigRemoved(const ConfigKey& key) {
|
||||
noteConfigRemovedInternalLocked(key);
|
||||
}
|
||||
|
||||
void StatsdStats::noteConfigResetInternalLocked(const ConfigKey& key) {
|
||||
auto it = mConfigStats.find(key);
|
||||
if (it != mConfigStats.end()) {
|
||||
it->second->reset_time_sec = getWallClockSec();
|
||||
}
|
||||
}
|
||||
|
||||
void StatsdStats::noteConfigReset(const ConfigKey& key) {
|
||||
lock_guard<std::mutex> lock(mLock);
|
||||
noteConfigResetInternalLocked(key);
|
||||
}
|
||||
|
||||
void StatsdStats::noteBroadcastSent(const ConfigKey& key) {
|
||||
noteBroadcastSent(key, getWallClockSec());
|
||||
}
|
||||
@@ -378,10 +391,11 @@ void StatsdStats::dumpStats(FILE* out) const {
|
||||
fprintf(out, "%lu Config in icebox: \n", (unsigned long)mIceBox.size());
|
||||
for (const auto& configStats : mIceBox) {
|
||||
fprintf(out,
|
||||
"Config {%d_%lld}: creation=%d, deletion=%d, #metric=%d, #condition=%d, "
|
||||
"Config {%d_%lld}: creation=%d, deletion=%d, reset=%d, #metric=%d, #condition=%d, "
|
||||
"#matcher=%d, #alert=%d, valid=%d\n",
|
||||
configStats->uid, (long long)configStats->id, configStats->creation_time_sec,
|
||||
configStats->deletion_time_sec, configStats->metric_count,
|
||||
configStats->deletion_time_sec, configStats->reset_time_sec,
|
||||
configStats->metric_count,
|
||||
configStats->condition_count, configStats->matcher_count, configStats->alert_count,
|
||||
configStats->is_valid);
|
||||
|
||||
@@ -485,6 +499,9 @@ void addConfigStatsToProto(const ConfigStats& configStats, ProtoOutputStream* pr
|
||||
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_UID, configStats.uid);
|
||||
proto->write(FIELD_TYPE_INT64 | FIELD_ID_CONFIG_STATS_ID, (long long)configStats.id);
|
||||
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_CREATION, configStats.creation_time_sec);
|
||||
if (configStats.reset_time_sec != 0) {
|
||||
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_RESET, configStats.reset_time_sec);
|
||||
}
|
||||
if (configStats.deletion_time_sec != 0) {
|
||||
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_DELETION,
|
||||
configStats.deletion_time_sec);
|
||||
|
||||
@@ -34,6 +34,7 @@ struct ConfigStats {
|
||||
int64_t id;
|
||||
int32_t creation_time_sec;
|
||||
int32_t deletion_time_sec = 0;
|
||||
int32_t reset_time_sec = 0;
|
||||
int32_t metric_count;
|
||||
int32_t condition_count;
|
||||
int32_t matcher_count;
|
||||
@@ -121,10 +122,10 @@ public:
|
||||
const static size_t kMaxBytesUsedUidMap = 50 * 1024;
|
||||
|
||||
/* Minimum period between two broadcasts in nanoseconds. */
|
||||
static const unsigned long long kMinBroadcastPeriodNs = 60 * NS_PER_SEC;
|
||||
static const int64_t kMinBroadcastPeriodNs = 60 * NS_PER_SEC;
|
||||
|
||||
/* Min period between two checks of byte size per config key in nanoseconds. */
|
||||
static const unsigned long long kMinByteSizeCheckPeriodNs = 10 * NS_PER_SEC;
|
||||
static const int64_t kMinByteSizeCheckPeriodNs = 10 * NS_PER_SEC;
|
||||
|
||||
// Maximum age (30 days) that files on disk can exist in seconds.
|
||||
static const int kMaxAgeSecond = 60 * 60 * 24 * 30;
|
||||
@@ -152,6 +153,10 @@ public:
|
||||
* Report a config has been removed.
|
||||
*/
|
||||
void noteConfigRemoved(const ConfigKey& key);
|
||||
/**
|
||||
* Report a config has been reset when ttl expires.
|
||||
*/
|
||||
void noteConfigReset(const ConfigKey& key);
|
||||
|
||||
/**
|
||||
* Report a broadcast has been sent to a config owner to collect the data.
|
||||
@@ -326,6 +331,7 @@ private:
|
||||
// Stores the number of times statsd registers the periodic alarm changes
|
||||
int mPeriodicAlarmRegisteredStats = 0;
|
||||
|
||||
void noteConfigResetInternalLocked(const ConfigKey& key);
|
||||
|
||||
void noteConfigRemovedInternalLocked(const ConfigKey& key);
|
||||
|
||||
|
||||
@@ -59,7 +59,7 @@ const int FIELD_ID_COUNT = 3;
|
||||
CountMetricProducer::CountMetricProducer(const ConfigKey& key, const CountMetric& metric,
|
||||
const int conditionIndex,
|
||||
const sp<ConditionWizard>& wizard,
|
||||
const uint64_t startTimeNs)
|
||||
const int64_t startTimeNs)
|
||||
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard) {
|
||||
// TODO: evaluate initial conditions. and set mConditionMet.
|
||||
if (metric.has_bucket()) {
|
||||
@@ -118,11 +118,11 @@ void CountMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
|
||||
}
|
||||
|
||||
void CountMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
VLOG("Metric %lld onSlicedConditionMayChange", (long long)mMetricId);
|
||||
}
|
||||
|
||||
void CountMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void CountMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
ProtoOutputStream* protoOutput) {
|
||||
flushIfNeededLocked(dumpTimeNs);
|
||||
if (mPastBuckets.empty()) {
|
||||
@@ -173,13 +173,13 @@ void CountMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
mPastBuckets.clear();
|
||||
}
|
||||
|
||||
void CountMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
|
||||
void CountMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
|
||||
flushIfNeededLocked(dropTimeNs);
|
||||
mPastBuckets.clear();
|
||||
}
|
||||
|
||||
void CountMetricProducer::onConditionChangedLocked(const bool conditionMet,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
|
||||
mCondition = conditionMet;
|
||||
}
|
||||
@@ -208,7 +208,7 @@ void CountMetricProducer::onMatchedLogEventInternalLocked(
|
||||
const size_t matcherIndex, const MetricDimensionKey& eventKey,
|
||||
const ConditionKey& conditionKey, bool condition,
|
||||
const LogEvent& event) {
|
||||
uint64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
int64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
flushIfNeededLocked(eventTimeNs);
|
||||
|
||||
if (condition == false) {
|
||||
@@ -244,23 +244,23 @@ void CountMetricProducer::onMatchedLogEventInternalLocked(
|
||||
|
||||
// When a new matched event comes in, we check if event falls into the current
|
||||
// bucket. If not, flush the old counter to past buckets and initialize the new bucket.
|
||||
void CountMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
|
||||
uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
void CountMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
|
||||
int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
if (eventTimeNs < currentBucketEndTimeNs) {
|
||||
return;
|
||||
}
|
||||
|
||||
flushCurrentBucketLocked(eventTimeNs);
|
||||
// Setup the bucket start time and number.
|
||||
uint64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
|
||||
int64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
|
||||
mCurrentBucketStartTimeNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
|
||||
mCurrentBucketNum += numBucketsForward;
|
||||
VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
|
||||
(long long)mCurrentBucketStartTimeNs);
|
||||
}
|
||||
|
||||
void CountMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
|
||||
uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
void CountMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
|
||||
int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
CountBucket info;
|
||||
info.mBucketStartNs = mCurrentBucketStartTimeNs;
|
||||
if (eventTimeNs < fullBucketEndTimeNs) {
|
||||
|
||||
@@ -43,7 +43,7 @@ public:
|
||||
// TODO: Pass in the start time from MetricsManager, it should be consistent for all metrics.
|
||||
CountMetricProducer(const ConfigKey& key, const CountMetric& countMetric,
|
||||
const int conditionIndex, const sp<ConditionWizard>& wizard,
|
||||
const uint64_t startTimeNs);
|
||||
const int64_t startTimeNs);
|
||||
|
||||
virtual ~CountMetricProducer();
|
||||
|
||||
@@ -54,26 +54,26 @@ protected:
|
||||
const LogEvent& event) override;
|
||||
|
||||
private:
|
||||
void onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
android::util::ProtoOutputStream* protoOutput) override;
|
||||
|
||||
// Internal interface to handle condition change.
|
||||
void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
|
||||
void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
|
||||
|
||||
// Internal interface to handle sliced condition change.
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
|
||||
|
||||
// Internal function to calculate the current used bytes.
|
||||
size_t byteSizeLocked() const override;
|
||||
|
||||
void dumpStatesLocked(FILE* out, bool verbose) const override;
|
||||
|
||||
void dropDataLocked(const uint64_t dropTimeNs) override;
|
||||
void dropDataLocked(const int64_t dropTimeNs) override;
|
||||
|
||||
// Util function to flush the old packet.
|
||||
void flushIfNeededLocked(const uint64_t& newEventTime) override;
|
||||
void flushIfNeededLocked(const int64_t& newEventTime) override;
|
||||
|
||||
void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
|
||||
void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
|
||||
|
||||
// TODO: Add a lock to mPastBuckets.
|
||||
std::unordered_map<MetricDimensionKey, std::vector<CountBucket>> mPastBuckets;
|
||||
|
||||
@@ -61,7 +61,7 @@ DurationMetricProducer::DurationMetricProducer(const ConfigKey& key, const Durat
|
||||
const bool nesting,
|
||||
const sp<ConditionWizard>& wizard,
|
||||
const FieldMatcher& internalDimensions,
|
||||
const uint64_t startTimeNs)
|
||||
const int64_t startTimeNs)
|
||||
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard),
|
||||
mAggregationType(metric.aggregation_type()),
|
||||
mStartIndex(startIndex),
|
||||
@@ -170,7 +170,7 @@ unique_ptr<DurationTracker> DurationMetricProducer::createDurationTracker(
|
||||
// 2. No condition in dimension
|
||||
// 3. The links covers all dimension fields in the sliced child condition predicate.
|
||||
void DurationMetricProducer::onSlicedConditionMayChangeLocked_opt1(bool condition,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
if (mMetric2ConditionLinks.size() != 1 ||
|
||||
!mHasLinksToAllConditionDimensionsInTracker ||
|
||||
!mDimensionsInCondition.empty()) {
|
||||
@@ -243,7 +243,7 @@ void DurationMetricProducer::onSlicedConditionMayChangeLocked_opt1(bool conditio
|
||||
// 1. If combination condition, logical operation is AND, only one sliced child predicate.
|
||||
// 2. Has dimensions_in_condition and it equals to the output dimensions of the sliced predicate.
|
||||
void DurationMetricProducer::onSlicedConditionMayChangeLocked_opt2(bool condition,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
if (mMetric2ConditionLinks.size() > 1 || !mSameConditionDimensionsInTracker) {
|
||||
return;
|
||||
}
|
||||
@@ -328,7 +328,7 @@ void DurationMetricProducer::onSlicedConditionMayChangeLocked_opt2(bool conditio
|
||||
}
|
||||
|
||||
void DurationMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
VLOG("Metric %lld onSlicedConditionMayChange", (long long)mMetricId);
|
||||
flushIfNeededLocked(eventTime);
|
||||
|
||||
@@ -420,7 +420,7 @@ void DurationMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondit
|
||||
}
|
||||
|
||||
void DurationMetricProducer::onConditionChangedLocked(const bool conditionMet,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
|
||||
mCondition = conditionMet;
|
||||
flushIfNeededLocked(eventTime);
|
||||
@@ -433,12 +433,12 @@ void DurationMetricProducer::onConditionChangedLocked(const bool conditionMet,
|
||||
}
|
||||
}
|
||||
|
||||
void DurationMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
|
||||
void DurationMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
|
||||
flushIfNeededLocked(dropTimeNs);
|
||||
mPastBuckets.clear();
|
||||
}
|
||||
|
||||
void DurationMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void DurationMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
ProtoOutputStream* protoOutput) {
|
||||
flushIfNeededLocked(dumpTimeNs);
|
||||
if (mPastBuckets.empty()) {
|
||||
@@ -492,8 +492,8 @@ void DurationMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
mPastBuckets.clear();
|
||||
}
|
||||
|
||||
void DurationMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
|
||||
uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
void DurationMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
|
||||
int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
|
||||
if (currentBucketEndTimeNs > eventTimeNs) {
|
||||
return;
|
||||
@@ -522,7 +522,7 @@ void DurationMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
|
||||
mCurrentBucketNum += numBucketsForward;
|
||||
}
|
||||
|
||||
void DurationMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
|
||||
void DurationMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
|
||||
for (auto whatIt = mCurrentSlicedDurationTrackerMap.begin();
|
||||
whatIt != mCurrentSlicedDurationTrackerMap.end();) {
|
||||
for (auto it = whatIt->second.begin(); it != whatIt->second.end();) {
|
||||
@@ -644,7 +644,7 @@ void DurationMetricProducer::onMatchedLogEventInternalLocked(
|
||||
|
||||
void DurationMetricProducer::onMatchedLogEventLocked(const size_t matcherIndex,
|
||||
const LogEvent& event) {
|
||||
uint64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
int64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
if (eventTimeNs < mStartTimeNs) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ public:
|
||||
const int conditionIndex, const size_t startIndex,
|
||||
const size_t stopIndex, const size_t stopAllIndex, const bool nesting,
|
||||
const sp<ConditionWizard>& wizard,
|
||||
const FieldMatcher& internalDimensions, const uint64_t startTimeNs);
|
||||
const FieldMatcher& internalDimensions, const int64_t startTimeNs);
|
||||
|
||||
virtual ~DurationMetricProducer();
|
||||
|
||||
@@ -61,29 +61,29 @@ private:
|
||||
void handleStartEvent(const MetricDimensionKey& eventKey, const ConditionKey& conditionKeys,
|
||||
bool condition, const LogEvent& event);
|
||||
|
||||
void onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
android::util::ProtoOutputStream* protoOutput) override;
|
||||
|
||||
// Internal interface to handle condition change.
|
||||
void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
|
||||
void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
|
||||
|
||||
// Internal interface to handle sliced condition change.
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
|
||||
|
||||
void onSlicedConditionMayChangeLocked_opt1(bool overallCondition, const uint64_t eventTime);
|
||||
void onSlicedConditionMayChangeLocked_opt2(bool overallCondition, const uint64_t eventTime);
|
||||
void onSlicedConditionMayChangeLocked_opt1(bool overallCondition, const int64_t eventTime);
|
||||
void onSlicedConditionMayChangeLocked_opt2(bool overallCondition, const int64_t eventTime);
|
||||
|
||||
// Internal function to calculate the current used bytes.
|
||||
size_t byteSizeLocked() const override;
|
||||
|
||||
void dumpStatesLocked(FILE* out, bool verbose) const override;
|
||||
|
||||
void dropDataLocked(const uint64_t dropTimeNs) override;
|
||||
void dropDataLocked(const int64_t dropTimeNs) override;
|
||||
|
||||
// Util function to flush the old packet.
|
||||
void flushIfNeededLocked(const uint64_t& eventTime);
|
||||
void flushIfNeededLocked(const int64_t& eventTime);
|
||||
|
||||
void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
|
||||
void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
|
||||
|
||||
const DurationMetric_AggregationType mAggregationType;
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@ const int FIELD_ID_WALL_CLOCK_TIMESTAMP_NANOS = 3;
|
||||
EventMetricProducer::EventMetricProducer(const ConfigKey& key, const EventMetric& metric,
|
||||
const int conditionIndex,
|
||||
const sp<ConditionWizard>& wizard,
|
||||
const uint64_t startTimeNs)
|
||||
const int64_t startTimeNs)
|
||||
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard) {
|
||||
if (metric.links().size() > 0) {
|
||||
for (const auto& link : metric.links()) {
|
||||
@@ -75,12 +75,12 @@ EventMetricProducer::~EventMetricProducer() {
|
||||
VLOG("~EventMetricProducer() called");
|
||||
}
|
||||
|
||||
void EventMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
|
||||
void EventMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
|
||||
mProto->clear();
|
||||
}
|
||||
|
||||
void EventMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
}
|
||||
|
||||
std::unique_ptr<std::vector<uint8_t>> serializeProtoLocked(ProtoOutputStream& protoOutput) {
|
||||
@@ -100,7 +100,7 @@ std::unique_ptr<std::vector<uint8_t>> serializeProtoLocked(ProtoOutputStream& pr
|
||||
return buffer;
|
||||
}
|
||||
|
||||
void EventMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void EventMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
ProtoOutputStream* protoOutput) {
|
||||
if (mProto->size() <= 0) {
|
||||
return;
|
||||
@@ -119,7 +119,7 @@ void EventMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
}
|
||||
|
||||
void EventMetricProducer::onConditionChangedLocked(const bool conditionMet,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
|
||||
mCondition = conditionMet;
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ public:
|
||||
// TODO: Pass in the start time from MetricsManager, it should be consistent for all metrics.
|
||||
EventMetricProducer(const ConfigKey& key, const EventMetric& eventMetric,
|
||||
const int conditionIndex, const sp<ConditionWizard>& wizard,
|
||||
const uint64_t startTimeNs);
|
||||
const int64_t startTimeNs);
|
||||
|
||||
virtual ~EventMetricProducer();
|
||||
|
||||
@@ -46,16 +46,16 @@ private:
|
||||
const ConditionKey& conditionKey, bool condition,
|
||||
const LogEvent& event) override;
|
||||
|
||||
void onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
android::util::ProtoOutputStream* protoOutput) override;
|
||||
|
||||
// Internal interface to handle condition change.
|
||||
void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
|
||||
void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
|
||||
|
||||
// Internal interface to handle sliced condition change.
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
|
||||
|
||||
void dropDataLocked(const uint64_t dropTimeNs) override;
|
||||
void dropDataLocked(const int64_t dropTimeNs) override;
|
||||
|
||||
// Internal function to calculate the current used bytes.
|
||||
size_t byteSizeLocked() const override;
|
||||
|
||||
@@ -61,7 +61,7 @@ const int FIELD_ID_WALL_CLOCK_ATOM_TIMESTAMP = 5;
|
||||
GaugeMetricProducer::GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& metric,
|
||||
const int conditionIndex,
|
||||
const sp<ConditionWizard>& wizard, const int pullTagId,
|
||||
const uint64_t startTimeNs,
|
||||
const int64_t startTimeNs,
|
||||
shared_ptr<StatsPullerManager> statsPullerManager)
|
||||
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard),
|
||||
mStatsPullerManager(statsPullerManager),
|
||||
@@ -155,7 +155,7 @@ void GaugeMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
|
||||
}
|
||||
}
|
||||
|
||||
void GaugeMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void GaugeMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
ProtoOutputStream* protoOutput) {
|
||||
VLOG("Gauge metric %lld report now...", (long long)mMetricId);
|
||||
|
||||
@@ -267,7 +267,7 @@ void GaugeMetricProducer::pullLocked() {
|
||||
}
|
||||
|
||||
void GaugeMetricProducer::onConditionChangedLocked(const bool conditionMet,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
VLOG("GaugeMetric %lld onConditionChanged", (long long)mMetricId);
|
||||
flushIfNeededLocked(eventTime);
|
||||
mCondition = conditionMet;
|
||||
@@ -278,7 +278,7 @@ void GaugeMetricProducer::onConditionChangedLocked(const bool conditionMet,
|
||||
}
|
||||
|
||||
void GaugeMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
VLOG("GaugeMetric %lld onSlicedConditionMayChange overall condition %d", (long long)mMetricId,
|
||||
overallCondition);
|
||||
flushIfNeededLocked(eventTime);
|
||||
@@ -336,7 +336,7 @@ void GaugeMetricProducer::onMatchedLogEventInternalLocked(
|
||||
if (condition == false) {
|
||||
return;
|
||||
}
|
||||
uint64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
int64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
mTagId = event.GetTagId();
|
||||
if (eventTimeNs < mCurrentBucketStartTimeNs) {
|
||||
VLOG("Gauge Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
|
||||
@@ -391,7 +391,7 @@ void GaugeMetricProducer::updateCurrentSlicedBucketForAnomaly() {
|
||||
}
|
||||
}
|
||||
|
||||
void GaugeMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
|
||||
void GaugeMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
|
||||
flushIfNeededLocked(dropTimeNs);
|
||||
mPastBuckets.clear();
|
||||
}
|
||||
@@ -401,8 +401,8 @@ void GaugeMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
|
||||
// bucket.
|
||||
// if data is pushed, onMatchedLogEvent will only be called through onConditionChanged() inside
|
||||
// the GaugeMetricProducer while holding the lock.
|
||||
void GaugeMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
|
||||
uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
void GaugeMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
|
||||
int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
|
||||
if (eventTimeNs < currentBucketEndTimeNs) {
|
||||
VLOG("Gauge eventTime is %lld, less than next bucket start time %lld",
|
||||
@@ -420,8 +420,8 @@ void GaugeMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
|
||||
(long long)mCurrentBucketStartTimeNs);
|
||||
}
|
||||
|
||||
void GaugeMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
|
||||
uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
void GaugeMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
|
||||
int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
|
||||
GaugeBucket info;
|
||||
info.mBucketStartNs = mCurrentBucketStartTimeNs;
|
||||
|
||||
@@ -65,7 +65,7 @@ public:
|
||||
void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data) override;
|
||||
|
||||
// GaugeMetric needs to immediately trigger another pull when we create the partial bucket.
|
||||
void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
|
||||
void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
|
||||
const int64_t version) override {
|
||||
std::lock_guard<std::mutex> lock(mMutex);
|
||||
|
||||
@@ -87,32 +87,32 @@ protected:
|
||||
const LogEvent& event) override;
|
||||
|
||||
private:
|
||||
void onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
android::util::ProtoOutputStream* protoOutput) override;
|
||||
|
||||
// for testing
|
||||
GaugeMetricProducer(const ConfigKey& key, const GaugeMetric& gaugeMetric,
|
||||
const int conditionIndex, const sp<ConditionWizard>& wizard,
|
||||
const int pullTagId, const uint64_t startTimeNs,
|
||||
const int pullTagId, const int64_t startTimeNs,
|
||||
std::shared_ptr<StatsPullerManager> statsPullerManager);
|
||||
|
||||
// Internal interface to handle condition change.
|
||||
void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
|
||||
void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
|
||||
|
||||
// Internal interface to handle sliced condition change.
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
|
||||
|
||||
// Internal function to calculate the current used bytes.
|
||||
size_t byteSizeLocked() const override;
|
||||
|
||||
void dumpStatesLocked(FILE* out, bool verbose) const override;
|
||||
|
||||
void dropDataLocked(const uint64_t dropTimeNs) override;
|
||||
void dropDataLocked(const int64_t dropTimeNs) override;
|
||||
|
||||
// Util function to flush the old packet.
|
||||
void flushIfNeededLocked(const uint64_t& eventTime) override;
|
||||
void flushIfNeededLocked(const int64_t& eventTime) override;
|
||||
|
||||
void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
|
||||
void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
|
||||
|
||||
void pullLocked();
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@ namespace statsd {
|
||||
using std::map;
|
||||
|
||||
void MetricProducer::onMatchedLogEventLocked(const size_t matcherIndex, const LogEvent& event) {
|
||||
uint64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
int64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
// this is old event, maybe statsd restarted?
|
||||
if (eventTimeNs < mStartTimeNs) {
|
||||
return;
|
||||
|
||||
@@ -64,7 +64,7 @@ public:
|
||||
* the flush again when the end timestamp is forced to be now, and then after flushing, update
|
||||
* the start timestamp to be now.
|
||||
*/
|
||||
void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
|
||||
void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
|
||||
const int64_t version) override {
|
||||
std::lock_guard<std::mutex> lock(mMutex);
|
||||
|
||||
@@ -79,11 +79,11 @@ public:
|
||||
// is a partial bucket and can merge it with the previous bucket.
|
||||
};
|
||||
|
||||
void notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk, const int uid) override{
|
||||
void notifyAppRemoved(const int64_t& eventTimeNs, const string& apk, const int uid) override{
|
||||
// TODO: Implement me.
|
||||
};
|
||||
|
||||
void onUidMapReceived(const uint64_t& eventTimeNs) override{
|
||||
void onUidMapReceived(const int64_t& eventTimeNs) override{
|
||||
// TODO: Implement me.
|
||||
};
|
||||
|
||||
@@ -93,12 +93,12 @@ public:
|
||||
onMatchedLogEventLocked(matcherIndex, event);
|
||||
}
|
||||
|
||||
void onConditionChanged(const bool condition, const uint64_t eventTime) {
|
||||
void onConditionChanged(const bool condition, const int64_t eventTime) {
|
||||
std::lock_guard<std::mutex> lock(mMutex);
|
||||
onConditionChangedLocked(condition, eventTime);
|
||||
}
|
||||
|
||||
void onSlicedConditionMayChange(bool overallCondition, const uint64_t eventTime) {
|
||||
void onSlicedConditionMayChange(bool overallCondition, const int64_t eventTime) {
|
||||
std::lock_guard<std::mutex> lock(mMutex);
|
||||
onSlicedConditionMayChangeLocked(overallCondition, eventTime);
|
||||
}
|
||||
@@ -110,7 +110,7 @@ public:
|
||||
|
||||
// Output the metrics data to [protoOutput]. All metrics reports end with the same timestamp.
|
||||
// This method clears all the past buckets.
|
||||
void onDumpReport(const uint64_t dumpTimeNs, android::util::ProtoOutputStream* protoOutput) {
|
||||
void onDumpReport(const int64_t dumpTimeNs, android::util::ProtoOutputStream* protoOutput) {
|
||||
std::lock_guard<std::mutex> lock(mMutex);
|
||||
return onDumpReportLocked(dumpTimeNs, protoOutput);
|
||||
}
|
||||
@@ -156,16 +156,16 @@ public:
|
||||
// We still need to keep future data valid and anomaly tracking work, which means we will
|
||||
// have to flush old data, informing anomaly trackers then safely drop old data.
|
||||
// We still keep current bucket data for future metrics' validity.
|
||||
void dropData(const uint64_t dropTimeNs) {
|
||||
void dropData(const int64_t dropTimeNs) {
|
||||
std::lock_guard<std::mutex> lock(mMutex);
|
||||
dropDataLocked(dropTimeNs);
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual void onConditionChangedLocked(const bool condition, const uint64_t eventTime) = 0;
|
||||
virtual void onConditionChangedLocked(const bool condition, const int64_t eventTime) = 0;
|
||||
virtual void onSlicedConditionMayChangeLocked(bool overallCondition,
|
||||
const uint64_t eventTime) = 0;
|
||||
virtual void onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
const int64_t eventTime) = 0;
|
||||
virtual void onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
android::util::ProtoOutputStream* protoOutput) = 0;
|
||||
virtual size_t byteSizeLocked() const = 0;
|
||||
virtual void dumpStatesLocked(FILE* out, bool verbose) const = 0;
|
||||
@@ -173,7 +173,7 @@ protected:
|
||||
/**
|
||||
* Flushes the current bucket if the eventTime is after the current bucket's end time.
|
||||
*/
|
||||
virtual void flushIfNeededLocked(const uint64_t& eventTime){};
|
||||
virtual void flushIfNeededLocked(const int64_t& eventTime){};
|
||||
|
||||
/**
|
||||
* For metrics that aggregate (ie, every metric producer except for EventMetricProducer),
|
||||
@@ -185,15 +185,15 @@ protected:
|
||||
* flushIfNeededLocked or the app upgrade handler; the caller MUST update the bucket timestamp
|
||||
* and bucket number as needed.
|
||||
*/
|
||||
virtual void flushCurrentBucketLocked(const uint64_t& eventTimeNs){};
|
||||
virtual void flushCurrentBucketLocked(const int64_t& eventTimeNs){};
|
||||
|
||||
// Convenience to compute the current bucket's end time, which is always aligned with the
|
||||
// start time of the metric.
|
||||
uint64_t getCurrentBucketEndTimeNs() const {
|
||||
int64_t getCurrentBucketEndTimeNs() const {
|
||||
return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
|
||||
}
|
||||
|
||||
virtual void dropDataLocked(const uint64_t dropTimeNs) = 0;
|
||||
virtual void dropDataLocked(const int64_t dropTimeNs) = 0;
|
||||
|
||||
const int64_t mMetricId;
|
||||
|
||||
@@ -201,15 +201,15 @@ protected:
|
||||
|
||||
// The time when this metric producer was first created. The end time for the current bucket
|
||||
// can be computed from this based on mCurrentBucketNum.
|
||||
uint64_t mStartTimeNs;
|
||||
int64_t mStartTimeNs;
|
||||
|
||||
// Start time may not be aligned with the start of statsd if there is an app upgrade in the
|
||||
// middle of a bucket.
|
||||
uint64_t mCurrentBucketStartTimeNs;
|
||||
int64_t mCurrentBucketStartTimeNs;
|
||||
|
||||
// Used by anomaly detector to track which bucket we are in. This is not sent with the produced
|
||||
// report.
|
||||
uint64_t mCurrentBucketNum;
|
||||
int64_t mCurrentBucketNum;
|
||||
|
||||
int64_t mBucketSizeNs;
|
||||
|
||||
|
||||
@@ -59,8 +59,13 @@ MetricsManager::MetricsManager(const ConfigKey& key, const StatsdConfig& config,
|
||||
const sp<AlarmMonitor>& anomalyAlarmMonitor,
|
||||
const sp<AlarmMonitor>& periodicAlarmMonitor)
|
||||
: mConfigKey(key), mUidMap(uidMap),
|
||||
mTtlNs(config.has_ttl_in_seconds() ? config.ttl_in_seconds() * NS_PER_SEC : -1),
|
||||
mTtlEndNs(-1),
|
||||
mLastReportTimeNs(timeBaseSec * NS_PER_SEC),
|
||||
mLastReportWallClockNs(getWallClockNs()) {
|
||||
// Init the ttl end timestamp.
|
||||
refreshTtl(timeBaseSec * NS_PER_SEC);
|
||||
|
||||
mConfigValid =
|
||||
initStatsdConfig(key, config, *uidMap, anomalyAlarmMonitor, periodicAlarmMonitor,
|
||||
timeBaseSec, currentTimeSec, mTagIds, mAllAtomMatchers,
|
||||
@@ -136,7 +141,7 @@ bool MetricsManager::isConfigValid() const {
|
||||
return mConfigValid;
|
||||
}
|
||||
|
||||
void MetricsManager::notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
|
||||
void MetricsManager::notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
|
||||
const int64_t version) {
|
||||
// check if we care this package
|
||||
if (std::find(mAllowedPkg.begin(), mAllowedPkg.end(), apk) == mAllowedPkg.end()) {
|
||||
@@ -147,7 +152,7 @@ void MetricsManager::notifyAppUpgrade(const uint64_t& eventTimeNs, const string&
|
||||
initLogSourceWhiteList();
|
||||
}
|
||||
|
||||
void MetricsManager::notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk,
|
||||
void MetricsManager::notifyAppRemoved(const int64_t& eventTimeNs, const string& apk,
|
||||
const int uid) {
|
||||
// check if we care this package
|
||||
if (std::find(mAllowedPkg.begin(), mAllowedPkg.end(), apk) == mAllowedPkg.end()) {
|
||||
@@ -158,7 +163,7 @@ void MetricsManager::notifyAppRemoved(const uint64_t& eventTimeNs, const string&
|
||||
initLogSourceWhiteList();
|
||||
}
|
||||
|
||||
void MetricsManager::onUidMapReceived(const uint64_t& eventTimeNs) {
|
||||
void MetricsManager::onUidMapReceived(const int64_t& eventTimeNs) {
|
||||
if (mAllowedPkg.size() == 0) {
|
||||
return;
|
||||
}
|
||||
@@ -179,13 +184,13 @@ void MetricsManager::dumpStates(FILE* out, bool verbose) {
|
||||
}
|
||||
}
|
||||
|
||||
void MetricsManager::dropData(const uint64_t dropTimeNs) {
|
||||
void MetricsManager::dropData(const int64_t dropTimeNs) {
|
||||
for (const auto& producer : mAllMetricProducers) {
|
||||
producer->dropData(dropTimeNs);
|
||||
}
|
||||
}
|
||||
|
||||
void MetricsManager::onDumpReport(const uint64_t dumpTimeStampNs, ProtoOutputStream* protoOutput) {
|
||||
void MetricsManager::onDumpReport(const int64_t dumpTimeStampNs, ProtoOutputStream* protoOutput) {
|
||||
VLOG("=========================Metric Reports Start==========================");
|
||||
// one StatsLogReport per MetricProduer
|
||||
for (const auto& producer : mAllMetricProducers) {
|
||||
@@ -288,7 +293,7 @@ void MetricsManager::onLogEvent(const LogEvent& event) {
|
||||
}
|
||||
|
||||
int tagId = event.GetTagId();
|
||||
uint64_t eventTime = event.GetElapsedTimestampNs();
|
||||
int64_t eventTime = event.GetElapsedTimestampNs();
|
||||
if (mTagIds.find(tagId) == mTagIds.end()) {
|
||||
// not interesting...
|
||||
return;
|
||||
@@ -367,7 +372,7 @@ void MetricsManager::onLogEvent(const LogEvent& event) {
|
||||
}
|
||||
|
||||
void MetricsManager::onAnomalyAlarmFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet) {
|
||||
for (const auto& itr : mAllAnomalyTrackers) {
|
||||
itr->informAlarmsFired(timestampNs, alarmSet);
|
||||
@@ -375,7 +380,7 @@ void MetricsManager::onAnomalyAlarmFired(
|
||||
}
|
||||
|
||||
void MetricsManager::onPeriodicAlarmFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet) {
|
||||
for (const auto& itr : mAllPeriodicAlarmTrackers) {
|
||||
itr->informAlarmsFired(timestampNs, alarmSet);
|
||||
|
||||
@@ -49,19 +49,19 @@ public:
|
||||
void onLogEvent(const LogEvent& event);
|
||||
|
||||
void onAnomalyAlarmFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet);
|
||||
|
||||
void onPeriodicAlarmFired(
|
||||
const uint64_t& timestampNs,
|
||||
const int64_t& timestampNs,
|
||||
unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet);
|
||||
|
||||
void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
|
||||
void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
|
||||
const int64_t version) override;
|
||||
|
||||
void notifyAppRemoved(const uint64_t& eventTimeNs, const string& apk, const int uid) override;
|
||||
void notifyAppRemoved(const int64_t& eventTimeNs, const string& apk, const int uid) override;
|
||||
|
||||
void onUidMapReceived(const uint64_t& eventTimeNs) override;
|
||||
void onUidMapReceived(const int64_t& eventTimeNs) override;
|
||||
|
||||
bool shouldAddUidMapListener() const {
|
||||
return !mAllowedPkg.empty();
|
||||
@@ -69,6 +69,16 @@ public:
|
||||
|
||||
void dumpStates(FILE* out, bool verbose);
|
||||
|
||||
inline bool isInTtl(const int64_t timestampNs) const {
|
||||
return mTtlNs <= 0 || timestampNs < mTtlEndNs;
|
||||
};
|
||||
|
||||
void refreshTtl(const int64_t currentTimestampNs) {
|
||||
if (mTtlNs > 0) {
|
||||
mTtlEndNs = currentTimestampNs + mTtlNs;
|
||||
}
|
||||
};
|
||||
|
||||
// Returns the elapsed realtime when this metric manager last reported metrics.
|
||||
inline int64_t getLastReportTimeNs() const {
|
||||
return mLastReportTimeNs;
|
||||
@@ -78,22 +88,29 @@ public:
|
||||
return mLastReportWallClockNs;
|
||||
};
|
||||
|
||||
virtual void dropData(const uint64_t dropTimeNs);
|
||||
virtual void dropData(const int64_t dropTimeNs);
|
||||
|
||||
// Config source owner can call onDumpReport() to get all the metrics collected.
|
||||
virtual void onDumpReport(const uint64_t dumpTimeNs,
|
||||
virtual void onDumpReport(const int64_t dumpTimeNs,
|
||||
android::util::ProtoOutputStream* protoOutput);
|
||||
|
||||
// Computes the total byte size of all metrics managed by a single config source.
|
||||
// Does not change the state.
|
||||
virtual size_t byteSize();
|
||||
|
||||
private:
|
||||
// For test only.
|
||||
inline int64_t getTtlEndNs() const { return mTtlEndNs; }
|
||||
|
||||
const ConfigKey mConfigKey;
|
||||
|
||||
sp<UidMap> mUidMap;
|
||||
|
||||
bool mConfigValid = false;
|
||||
|
||||
const int64_t mTtlNs;
|
||||
int64_t mTtlEndNs;
|
||||
|
||||
int64_t mLastReportTimeNs;
|
||||
int64_t mLastReportWallClockNs;
|
||||
|
||||
@@ -189,6 +206,7 @@ private:
|
||||
FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
|
||||
|
||||
FRIEND_TEST(AlarmE2eTest, TestMultipleAlarms);
|
||||
FRIEND_TEST(ConfigTtlE2eTest, TestCountMetric);
|
||||
};
|
||||
|
||||
} // namespace statsd
|
||||
|
||||
@@ -63,7 +63,7 @@ const int FIELD_ID_VALUE = 3;
|
||||
ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric& metric,
|
||||
const int conditionIndex,
|
||||
const sp<ConditionWizard>& wizard, const int pullTagId,
|
||||
const uint64_t startTimeNs,
|
||||
const int64_t startTimeNs,
|
||||
shared_ptr<StatsPullerManager> statsPullerManager)
|
||||
: MetricProducer(metric.id(), key, startTimeNs, conditionIndex, wizard),
|
||||
mValueField(metric.value_field()),
|
||||
@@ -124,7 +124,7 @@ ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric
|
||||
ValueMetricProducer::ValueMetricProducer(const ConfigKey& key, const ValueMetric& metric,
|
||||
const int conditionIndex,
|
||||
const sp<ConditionWizard>& wizard, const int pullTagId,
|
||||
const uint64_t startTimeNs)
|
||||
const int64_t startTimeNs)
|
||||
: ValueMetricProducer(key, metric, conditionIndex, wizard, pullTagId, startTimeNs,
|
||||
make_shared<StatsPullerManager>()) {
|
||||
}
|
||||
@@ -137,16 +137,16 @@ ValueMetricProducer::~ValueMetricProducer() {
|
||||
}
|
||||
|
||||
void ValueMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
|
||||
const uint64_t eventTime) {
|
||||
const int64_t eventTime) {
|
||||
VLOG("Metric %lld onSlicedConditionMayChange", (long long)mMetricId);
|
||||
}
|
||||
|
||||
void ValueMetricProducer::dropDataLocked(const uint64_t dropTimeNs) {
|
||||
void ValueMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
|
||||
flushIfNeededLocked(dropTimeNs);
|
||||
mPastBuckets.clear();
|
||||
}
|
||||
|
||||
void ValueMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void ValueMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
ProtoOutputStream* protoOutput) {
|
||||
VLOG("metric %lld dump report now...", (long long)mMetricId);
|
||||
flushIfNeededLocked(dumpTimeNs);
|
||||
@@ -197,7 +197,7 @@ void ValueMetricProducer::onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
}
|
||||
|
||||
void ValueMetricProducer::onConditionChangedLocked(const bool condition,
|
||||
const uint64_t eventTimeNs) {
|
||||
const int64_t eventTimeNs) {
|
||||
mCondition = condition;
|
||||
|
||||
if (eventTimeNs < mCurrentBucketStartTimeNs) {
|
||||
@@ -231,8 +231,8 @@ void ValueMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEven
|
||||
}
|
||||
// For scheduled pulled data, the effective event time is snap to the nearest
|
||||
// bucket boundary to make bucket finalize.
|
||||
uint64_t realEventTime = allData.at(0)->GetElapsedTimestampNs();
|
||||
uint64_t eventTime = mStartTimeNs +
|
||||
int64_t realEventTime = allData.at(0)->GetElapsedTimestampNs();
|
||||
int64_t eventTime = mStartTimeNs +
|
||||
((realEventTime - mStartTimeNs) / mBucketSizeNs) * mBucketSizeNs;
|
||||
|
||||
mCondition = false;
|
||||
@@ -290,7 +290,7 @@ void ValueMetricProducer::onMatchedLogEventInternalLocked(
|
||||
const size_t matcherIndex, const MetricDimensionKey& eventKey,
|
||||
const ConditionKey& conditionKey, bool condition,
|
||||
const LogEvent& event) {
|
||||
uint64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
int64_t eventTimeNs = event.GetElapsedTimestampNs();
|
||||
if (eventTimeNs < mCurrentBucketStartTimeNs) {
|
||||
VLOG("Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
|
||||
(long long)mCurrentBucketStartTimeNs);
|
||||
@@ -349,8 +349,8 @@ void ValueMetricProducer::onMatchedLogEventInternalLocked(
|
||||
}
|
||||
}
|
||||
|
||||
void ValueMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
|
||||
uint64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
void ValueMetricProducer::flushIfNeededLocked(const int64_t& eventTimeNs) {
|
||||
int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
|
||||
if (currentBucketEndTimeNs > eventTimeNs) {
|
||||
VLOG("eventTime is %lld, less than next bucket start time %lld", (long long)eventTimeNs,
|
||||
@@ -371,10 +371,10 @@ void ValueMetricProducer::flushIfNeededLocked(const uint64_t& eventTimeNs) {
|
||||
(long long)mCurrentBucketStartTimeNs);
|
||||
}
|
||||
|
||||
void ValueMetricProducer::flushCurrentBucketLocked(const uint64_t& eventTimeNs) {
|
||||
void ValueMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs) {
|
||||
VLOG("finalizing bucket for %ld, dumping %d slices", (long)mCurrentBucketStartTimeNs,
|
||||
(int)mCurrentSlicedBucket.size());
|
||||
uint64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
|
||||
|
||||
ValueBucket info;
|
||||
info.mBucketStartNs = mCurrentBucketStartTimeNs;
|
||||
|
||||
@@ -40,14 +40,14 @@ class ValueMetricProducer : public virtual MetricProducer, public virtual PullDa
|
||||
public:
|
||||
ValueMetricProducer(const ConfigKey& key, const ValueMetric& valueMetric,
|
||||
const int conditionIndex, const sp<ConditionWizard>& wizard,
|
||||
const int pullTagId, const uint64_t startTimeNs);
|
||||
const int pullTagId, const int64_t startTimeNs);
|
||||
|
||||
virtual ~ValueMetricProducer();
|
||||
|
||||
void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data) override;
|
||||
|
||||
// ValueMetric needs special logic if it's a pulled atom.
|
||||
void notifyAppUpgrade(const uint64_t& eventTimeNs, const string& apk, const int uid,
|
||||
void notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
|
||||
const int64_t version) override {
|
||||
std::lock_guard<std::mutex> lock(mMutex);
|
||||
|
||||
@@ -86,14 +86,14 @@ protected:
|
||||
const LogEvent& event) override;
|
||||
|
||||
private:
|
||||
void onDumpReportLocked(const uint64_t dumpTimeNs,
|
||||
void onDumpReportLocked(const int64_t dumpTimeNs,
|
||||
android::util::ProtoOutputStream* protoOutput) override;
|
||||
|
||||
// Internal interface to handle condition change.
|
||||
void onConditionChangedLocked(const bool conditionMet, const uint64_t eventTime) override;
|
||||
void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
|
||||
|
||||
// Internal interface to handle sliced condition change.
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const uint64_t eventTime) override;
|
||||
void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
|
||||
|
||||
// Internal function to calculate the current used bytes.
|
||||
size_t byteSizeLocked() const override;
|
||||
@@ -101,11 +101,11 @@ private:
|
||||
void dumpStatesLocked(FILE* out, bool verbose) const override;
|
||||
|
||||
// Util function to flush the old packet.
|
||||
void flushIfNeededLocked(const uint64_t& eventTime) override;
|
||||
void flushIfNeededLocked(const int64_t& eventTime) override;
|
||||
|
||||
void flushCurrentBucketLocked(const uint64_t& eventTimeNs) override;
|
||||
void flushCurrentBucketLocked(const int64_t& eventTimeNs) override;
|
||||
|
||||
void dropDataLocked(const uint64_t dropTimeNs) override;
|
||||
void dropDataLocked(const int64_t dropTimeNs) override;
|
||||
|
||||
const FieldMatcher mValueField;
|
||||
|
||||
@@ -114,7 +114,7 @@ private:
|
||||
// for testing
|
||||
ValueMetricProducer(const ConfigKey& key, const ValueMetric& valueMetric,
|
||||
const int conditionIndex, const sp<ConditionWizard>& wizard,
|
||||
const int pullTagId, const uint64_t startTimeNs,
|
||||
const int pullTagId, const int64_t startTimeNs,
|
||||
std::shared_ptr<StatsPullerManager> statsPullerManager);
|
||||
|
||||
// tagId for pulled data. -1 if this is not pulled
|
||||
|
||||
@@ -52,9 +52,9 @@ struct DurationInfo {
|
||||
};
|
||||
|
||||
struct DurationBucket {
|
||||
uint64_t mBucketStartNs;
|
||||
uint64_t mBucketEndNs;
|
||||
uint64_t mDuration;
|
||||
int64_t mBucketStartNs;
|
||||
int64_t mBucketEndNs;
|
||||
int64_t mDuration;
|
||||
};
|
||||
|
||||
class DurationTracker {
|
||||
@@ -62,8 +62,8 @@ public:
|
||||
DurationTracker(const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
|
||||
sp<ConditionWizard> wizard, int conditionIndex,
|
||||
const std::vector<Matcher>& dimensionInCondition, bool nesting,
|
||||
uint64_t currentBucketStartNs, uint64_t currentBucketNum, uint64_t startTimeNs,
|
||||
uint64_t bucketSizeNs, bool conditionSliced, bool fullLink,
|
||||
int64_t currentBucketStartNs, int64_t currentBucketNum, int64_t startTimeNs,
|
||||
int64_t bucketSizeNs, bool conditionSliced, bool fullLink,
|
||||
const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
|
||||
: mConfigKey(key),
|
||||
mTrackerId(id),
|
||||
@@ -84,27 +84,27 @@ public:
|
||||
|
||||
virtual ~DurationTracker(){};
|
||||
|
||||
virtual unique_ptr<DurationTracker> clone(const uint64_t eventTime) = 0;
|
||||
virtual unique_ptr<DurationTracker> clone(const int64_t eventTime) = 0;
|
||||
|
||||
virtual void noteStart(const HashableDimensionKey& key, bool condition,
|
||||
const uint64_t eventTime, const ConditionKey& conditionKey) = 0;
|
||||
virtual void noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
|
||||
const int64_t eventTime, const ConditionKey& conditionKey) = 0;
|
||||
virtual void noteStop(const HashableDimensionKey& key, const int64_t eventTime,
|
||||
const bool stopAll) = 0;
|
||||
virtual void noteStopAll(const uint64_t eventTime) = 0;
|
||||
virtual void noteStopAll(const int64_t eventTime) = 0;
|
||||
|
||||
virtual void onSlicedConditionMayChange(bool overallCondition, const uint64_t timestamp) = 0;
|
||||
virtual void onConditionChanged(bool condition, const uint64_t timestamp) = 0;
|
||||
virtual void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) = 0;
|
||||
virtual void onConditionChanged(bool condition, const int64_t timestamp) = 0;
|
||||
|
||||
// Flush stale buckets if needed, and return true if the tracker has no on-going duration
|
||||
// events, so that the owner can safely remove the tracker.
|
||||
virtual bool flushIfNeeded(
|
||||
uint64_t timestampNs,
|
||||
int64_t timestampNs,
|
||||
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) = 0;
|
||||
|
||||
// Should only be called during an app upgrade or from this tracker's flushIfNeeded. If from
|
||||
// an app upgrade, we assume that we're trying to form a partial bucket.
|
||||
virtual bool flushCurrentBucket(
|
||||
const uint64_t& eventTimeNs,
|
||||
const int64_t& eventTimeNs,
|
||||
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) = 0;
|
||||
|
||||
// Predict the anomaly timestamp given the current status.
|
||||
@@ -118,15 +118,15 @@ public:
|
||||
}
|
||||
|
||||
protected:
|
||||
uint64_t getCurrentBucketEndTimeNs() const {
|
||||
int64_t getCurrentBucketEndTimeNs() const {
|
||||
return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
|
||||
}
|
||||
|
||||
// Starts the anomaly alarm.
|
||||
void startAnomalyAlarm(const uint64_t eventTime) {
|
||||
void startAnomalyAlarm(const int64_t eventTime) {
|
||||
for (auto& anomalyTracker : mAnomalyTrackers) {
|
||||
if (anomalyTracker != nullptr) {
|
||||
const uint64_t alarmTimestampNs =
|
||||
const int64_t alarmTimestampNs =
|
||||
predictAnomalyTimestampNs(*anomalyTracker, eventTime);
|
||||
if (alarmTimestampNs > 0) {
|
||||
anomalyTracker->startAlarm(mEventKey, alarmTimestampNs);
|
||||
@@ -136,7 +136,7 @@ protected:
|
||||
}
|
||||
|
||||
// Stops the anomaly alarm. If it should have already fired, declare the anomaly now.
|
||||
void stopAnomalyAlarm(const uint64_t timestamp) {
|
||||
void stopAnomalyAlarm(const int64_t timestamp) {
|
||||
for (auto& anomalyTracker : mAnomalyTrackers) {
|
||||
if (anomalyTracker != nullptr) {
|
||||
anomalyTracker->stopAlarm(mEventKey, timestamp);
|
||||
@@ -152,7 +152,7 @@ protected:
|
||||
}
|
||||
}
|
||||
|
||||
void detectAndDeclareAnomaly(const uint64_t& timestamp, const int64_t& currBucketNum,
|
||||
void detectAndDeclareAnomaly(const int64_t& timestamp, const int64_t& currBucketNum,
|
||||
const int64_t& currentBucketValue) {
|
||||
for (auto& anomalyTracker : mAnomalyTrackers) {
|
||||
if (anomalyTracker != nullptr) {
|
||||
@@ -164,7 +164,7 @@ protected:
|
||||
|
||||
// Convenience to compute the current bucket's end time, which is always aligned with the
|
||||
// start time of the metric.
|
||||
uint64_t getCurrentBucketEndTimeNs() {
|
||||
int64_t getCurrentBucketEndTimeNs() {
|
||||
return mStartTimeNs + (mCurrentBucketNum + 1) * mBucketSizeNs;
|
||||
}
|
||||
|
||||
@@ -185,15 +185,15 @@ protected:
|
||||
|
||||
const bool mNested;
|
||||
|
||||
uint64_t mCurrentBucketStartTimeNs;
|
||||
int64_t mCurrentBucketStartTimeNs;
|
||||
|
||||
int64_t mDuration; // current recorded duration result (for partial bucket)
|
||||
|
||||
int64_t mDurationFullBucket; // Sum of past partial buckets in current full bucket.
|
||||
|
||||
uint64_t mCurrentBucketNum;
|
||||
int64_t mCurrentBucketNum;
|
||||
|
||||
const uint64_t mStartTimeNs;
|
||||
const int64_t mStartTimeNs;
|
||||
|
||||
const bool mConditionSliced;
|
||||
|
||||
|
||||
@@ -28,8 +28,8 @@ MaxDurationTracker::MaxDurationTracker(const ConfigKey& key, const int64_t& id,
|
||||
const MetricDimensionKey& eventKey,
|
||||
sp<ConditionWizard> wizard, int conditionIndex,
|
||||
const vector<Matcher>& dimensionInCondition, bool nesting,
|
||||
uint64_t currentBucketStartNs, uint64_t currentBucketNum,
|
||||
uint64_t startTimeNs, uint64_t bucketSizeNs,
|
||||
int64_t currentBucketStartNs, int64_t currentBucketNum,
|
||||
int64_t startTimeNs, int64_t bucketSizeNs,
|
||||
bool conditionSliced, bool fullLink,
|
||||
const vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
|
||||
: DurationTracker(key, id, eventKey, wizard, conditionIndex, dimensionInCondition, nesting,
|
||||
@@ -41,7 +41,7 @@ MaxDurationTracker::MaxDurationTracker(const ConfigKey& key, const int64_t& id,
|
||||
}
|
||||
}
|
||||
|
||||
unique_ptr<DurationTracker> MaxDurationTracker::clone(const uint64_t eventTime) {
|
||||
unique_ptr<DurationTracker> MaxDurationTracker::clone(const int64_t eventTime) {
|
||||
auto clonedTracker = make_unique<MaxDurationTracker>(*this);
|
||||
for (auto it = clonedTracker->mInfos.begin(); it != clonedTracker->mInfos.end();) {
|
||||
if (it->second.state != kStopped) {
|
||||
@@ -80,7 +80,7 @@ bool MaxDurationTracker::hitGuardRail(const HashableDimensionKey& newKey) {
|
||||
}
|
||||
|
||||
void MaxDurationTracker::noteStart(const HashableDimensionKey& key, bool condition,
|
||||
const uint64_t eventTime, const ConditionKey& conditionKey) {
|
||||
const int64_t eventTime, const ConditionKey& conditionKey) {
|
||||
// this will construct a new DurationInfo if this key didn't exist.
|
||||
if (hitGuardRail(key)) {
|
||||
return;
|
||||
@@ -114,7 +114,7 @@ void MaxDurationTracker::noteStart(const HashableDimensionKey& key, bool conditi
|
||||
}
|
||||
|
||||
|
||||
void MaxDurationTracker::noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
|
||||
void MaxDurationTracker::noteStop(const HashableDimensionKey& key, const int64_t eventTime,
|
||||
bool forceStop) {
|
||||
VLOG("MaxDuration: key %s stop", key.toString().c_str());
|
||||
if (mInfos.find(key) == mInfos.end()) {
|
||||
@@ -175,7 +175,7 @@ bool MaxDurationTracker::anyStarted() {
|
||||
return false;
|
||||
}
|
||||
|
||||
void MaxDurationTracker::noteStopAll(const uint64_t eventTime) {
|
||||
void MaxDurationTracker::noteStopAll(const int64_t eventTime) {
|
||||
std::set<HashableDimensionKey> keys;
|
||||
for (const auto& pair : mInfos) {
|
||||
keys.insert(pair.first);
|
||||
@@ -186,14 +186,14 @@ void MaxDurationTracker::noteStopAll(const uint64_t eventTime) {
|
||||
}
|
||||
|
||||
bool MaxDurationTracker::flushCurrentBucket(
|
||||
const uint64_t& eventTimeNs,
|
||||
const int64_t& eventTimeNs,
|
||||
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
|
||||
VLOG("MaxDurationTracker flushing.....");
|
||||
|
||||
// adjust the bucket start time
|
||||
int numBucketsForward = 0;
|
||||
uint64_t fullBucketEnd = getCurrentBucketEndTimeNs();
|
||||
uint64_t currentBucketEndTimeNs;
|
||||
int64_t fullBucketEnd = getCurrentBucketEndTimeNs();
|
||||
int64_t currentBucketEndTimeNs;
|
||||
if (eventTimeNs >= fullBucketEnd) {
|
||||
numBucketsForward = 1 + (eventTimeNs - fullBucketEnd) / mBucketSizeNs;
|
||||
currentBucketEndTimeNs = fullBucketEnd;
|
||||
@@ -238,7 +238,7 @@ bool MaxDurationTracker::flushCurrentBucket(
|
||||
}
|
||||
|
||||
bool MaxDurationTracker::flushIfNeeded(
|
||||
uint64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
|
||||
int64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
|
||||
if (eventTimeNs < getCurrentBucketEndTimeNs()) {
|
||||
return false;
|
||||
}
|
||||
@@ -246,7 +246,7 @@ bool MaxDurationTracker::flushIfNeeded(
|
||||
}
|
||||
|
||||
void MaxDurationTracker::onSlicedConditionMayChange(bool overallCondition,
|
||||
const uint64_t timestamp) {
|
||||
const int64_t timestamp) {
|
||||
// Now for each of the on-going event, check if the condition has changed for them.
|
||||
for (auto& pair : mInfos) {
|
||||
if (pair.second.state == kStopped) {
|
||||
@@ -268,14 +268,14 @@ void MaxDurationTracker::onSlicedConditionMayChange(bool overallCondition,
|
||||
}
|
||||
}
|
||||
|
||||
void MaxDurationTracker::onConditionChanged(bool condition, const uint64_t timestamp) {
|
||||
void MaxDurationTracker::onConditionChanged(bool condition, const int64_t timestamp) {
|
||||
for (auto& pair : mInfos) {
|
||||
noteConditionChanged(pair.first, condition, timestamp);
|
||||
}
|
||||
}
|
||||
|
||||
void MaxDurationTracker::noteConditionChanged(const HashableDimensionKey& key, bool conditionMet,
|
||||
const uint64_t timestamp) {
|
||||
const int64_t timestamp) {
|
||||
auto it = mInfos.find(key);
|
||||
if (it == mInfos.end()) {
|
||||
return;
|
||||
|
||||
@@ -31,30 +31,30 @@ public:
|
||||
MaxDurationTracker(const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
|
||||
sp<ConditionWizard> wizard, int conditionIndex,
|
||||
const std::vector<Matcher>& dimensionInCondition, bool nesting,
|
||||
uint64_t currentBucketStartNs, uint64_t currentBucketNum,
|
||||
uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced,
|
||||
int64_t currentBucketStartNs, int64_t currentBucketNum,
|
||||
int64_t startTimeNs, int64_t bucketSizeNs, bool conditionSliced,
|
||||
bool fullLink,
|
||||
const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers);
|
||||
|
||||
MaxDurationTracker(const MaxDurationTracker& tracker) = default;
|
||||
|
||||
unique_ptr<DurationTracker> clone(const uint64_t eventTime) override;
|
||||
unique_ptr<DurationTracker> clone(const int64_t eventTime) override;
|
||||
|
||||
void noteStart(const HashableDimensionKey& key, bool condition, const uint64_t eventTime,
|
||||
void noteStart(const HashableDimensionKey& key, bool condition, const int64_t eventTime,
|
||||
const ConditionKey& conditionKey) override;
|
||||
void noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
|
||||
void noteStop(const HashableDimensionKey& key, const int64_t eventTime,
|
||||
const bool stopAll) override;
|
||||
void noteStopAll(const uint64_t eventTime) override;
|
||||
void noteStopAll(const int64_t eventTime) override;
|
||||
|
||||
bool flushIfNeeded(
|
||||
uint64_t timestampNs,
|
||||
int64_t timestampNs,
|
||||
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
|
||||
bool flushCurrentBucket(
|
||||
const uint64_t& eventTimeNs,
|
||||
const int64_t& eventTimeNs,
|
||||
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>*) override;
|
||||
|
||||
void onSlicedConditionMayChange(bool overallCondition, const uint64_t timestamp) override;
|
||||
void onConditionChanged(bool condition, const uint64_t timestamp) override;
|
||||
void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) override;
|
||||
void onConditionChanged(bool condition, const int64_t timestamp) override;
|
||||
|
||||
int64_t predictAnomalyTimestampNs(const DurationAnomalyTracker& anomalyTracker,
|
||||
const int64_t currentTimestamp) const override;
|
||||
@@ -67,7 +67,7 @@ private:
|
||||
std::unordered_map<HashableDimensionKey, DurationInfo> mInfos;
|
||||
|
||||
void noteConditionChanged(const HashableDimensionKey& key, bool conditionMet,
|
||||
const uint64_t timestamp);
|
||||
const int64_t timestamp);
|
||||
|
||||
// return true if we should not allow newKey to be tracked because we are above the threshold
|
||||
bool hitGuardRail(const HashableDimensionKey& newKey);
|
||||
|
||||
@@ -27,8 +27,8 @@ using std::pair;
|
||||
OringDurationTracker::OringDurationTracker(
|
||||
const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
|
||||
sp<ConditionWizard> wizard, int conditionIndex, const vector<Matcher>& dimensionInCondition,
|
||||
bool nesting, uint64_t currentBucketStartNs, uint64_t currentBucketNum,
|
||||
uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced, bool fullLink,
|
||||
bool nesting, int64_t currentBucketStartNs, int64_t currentBucketNum,
|
||||
int64_t startTimeNs, int64_t bucketSizeNs, bool conditionSliced, bool fullLink,
|
||||
const vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
|
||||
: DurationTracker(key, id, eventKey, wizard, conditionIndex, dimensionInCondition, nesting,
|
||||
currentBucketStartNs, currentBucketNum, startTimeNs, bucketSizeNs,
|
||||
@@ -42,7 +42,7 @@ OringDurationTracker::OringDurationTracker(
|
||||
}
|
||||
}
|
||||
|
||||
unique_ptr<DurationTracker> OringDurationTracker::clone(const uint64_t eventTime) {
|
||||
unique_ptr<DurationTracker> OringDurationTracker::clone(const int64_t eventTime) {
|
||||
auto clonedTracker = make_unique<OringDurationTracker>(*this);
|
||||
clonedTracker->mLastStartTime = eventTime;
|
||||
clonedTracker->mDuration = 0;
|
||||
@@ -69,7 +69,7 @@ bool OringDurationTracker::hitGuardRail(const HashableDimensionKey& newKey) {
|
||||
}
|
||||
|
||||
void OringDurationTracker::noteStart(const HashableDimensionKey& key, bool condition,
|
||||
const uint64_t eventTime, const ConditionKey& conditionKey) {
|
||||
const int64_t eventTime, const ConditionKey& conditionKey) {
|
||||
if (hitGuardRail(key)) {
|
||||
return;
|
||||
}
|
||||
@@ -90,7 +90,7 @@ void OringDurationTracker::noteStart(const HashableDimensionKey& key, bool condi
|
||||
VLOG("Oring: %s start, condition %d", key.toString().c_str(), condition);
|
||||
}
|
||||
|
||||
void OringDurationTracker::noteStop(const HashableDimensionKey& key, const uint64_t timestamp,
|
||||
void OringDurationTracker::noteStop(const HashableDimensionKey& key, const int64_t timestamp,
|
||||
const bool stopAll) {
|
||||
VLOG("Oring: %s stop", key.toString().c_str());
|
||||
auto it = mStarted.find(key);
|
||||
@@ -121,7 +121,7 @@ void OringDurationTracker::noteStop(const HashableDimensionKey& key, const uint6
|
||||
}
|
||||
}
|
||||
|
||||
void OringDurationTracker::noteStopAll(const uint64_t timestamp) {
|
||||
void OringDurationTracker::noteStopAll(const int64_t timestamp) {
|
||||
if (!mStarted.empty()) {
|
||||
mDuration += (timestamp - mLastStartTime);
|
||||
VLOG("Oring Stop all: record duration %lld %lld ", (long long)timestamp - mLastStartTime,
|
||||
@@ -136,7 +136,7 @@ void OringDurationTracker::noteStopAll(const uint64_t timestamp) {
|
||||
}
|
||||
|
||||
bool OringDurationTracker::flushCurrentBucket(
|
||||
const uint64_t& eventTimeNs,
|
||||
const int64_t& eventTimeNs,
|
||||
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
|
||||
VLOG("OringDurationTracker Flushing.............");
|
||||
|
||||
@@ -144,8 +144,8 @@ bool OringDurationTracker::flushCurrentBucket(
|
||||
// MetricProducer#notifyAppUpgrade.
|
||||
|
||||
int numBucketsForward = 0;
|
||||
uint64_t fullBucketEnd = getCurrentBucketEndTimeNs();
|
||||
uint64_t currentBucketEndTimeNs;
|
||||
int64_t fullBucketEnd = getCurrentBucketEndTimeNs();
|
||||
int64_t currentBucketEndTimeNs;
|
||||
|
||||
if (eventTimeNs >= fullBucketEnd) {
|
||||
numBucketsForward = 1 + (eventTimeNs - fullBucketEnd) / mBucketSizeNs;
|
||||
@@ -207,7 +207,7 @@ bool OringDurationTracker::flushCurrentBucket(
|
||||
}
|
||||
|
||||
bool OringDurationTracker::flushIfNeeded(
|
||||
uint64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
|
||||
int64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
|
||||
if (eventTimeNs < getCurrentBucketEndTimeNs()) {
|
||||
return false;
|
||||
}
|
||||
@@ -215,7 +215,7 @@ bool OringDurationTracker::flushIfNeeded(
|
||||
}
|
||||
|
||||
void OringDurationTracker::onSlicedConditionMayChange(bool overallCondition,
|
||||
const uint64_t timestamp) {
|
||||
const int64_t timestamp) {
|
||||
vector<pair<HashableDimensionKey, int>> startedToPaused;
|
||||
vector<pair<HashableDimensionKey, int>> pausedToStarted;
|
||||
if (!mStarted.empty()) {
|
||||
@@ -297,7 +297,7 @@ void OringDurationTracker::onSlicedConditionMayChange(bool overallCondition,
|
||||
}
|
||||
}
|
||||
|
||||
void OringDurationTracker::onConditionChanged(bool condition, const uint64_t timestamp) {
|
||||
void OringDurationTracker::onConditionChanged(bool condition, const int64_t timestamp) {
|
||||
if (condition) {
|
||||
if (!mPaused.empty()) {
|
||||
VLOG("Condition true, all started");
|
||||
|
||||
@@ -30,29 +30,29 @@ public:
|
||||
OringDurationTracker(const ConfigKey& key, const int64_t& id,
|
||||
const MetricDimensionKey& eventKey, sp<ConditionWizard> wizard,
|
||||
int conditionIndex, const std::vector<Matcher>& dimensionInCondition,
|
||||
bool nesting, uint64_t currentBucketStartNs, uint64_t currentBucketNum,
|
||||
uint64_t startTimeNs, uint64_t bucketSizeNs, bool conditionSliced,
|
||||
bool nesting, int64_t currentBucketStartNs, int64_t currentBucketNum,
|
||||
int64_t startTimeNs, int64_t bucketSizeNs, bool conditionSliced,
|
||||
bool fullLink,
|
||||
const std::vector<sp<DurationAnomalyTracker>>& anomalyTrackers);
|
||||
|
||||
OringDurationTracker(const OringDurationTracker& tracker) = default;
|
||||
|
||||
unique_ptr<DurationTracker> clone(const uint64_t eventTime) override;
|
||||
unique_ptr<DurationTracker> clone(const int64_t eventTime) override;
|
||||
|
||||
void noteStart(const HashableDimensionKey& key, bool condition, const uint64_t eventTime,
|
||||
void noteStart(const HashableDimensionKey& key, bool condition, const int64_t eventTime,
|
||||
const ConditionKey& conditionKey) override;
|
||||
void noteStop(const HashableDimensionKey& key, const uint64_t eventTime,
|
||||
void noteStop(const HashableDimensionKey& key, const int64_t eventTime,
|
||||
const bool stopAll) override;
|
||||
void noteStopAll(const uint64_t eventTime) override;
|
||||
void noteStopAll(const int64_t eventTime) override;
|
||||
|
||||
void onSlicedConditionMayChange(bool overallCondition, const uint64_t timestamp) override;
|
||||
void onConditionChanged(bool condition, const uint64_t timestamp) override;
|
||||
void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) override;
|
||||
void onConditionChanged(bool condition, const int64_t timestamp) override;
|
||||
|
||||
bool flushCurrentBucket(
|
||||
const uint64_t& eventTimeNs,
|
||||
const int64_t& eventTimeNs,
|
||||
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
|
||||
bool flushIfNeeded(
|
||||
uint64_t timestampNs,
|
||||
int64_t timestampNs,
|
||||
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
|
||||
|
||||
int64_t predictAnomalyTimestampNs(const DurationAnomalyTracker& anomalyTracker,
|
||||
|
||||
@@ -28,15 +28,15 @@ class PackageInfoListener : public virtual android::RefBase {
|
||||
public:
|
||||
// Uid map will notify this listener that the app with apk name and uid has been upgraded to
|
||||
// the specified version.
|
||||
virtual void notifyAppUpgrade(const uint64_t& eventTimeNs, const std::string& apk,
|
||||
virtual void notifyAppUpgrade(const int64_t& eventTimeNs, const std::string& apk,
|
||||
const int uid, const int64_t version) = 0;
|
||||
|
||||
// Notify interested listeners that the given apk and uid combination no longer exits.
|
||||
virtual void notifyAppRemoved(const uint64_t& eventTimeNs, const std::string& apk,
|
||||
virtual void notifyAppRemoved(const int64_t& eventTimeNs, const std::string& apk,
|
||||
const int uid) = 0;
|
||||
|
||||
// Notify the listener that the UidMap snapshot is available.
|
||||
virtual void onUidMapReceived(const uint64_t& eventTimeNs) = 0;
|
||||
virtual void onUidMapReceived(const int64_t& eventTimeNs) = 0;
|
||||
};
|
||||
|
||||
} // namespace statsd
|
||||
|
||||
@@ -234,6 +234,7 @@ message StatsdStatsReport {
|
||||
optional int64 id = 2;
|
||||
optional int32 creation_time_sec = 3;
|
||||
optional int32 deletion_time_sec = 4;
|
||||
optional int32 reset_time_sec = 19;
|
||||
optional int32 metric_count = 5;
|
||||
optional int32 condition_count = 6;
|
||||
optional int32 matcher_count = 7;
|
||||
|
||||
@@ -354,6 +354,8 @@ message StatsdConfig {
|
||||
}
|
||||
repeated Annotation annotation = 14;
|
||||
|
||||
optional int64 ttl_in_seconds = 15;
|
||||
|
||||
// Field number 1000 is reserved for later use.
|
||||
reserved 1000;
|
||||
}
|
||||
|
||||
@@ -252,8 +252,13 @@ void StorageManager::readConfigFromDisk(map<ConfigKey, StatsdConfig>& configsMap
|
||||
}
|
||||
}
|
||||
|
||||
bool StorageManager::hasIdenticalConfig(const ConfigKey& key,
|
||||
const vector<uint8_t>& config) {
|
||||
bool StorageManager::readConfigFromDisk(const ConfigKey& key, StatsdConfig* config) {
|
||||
string content;
|
||||
return config != nullptr &&
|
||||
StorageManager::readConfigFromDisk(key, &content) && config->ParseFromString(content);
|
||||
}
|
||||
|
||||
bool StorageManager::readConfigFromDisk(const ConfigKey& key, string* content) {
|
||||
unique_ptr<DIR, decltype(&closedir)> dir(opendir(STATS_SERVICE_DIR),
|
||||
closedir);
|
||||
if (dir == NULL) {
|
||||
@@ -262,7 +267,6 @@ bool StorageManager::hasIdenticalConfig(const ConfigKey& key,
|
||||
}
|
||||
|
||||
string suffix = StringPrintf("%d_%lld", key.GetUid(), (long long)key.GetId());
|
||||
|
||||
dirent* de;
|
||||
while ((de = readdir(dir.get()))) {
|
||||
char* name = de->d_name;
|
||||
@@ -277,13 +281,8 @@ bool StorageManager::hasIdenticalConfig(const ConfigKey& key,
|
||||
int fd = open(StringPrintf("%s/%s", STATS_SERVICE_DIR, name).c_str(),
|
||||
O_RDONLY | O_CLOEXEC);
|
||||
if (fd != -1) {
|
||||
string content;
|
||||
if (android::base::ReadFdToString(fd, &content)) {
|
||||
vector<uint8_t> vec(content.begin(), content.end());
|
||||
if (vec == config) {
|
||||
close(fd);
|
||||
return true;
|
||||
}
|
||||
if (android::base::ReadFdToString(fd, content)) {
|
||||
return true;
|
||||
}
|
||||
close(fd);
|
||||
}
|
||||
@@ -292,6 +291,18 @@ bool StorageManager::hasIdenticalConfig(const ConfigKey& key,
|
||||
return false;
|
||||
}
|
||||
|
||||
bool StorageManager::hasIdenticalConfig(const ConfigKey& key,
|
||||
const vector<uint8_t>& config) {
|
||||
string content;
|
||||
if (StorageManager::readConfigFromDisk(key, &content)) {
|
||||
vector<uint8_t> vec(content.begin(), content.end());
|
||||
if (vec == config) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void StorageManager::trimToFit(const char* path) {
|
||||
unique_ptr<DIR, decltype(&closedir)> dir(opendir(path), closedir);
|
||||
if (dir == NULL) {
|
||||
|
||||
@@ -73,6 +73,13 @@ public:
|
||||
*/
|
||||
static void readConfigFromDisk(std::map<ConfigKey, StatsdConfig>& configsMap);
|
||||
|
||||
/**
|
||||
* Call to load the specified config from disk. Returns false if the config file does not
|
||||
* exist or error occurs when reading the file.
|
||||
*/
|
||||
static bool readConfigFromDisk(const ConfigKey& key, StatsdConfig* config);
|
||||
static bool readConfigFromDisk(const ConfigKey& key, string* config);
|
||||
|
||||
/**
|
||||
* Trims files in the provided directory to limit the total size, number of
|
||||
* files, accumulation of outdated files.
|
||||
|
||||
@@ -53,7 +53,7 @@ public:
|
||||
|
||||
MOCK_METHOD0(byteSize, size_t());
|
||||
|
||||
MOCK_METHOD1(dropData, void(const uint64_t dropTimeNs));
|
||||
MOCK_METHOD1(dropData, void(const int64_t dropTimeNs));
|
||||
};
|
||||
|
||||
TEST(StatsLogProcessorTest, TestRateLimitByteSize) {
|
||||
|
||||
108
cmds/statsd/tests/e2e/ConfigTtl_e2e_test.cpp
Normal file
108
cmds/statsd/tests/e2e/ConfigTtl_e2e_test.cpp
Normal file
@@ -0,0 +1,108 @@
|
||||
// Copyright (C) 2018 The Android Open Source Project
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "src/StatsLogProcessor.h"
|
||||
#include "src/stats_log_util.h"
|
||||
#include "tests/statsd_test_util.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
namespace android {
|
||||
namespace os {
|
||||
namespace statsd {
|
||||
|
||||
#ifdef __ANDROID__
|
||||
|
||||
namespace {
|
||||
|
||||
StatsdConfig CreateStatsdConfig(int num_buckets, int threshold) {
|
||||
StatsdConfig config;
|
||||
config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
|
||||
auto wakelockAcquireMatcher = CreateAcquireWakelockAtomMatcher();
|
||||
|
||||
*config.add_atom_matcher() = wakelockAcquireMatcher;
|
||||
|
||||
auto countMetric = config.add_count_metric();
|
||||
countMetric->set_id(123456);
|
||||
countMetric->set_what(wakelockAcquireMatcher.id());
|
||||
*countMetric->mutable_dimensions_in_what() = CreateAttributionUidDimensions(
|
||||
android::util::WAKELOCK_STATE_CHANGED, {Position::FIRST});
|
||||
countMetric->set_bucket(FIVE_MINUTES);
|
||||
|
||||
auto alert = config.add_alert();
|
||||
alert->set_id(StringToId("alert"));
|
||||
alert->set_metric_id(123456);
|
||||
alert->set_num_buckets(num_buckets);
|
||||
alert->set_refractory_period_secs(10);
|
||||
alert->set_trigger_if_sum_gt(threshold);
|
||||
|
||||
// Two hours
|
||||
config.set_ttl_in_seconds(2 * 3600);
|
||||
return config;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST(ConfigTtlE2eTest, TestCountMetric) {
    // Verifies that the config's TTL end time is refreshed from the
    // timestamp of the most recently processed log event: after the last
    // event, getTtlEndNs() must equal that event's time plus the 2-hour
    // ttl_in_seconds set in the config.
    const int num_buckets = 1;
    const int threshold = 3;
    auto config = CreateStatsdConfig(num_buckets, threshold);

    int64_t bucketStartTimeNs = 10000000000;
    int64_t bucketSizeNs =
            TimeUnitToBucketSizeInMillis(config.count_metric(0).bucket()) * 1000000;

    ConfigKey cfgKey;
    auto processor = CreateStatsLogProcessor(bucketStartTimeNs / NS_PER_SEC, config, cfgKey);
    EXPECT_EQ(processor->mMetricsManagers.size(), 1u);
    EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());

    std::vector<AttributionNodeInternal> attributions1 = {CreateAttribution(111, "App1")};

    // Feed events spread across several buckets; only the last event's
    // timestamp should determine the TTL end.
    auto event = CreateAcquireWakelockEvent(attributions1, "wl1", bucketStartTimeNs + 2);
    processor->OnLogEvent(event.get());

    event = CreateAcquireWakelockEvent(attributions1, "wl2", bucketStartTimeNs + bucketSizeNs + 2);
    processor->OnLogEvent(event.get());

    event = CreateAcquireWakelockEvent(
        attributions1, "wl1", bucketStartTimeNs + 25 * bucketSizeNs + 2);
    processor->OnLogEvent(event.get());

    // TTL end = last event time + 2 hours (matches ttl_in_seconds above).
    EXPECT_EQ((int64_t)(bucketStartTimeNs + 25 * bucketSizeNs + 2 + 2 * 3600 * NS_PER_SEC),
              processor->mMetricsManagers.begin()->second->getTtlEndNs());
}
|
||||
|
||||
|
||||
#else
|
||||
GTEST_LOG_(INFO) << "This test does nothing.\n";
|
||||
#endif
|
||||
|
||||
} // namespace statsd
|
||||
} // namespace os
|
||||
} // namespace android
|
||||
@@ -203,9 +203,9 @@ TEST(CountMetricProducerTest, TestEventsWithSlicedCondition) {
|
||||
|
||||
TEST(CountMetricProducerTest, TestEventWithAppUpgrade) {
|
||||
sp<AlarmMonitor> alarmMonitor;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
|
||||
int tagId = 1;
|
||||
int conditionTagId = 2;
|
||||
@@ -244,7 +244,7 @@ TEST(CountMetricProducerTest, TestEventWithAppUpgrade) {
|
||||
// Anomaly tracker only contains full buckets.
|
||||
EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
|
||||
uint64_t lastEndTimeNs = countProducer.getCurrentBucketEndTimeNs();
|
||||
int64_t lastEndTimeNs = countProducer.getCurrentBucketEndTimeNs();
|
||||
// Next event occurs in same bucket as partial bucket created.
|
||||
LogEvent event2(tagId, bucketStartTimeNs + 59 * NS_PER_SEC + 10);
|
||||
event2.write("222"); // uid
|
||||
@@ -265,9 +265,9 @@ TEST(CountMetricProducerTest, TestEventWithAppUpgrade) {
|
||||
}
|
||||
|
||||
TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket) {
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
|
||||
|
||||
int tagId = 1;
|
||||
int conditionTagId = 2;
|
||||
@@ -294,7 +294,7 @@ TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket) {
|
||||
EXPECT_EQ((int64_t)bucketStartTimeNs,
|
||||
countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketStartNs);
|
||||
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs,
|
||||
(uint64_t)countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
|
||||
countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mBucketEndNs);
|
||||
EXPECT_EQ(eventUpgradeTimeNs, countProducer.mCurrentBucketStartTimeNs);
|
||||
|
||||
// Next event occurs in same bucket as partial bucket created.
|
||||
@@ -313,7 +313,7 @@ TEST(CountMetricProducerTest, TestEventWithAppUpgradeInNextBucket) {
|
||||
EXPECT_EQ((int64_t)eventUpgradeTimeNs,
|
||||
countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketStartNs);
|
||||
EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs,
|
||||
(uint64_t)countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketEndNs);
|
||||
countProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mBucketEndNs);
|
||||
}
|
||||
|
||||
TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced) {
|
||||
|
||||
@@ -41,7 +41,7 @@ const ConfigKey kConfigKey(0, 12345);
|
||||
|
||||
TEST(DurationMetricTrackerTest, TestNoCondition) {
|
||||
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
|
||||
DurationMetric metric;
|
||||
@@ -71,15 +71,15 @@ TEST(DurationMetricTrackerTest, TestNoCondition) {
|
||||
EXPECT_EQ(2UL, buckets.size());
|
||||
EXPECT_EQ(bucketStartTimeNs, buckets[0].mBucketStartNs);
|
||||
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[0].mBucketEndNs);
|
||||
EXPECT_EQ(bucketSizeNs - 1ULL, buckets[0].mDuration);
|
||||
EXPECT_EQ(bucketSizeNs - 1LL, buckets[0].mDuration);
|
||||
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets[1].mBucketStartNs);
|
||||
EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets[1].mBucketEndNs);
|
||||
EXPECT_EQ(2ULL, buckets[1].mDuration);
|
||||
EXPECT_EQ(2LL, buckets[1].mDuration);
|
||||
}
|
||||
|
||||
TEST(DurationMetricTrackerTest, TestNonSlicedCondition) {
|
||||
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
|
||||
DurationMetric metric;
|
||||
@@ -122,7 +122,7 @@ TEST(DurationMetricTrackerTest, TestNonSlicedCondition) {
|
||||
EXPECT_EQ(1UL, buckets2.size());
|
||||
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets2[0].mBucketStartNs);
|
||||
EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets2[0].mBucketEndNs);
|
||||
EXPECT_EQ(1ULL, buckets2[0].mDuration);
|
||||
EXPECT_EQ(1LL, buckets2[0].mDuration);
|
||||
}
|
||||
|
||||
TEST(DurationMetricTrackerTest, TestSumDurationWithUpgrade) {
|
||||
@@ -135,11 +135,11 @@ TEST(DurationMetricTrackerTest, TestSumDurationWithUpgrade) {
|
||||
* - [70,130]: All 60 secs
|
||||
* - [130, 210]: Only 5 secs (event ended at 135sec)
|
||||
*/
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
uint64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
|
||||
uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
int64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
|
||||
int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
|
||||
|
||||
int tagId = 1;
|
||||
|
||||
@@ -190,11 +190,11 @@ TEST(DurationMetricTrackerTest, TestSumDurationWithUpgradeInFollowingBucket) {
|
||||
* - [70,75]: 5 sec
|
||||
* - [75,130]: 55 secs
|
||||
*/
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
|
||||
uint64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
|
||||
uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
|
||||
int64_t startTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
|
||||
int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
|
||||
|
||||
int tagId = 1;
|
||||
|
||||
@@ -240,11 +240,11 @@ TEST(DurationMetricTrackerTest, TestSumDurationWithUpgradeInFollowingBucket) {
|
||||
|
||||
TEST(DurationMetricTrackerTest, TestSumDurationAnomalyWithUpgrade) {
|
||||
sp<AlarmMonitor> alarmMonitor;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
uint64_t startTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t endTimeNs = startTimeNs + 65 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
int64_t startTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t endTimeNs = startTimeNs + 65 * NS_PER_SEC;
|
||||
|
||||
int tagId = 1;
|
||||
|
||||
@@ -277,15 +277,15 @@ TEST(DurationMetricTrackerTest, TestSumDurationAnomalyWithUpgrade) {
|
||||
durationProducer.onMatchedLogEvent(2 /* stop index*/, end_event);
|
||||
|
||||
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs - startTimeNs,
|
||||
(uint64_t)anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
}
|
||||
|
||||
TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade) {
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
uint64_t startTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
int64_t startTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t endTimeNs = startTimeNs + 125 * NS_PER_SEC;
|
||||
|
||||
int tagId = 1;
|
||||
|
||||
@@ -329,11 +329,11 @@ TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgrade) {
|
||||
}
|
||||
|
||||
TEST(DurationMetricTrackerTest, TestMaxDurationWithUpgradeInNextBucket) {
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
|
||||
uint64_t startTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t endTimeNs = startTimeNs + 115 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
|
||||
int64_t eventUpgradeTimeNs = bucketStartTimeNs + 65 * NS_PER_SEC;
|
||||
int64_t startTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t endTimeNs = startTimeNs + 115 * NS_PER_SEC;
|
||||
|
||||
int tagId = 1;
|
||||
|
||||
|
||||
@@ -36,9 +36,9 @@ namespace statsd {
|
||||
const ConfigKey kConfigKey(0, 12345);
|
||||
|
||||
TEST(EventMetricProducerTest, TestNoCondition) {
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
|
||||
EventMetric metric;
|
||||
metric.set_id(1);
|
||||
@@ -59,9 +59,9 @@ TEST(EventMetricProducerTest, TestNoCondition) {
|
||||
}
|
||||
|
||||
TEST(EventMetricProducerTest, TestEventsWithNonSlicedCondition) {
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
|
||||
EventMetric metric;
|
||||
metric.set_id(1);
|
||||
@@ -86,8 +86,8 @@ TEST(EventMetricProducerTest, TestEventsWithNonSlicedCondition) {
|
||||
}
|
||||
|
||||
TEST(EventMetricProducerTest, TestEventsWithSlicedCondition) {
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
|
||||
int tagId = 1;
|
||||
int conditionTagId = 2;
|
||||
|
||||
@@ -45,7 +45,7 @@ const int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000L
|
||||
const int64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
const int64_t bucket3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;
|
||||
const int64_t bucket4StartTimeNs = bucketStartTimeNs + 3 * bucketSizeNs;
|
||||
const uint64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
const int64_t eventUpgradeTimeNs = bucketStartTimeNs + 15 * NS_PER_SEC;
|
||||
|
||||
TEST(GaugeMetricProducerTest, TestNoCondition) {
|
||||
GaugeMetric metric;
|
||||
@@ -160,7 +160,7 @@ TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade) {
|
||||
gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
|
||||
EXPECT_EQ(0UL, (*gaugeProducer.mCurrentSlicedBucket).count(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ(eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
|
||||
// Partial buckets are not sent to anomaly tracker.
|
||||
EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
@@ -171,9 +171,9 @@ TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade) {
|
||||
event2->write(10);
|
||||
event2->init();
|
||||
gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
|
||||
EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ((uint64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ((int64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
|
||||
// Partial buckets are not sent to anomaly tracker.
|
||||
EXPECT_EQ(0, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
|
||||
@@ -184,9 +184,9 @@ TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade) {
|
||||
event3->write(10);
|
||||
event3->init();
|
||||
gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event3);
|
||||
EXPECT_EQ(1UL, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ(1L, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ(2UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ((uint64_t)bucketStartTimeNs + bucketSizeNs, gaugeProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ((int64_t)bucketStartTimeNs + bucketSizeNs, gaugeProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(1, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
|
||||
// Next event should trigger creation of new bucket.
|
||||
@@ -196,7 +196,7 @@ TEST(GaugeMetricProducerTest, TestPushedEventsWithUpgrade) {
|
||||
event4->write(10);
|
||||
event4->init();
|
||||
gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, *event4);
|
||||
EXPECT_EQ(2UL, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ(2L, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ(3UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ(2, anomalyTracker->getSumOverPastBuckets(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
}
|
||||
@@ -246,8 +246,8 @@ TEST(GaugeMetricProducerTest, TestPulledWithUpgrade) {
|
||||
|
||||
gaugeProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
|
||||
EXPECT_EQ(1UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ(0UL, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ((uint64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(0L, gaugeProducer.mCurrentBucketNum);
|
||||
EXPECT_EQ((int64_t)eventUpgradeTimeNs, gaugeProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
|
||||
EXPECT_EQ(2, gaugeProducer.mCurrentSlicedBucket->begin()
|
||||
->second.front()
|
||||
|
||||
@@ -45,7 +45,7 @@ const HashableDimensionKey eventKey = getMockedDimensionKey(TagId, 0, "1");
|
||||
const HashableDimensionKey conditionKey = getMockedDimensionKey(TagId, 4, "1");
|
||||
const HashableDimensionKey key1 = getMockedDimensionKey(TagId, 1, "1");
|
||||
const HashableDimensionKey key2 = getMockedDimensionKey(TagId, 1, "2");
|
||||
const uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
const int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
|
||||
TEST(MaxDurationTrackerTest, TestSimpleMaxDuration) {
|
||||
const MetricDimensionKey eventKey = getMockedMetricDimensionKey(TagId, 0, "1");
|
||||
@@ -58,9 +58,9 @@ TEST(MaxDurationTrackerTest, TestSimpleMaxDuration) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
uint64_t bucketNum = 0;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
int64_t bucketNum = 0;
|
||||
|
||||
int64_t metricId = 1;
|
||||
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
|
||||
@@ -80,7 +80,7 @@ TEST(MaxDurationTrackerTest, TestSimpleMaxDuration) {
|
||||
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
|
||||
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
|
||||
EXPECT_EQ(1u, buckets[eventKey].size());
|
||||
EXPECT_EQ(20ULL, buckets[eventKey][0].mDuration);
|
||||
EXPECT_EQ(20LL, buckets[eventKey][0].mDuration);
|
||||
}
|
||||
|
||||
TEST(MaxDurationTrackerTest, TestStopAll) {
|
||||
@@ -93,10 +93,10 @@ TEST(MaxDurationTrackerTest, TestStopAll) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
uint64_t bucketNum = 0;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
int64_t bucketNum = 0;
|
||||
|
||||
int64_t metricId = 1;
|
||||
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
|
||||
@@ -129,10 +129,10 @@ TEST(MaxDurationTrackerTest, TestCrossBucketBoundary) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
uint64_t bucketNum = 0;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
int64_t bucketNum = 0;
|
||||
|
||||
int64_t metricId = 1;
|
||||
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
|
||||
@@ -170,10 +170,10 @@ TEST(MaxDurationTrackerTest, TestCrossBucketBoundary_nested) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
uint64_t bucketNum = 0;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
int64_t bucketNum = 0;
|
||||
|
||||
int64_t metricId = 1;
|
||||
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, -1, dimensionInCondition,
|
||||
@@ -213,14 +213,14 @@ TEST(MaxDurationTrackerTest, TestMaxDurationWithCondition) {
|
||||
Start in first bucket, stop in second bucket. Condition turns on and off in the first bucket
|
||||
and again turns on and off in the second bucket.
|
||||
*/
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
|
||||
uint64_t conditionStarts1 = bucketStartTimeNs + 11 * NS_PER_SEC;
|
||||
uint64_t conditionStops1 = bucketStartTimeNs + 14 * NS_PER_SEC;
|
||||
uint64_t conditionStarts2 = bucketStartTimeNs + bucketSizeNs + 5 * NS_PER_SEC;
|
||||
uint64_t conditionStops2 = conditionStarts2 + 10 * NS_PER_SEC;
|
||||
uint64_t eventStopTimeNs = conditionStops2 + 8 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1 * NS_PER_SEC;
|
||||
int64_t conditionStarts1 = bucketStartTimeNs + 11 * NS_PER_SEC;
|
||||
int64_t conditionStops1 = bucketStartTimeNs + 14 * NS_PER_SEC;
|
||||
int64_t conditionStarts2 = bucketStartTimeNs + bucketSizeNs + 5 * NS_PER_SEC;
|
||||
int64_t conditionStops2 = conditionStarts2 + 10 * NS_PER_SEC;
|
||||
int64_t eventStopTimeNs = conditionStops2 + 8 * NS_PER_SEC;
|
||||
|
||||
int64_t metricId = 1;
|
||||
MaxDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
|
||||
@@ -242,7 +242,7 @@ TEST(MaxDurationTrackerTest, TestMaxDurationWithCondition) {
|
||||
EXPECT_EQ(1U, buckets.size());
|
||||
vector<DurationBucket> item = buckets.begin()->second;
|
||||
EXPECT_EQ(1UL, item.size());
|
||||
EXPECT_EQ(13ULL * NS_PER_SEC, item[0].mDuration);
|
||||
EXPECT_EQ((int64_t)(13LL * NS_PER_SEC), item[0].mDuration);
|
||||
}
|
||||
|
||||
TEST(MaxDurationTrackerTest, TestAnomalyDetection) {
|
||||
@@ -255,11 +255,11 @@ TEST(MaxDurationTrackerTest, TestAnomalyDetection) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = 13000000000;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = 13000000000;
|
||||
int64_t durationTimeNs = 2 * 1000;
|
||||
|
||||
int64_t metricId = 1;
|
||||
@@ -312,15 +312,15 @@ TEST(MaxDurationTrackerTest, TestAnomalyPredictedTimestamp) {
|
||||
* dimension has already been running for 4 seconds. Thus, we have 40-4=36 seconds remaining
|
||||
* before we trigger the anomaly.
|
||||
*/
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 5 * NS_PER_SEC; // Condition is off at start.
|
||||
uint64_t conditionStarts1 = bucketStartTimeNs + 11 * NS_PER_SEC;
|
||||
uint64_t conditionStops1 = bucketStartTimeNs + 14 * NS_PER_SEC;
|
||||
uint64_t conditionStarts2 = bucketStartTimeNs + 20 * NS_PER_SEC;
|
||||
uint64_t eventStartTimeNs2 = conditionStarts2 - 4 * NS_PER_SEC;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 5 * NS_PER_SEC; // Condition is off at start.
|
||||
int64_t conditionStarts1 = bucketStartTimeNs + 11 * NS_PER_SEC;
|
||||
int64_t conditionStops1 = bucketStartTimeNs + 14 * NS_PER_SEC;
|
||||
int64_t conditionStarts2 = bucketStartTimeNs + 20 * NS_PER_SEC;
|
||||
int64_t eventStartTimeNs2 = conditionStarts2 - 4 * NS_PER_SEC;
|
||||
|
||||
int64_t metricId = 1;
|
||||
Alert alert;
|
||||
@@ -344,9 +344,9 @@ TEST(MaxDurationTrackerTest, TestAnomalyPredictedTimestamp) {
|
||||
tracker.noteConditionChanged(key1, true, conditionStarts2);
|
||||
EXPECT_EQ(1U, anomalyTracker->mAlarms.size());
|
||||
auto alarm = anomalyTracker->mAlarms.begin()->second;
|
||||
uint64_t anomalyFireTimeSec = alarm->timestampSec;
|
||||
int64_t anomalyFireTimeSec = alarm->timestampSec;
|
||||
EXPECT_EQ(conditionStarts2 + 36 * NS_PER_SEC,
|
||||
(unsigned long long)anomalyFireTimeSec * NS_PER_SEC);
|
||||
(long long)anomalyFireTimeSec * NS_PER_SEC);
|
||||
|
||||
// Now we test the calculation now that there's a refractory period.
|
||||
// At the correct time, declare the anomaly. This will set a refractory period. Make sure it
|
||||
@@ -354,23 +354,23 @@ TEST(MaxDurationTrackerTest, TestAnomalyPredictedTimestamp) {
|
||||
std::unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> firedAlarms({alarm});
|
||||
anomalyTracker->informAlarmsFired(anomalyFireTimeSec * NS_PER_SEC, firedAlarms);
|
||||
EXPECT_EQ(0u, anomalyTracker->mAlarms.size());
|
||||
uint64_t refractoryPeriodEndsSec = anomalyFireTimeSec + refPeriodSec;
|
||||
int64_t refractoryPeriodEndsSec = anomalyFireTimeSec + refPeriodSec;
|
||||
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), refractoryPeriodEndsSec);
|
||||
|
||||
// Now stop and start again. Make sure the new predictAnomalyTimestampNs takes into account
|
||||
// the refractory period correctly.
|
||||
uint64_t eventStopTimeNs = anomalyFireTimeSec * NS_PER_SEC + 10;
|
||||
int64_t eventStopTimeNs = anomalyFireTimeSec * NS_PER_SEC + 10;
|
||||
tracker.noteStop(key1, eventStopTimeNs, false);
|
||||
tracker.noteStop(key2, eventStopTimeNs, false);
|
||||
tracker.noteStart(key1, true, eventStopTimeNs + 1000000, conditionKey1);
|
||||
// Anomaly is ongoing, but we're still in the refractory period.
|
||||
EXPECT_EQ(1U, anomalyTracker->mAlarms.size());
|
||||
alarm = anomalyTracker->mAlarms.begin()->second;
|
||||
EXPECT_EQ(refractoryPeriodEndsSec, (unsigned long long)(alarm->timestampSec));
|
||||
EXPECT_EQ(refractoryPeriodEndsSec, (long long)(alarm->timestampSec));
|
||||
|
||||
// Makes sure it is correct after the refractory period is over.
|
||||
tracker.noteStop(key1, eventStopTimeNs + 2000000, false);
|
||||
uint64_t justBeforeRefPeriodNs = (refractoryPeriodEndsSec - 2) * NS_PER_SEC;
|
||||
int64_t justBeforeRefPeriodNs = (refractoryPeriodEndsSec - 2) * NS_PER_SEC;
|
||||
tracker.noteStart(key1, true, justBeforeRefPeriodNs, conditionKey1);
|
||||
alarm = anomalyTracker->mAlarms.begin()->second;
|
||||
EXPECT_EQ(justBeforeRefPeriodNs + 40 * NS_PER_SEC,
|
||||
@@ -397,13 +397,13 @@ TEST(MaxDurationTrackerTest, TestAnomalyPredictedTimestamp_UpdatedOnStop) {
|
||||
* nested dimensions, are started for 8 seconds. When we stop, the other nested dimension has
|
||||
* been started for 5 seconds. So we can only allow 35 more seconds from now.
|
||||
*/
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs1 = bucketStartTimeNs + 5 * NS_PER_SEC; // Condition is off at start.
|
||||
uint64_t eventStopTimeNs1 = bucketStartTimeNs + 13 * NS_PER_SEC;
|
||||
uint64_t eventStartTimeNs2 = bucketStartTimeNs + 8 * NS_PER_SEC;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketEndTimeNs = bucketStartTimeNs + bucketSizeNs;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs1 = bucketStartTimeNs + 5 * NS_PER_SEC; // Condition is off at start.
|
||||
int64_t eventStopTimeNs1 = bucketStartTimeNs + 13 * NS_PER_SEC;
|
||||
int64_t eventStartTimeNs2 = bucketStartTimeNs + 8 * NS_PER_SEC;
|
||||
|
||||
int64_t metricId = 1;
|
||||
Alert alert;
|
||||
|
||||
@@ -44,7 +44,7 @@ const HashableDimensionKey eventKey = getMockedDimensionKey(TagId, 0, "event");
|
||||
const HashableDimensionKey kConditionKey1 = getMockedDimensionKey(TagId, 1, "maps");
|
||||
const HashableDimensionKey kEventKey1 = getMockedDimensionKey(TagId, 2, "maps");
|
||||
const HashableDimensionKey kEventKey2 = getMockedDimensionKey(TagId, 3, "maps");
|
||||
const uint64_t bucketSizeNs = 30 * NS_PER_SEC;
|
||||
const int64_t bucketSizeNs = 30 * NS_PER_SEC;
|
||||
|
||||
TEST(OringDurationTrackerTest, TestDurationOverlap) {
|
||||
const MetricDimensionKey eventKey = getMockedMetricDimensionKey(TagId, 0, "event");
|
||||
@@ -56,11 +56,11 @@ TEST(OringDurationTrackerTest, TestDurationOverlap) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t durationTimeNs = 2 * 1000;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t durationTimeNs = 2 * 1000;
|
||||
|
||||
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
|
||||
false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
@@ -89,10 +89,10 @@ TEST(OringDurationTrackerTest, TestDurationNested) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
|
||||
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
|
||||
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
@@ -107,7 +107,7 @@ TEST(OringDurationTrackerTest, TestDurationNested) {
|
||||
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
|
||||
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
|
||||
EXPECT_EQ(1u, buckets[eventKey].size());
|
||||
EXPECT_EQ(2003ULL, buckets[eventKey][0].mDuration);
|
||||
EXPECT_EQ(2003LL, buckets[eventKey][0].mDuration);
|
||||
}
|
||||
|
||||
TEST(OringDurationTrackerTest, TestStopAll) {
|
||||
@@ -122,10 +122,10 @@ TEST(OringDurationTrackerTest, TestStopAll) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
|
||||
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
|
||||
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
@@ -139,7 +139,7 @@ TEST(OringDurationTrackerTest, TestStopAll) {
|
||||
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
|
||||
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
|
||||
EXPECT_EQ(1u, buckets[eventKey].size());
|
||||
EXPECT_EQ(2003ULL, buckets[eventKey][0].mDuration);
|
||||
EXPECT_EQ(2003LL, buckets[eventKey][0].mDuration);
|
||||
}
|
||||
|
||||
TEST(OringDurationTrackerTest, TestCrossBucketBoundary) {
|
||||
@@ -152,11 +152,11 @@ TEST(OringDurationTrackerTest, TestCrossBucketBoundary) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t durationTimeNs = 2 * 1000;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t durationTimeNs = 2 * 1000;
|
||||
|
||||
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
|
||||
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
@@ -197,11 +197,11 @@ TEST(OringDurationTrackerTest, TestDurationConditionChange) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t durationTimeNs = 2 * 1000;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t durationTimeNs = 2 * 1000;
|
||||
|
||||
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
|
||||
false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
@@ -216,7 +216,7 @@ TEST(OringDurationTrackerTest, TestDurationConditionChange) {
|
||||
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
|
||||
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
|
||||
EXPECT_EQ(1u, buckets[eventKey].size());
|
||||
EXPECT_EQ(5ULL, buckets[eventKey][0].mDuration);
|
||||
EXPECT_EQ(5LL, buckets[eventKey][0].mDuration);
|
||||
}
|
||||
|
||||
TEST(OringDurationTrackerTest, TestDurationConditionChange2) {
|
||||
@@ -237,11 +237,11 @@ TEST(OringDurationTrackerTest, TestDurationConditionChange2) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
uint64_t durationTimeNs = 2 * 1000;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t durationTimeNs = 2 * 1000;
|
||||
|
||||
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
|
||||
false, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
@@ -258,7 +258,7 @@ TEST(OringDurationTrackerTest, TestDurationConditionChange2) {
|
||||
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
|
||||
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
|
||||
EXPECT_EQ(1u, buckets[eventKey].size());
|
||||
EXPECT_EQ(1005ULL, buckets[eventKey][0].mDuration);
|
||||
EXPECT_EQ(1005LL, buckets[eventKey][0].mDuration);
|
||||
}
|
||||
|
||||
TEST(OringDurationTrackerTest, TestDurationConditionChangeNested) {
|
||||
@@ -277,10 +277,10 @@ TEST(OringDurationTrackerTest, TestDurationConditionChangeNested) {
|
||||
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
|
||||
uint64_t bucketStartTimeNs = 10000000000;
|
||||
uint64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
int64_t bucketStartTimeNs = 10000000000;
|
||||
int64_t bucketSizeNs = 30 * 1000 * 1000 * 1000LL;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 1;
|
||||
|
||||
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, dimensionInCondition,
|
||||
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
@@ -298,7 +298,7 @@ TEST(OringDurationTrackerTest, TestDurationConditionChangeNested) {
|
||||
tracker.flushIfNeeded(bucketStartTimeNs + bucketSizeNs + 1, &buckets);
|
||||
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
|
||||
EXPECT_EQ(1u, buckets[eventKey].size());
|
||||
EXPECT_EQ(15ULL, buckets[eventKey][0].mDuration);
|
||||
EXPECT_EQ(15LL, buckets[eventKey][0].mDuration);
|
||||
}
|
||||
|
||||
TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp) {
|
||||
@@ -317,9 +317,9 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp) {
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
|
||||
|
||||
uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
|
||||
int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
|
||||
|
||||
sp<AlarmMonitor> alarmMonitor;
|
||||
sp<DurationAnomalyTracker> anomalyTracker =
|
||||
@@ -336,26 +336,26 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp) {
|
||||
tracker.noteStop(DEFAULT_DIMENSION_KEY, eventStartTimeNs + 3, false);
|
||||
EXPECT_EQ(0u, buckets[eventKey].size());
|
||||
|
||||
uint64_t event1StartTimeNs = eventStartTimeNs + 10;
|
||||
int64_t event1StartTimeNs = eventStartTimeNs + 10;
|
||||
tracker.noteStart(kEventKey1, true, event1StartTimeNs, ConditionKey());
|
||||
// No past buckets. The anomaly will happen in bucket #0.
|
||||
EXPECT_EQ((long long)(event1StartTimeNs + alert.trigger_if_sum_gt() - 3),
|
||||
tracker.predictAnomalyTimestampNs(*anomalyTracker, event1StartTimeNs));
|
||||
|
||||
uint64_t event1StopTimeNs = eventStartTimeNs + bucketSizeNs + 10;
|
||||
int64_t event1StopTimeNs = eventStartTimeNs + bucketSizeNs + 10;
|
||||
tracker.flushIfNeeded(event1StopTimeNs, &buckets);
|
||||
tracker.noteStop(kEventKey1, event1StopTimeNs, false);
|
||||
|
||||
EXPECT_TRUE(buckets.find(eventKey) != buckets.end());
|
||||
EXPECT_EQ(1u, buckets[eventKey].size());
|
||||
EXPECT_EQ(3ULL + bucketStartTimeNs + bucketSizeNs - eventStartTimeNs - 10,
|
||||
EXPECT_EQ(3LL + bucketStartTimeNs + bucketSizeNs - eventStartTimeNs - 10,
|
||||
buckets[eventKey][0].mDuration);
|
||||
|
||||
const int64_t bucket0Duration = 3ULL + bucketStartTimeNs + bucketSizeNs - eventStartTimeNs - 10;
|
||||
const int64_t bucket1Duration = eventStartTimeNs + 10 - bucketStartTimeNs;
|
||||
|
||||
// One past buckets. The anomaly will happen in bucket #1.
|
||||
uint64_t event2StartTimeNs = eventStartTimeNs + bucketSizeNs + 15;
|
||||
int64_t event2StartTimeNs = eventStartTimeNs + bucketSizeNs + 15;
|
||||
tracker.noteStart(kEventKey1, true, event2StartTimeNs, ConditionKey());
|
||||
EXPECT_EQ((long long)(event2StartTimeNs + alert.trigger_if_sum_gt() - bucket0Duration -
|
||||
bucket1Duration),
|
||||
@@ -364,7 +364,7 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp) {
|
||||
|
||||
// Only one past buckets is applicable. Bucket +0 should be trashed. The anomaly will happen in
|
||||
// bucket #2.
|
||||
uint64_t event3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs - 9 * NS_PER_SEC;
|
||||
int64_t event3StartTimeNs = bucketStartTimeNs + 2 * bucketSizeNs - 9 * NS_PER_SEC;
|
||||
tracker.noteStart(kEventKey1, true, event3StartTimeNs, ConditionKey());
|
||||
EXPECT_EQ((long long)(event3StartTimeNs + alert.trigger_if_sum_gt() - bucket1Duration - 1LL),
|
||||
tracker.predictAnomalyTimestampNs(*anomalyTracker, event3StartTimeNs));
|
||||
@@ -379,8 +379,8 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp2) {
|
||||
alert.set_num_buckets(1);
|
||||
alert.set_refractory_period_secs(20);
|
||||
|
||||
uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
uint64_t bucketNum = 0;
|
||||
int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
int64_t bucketNum = 0;
|
||||
|
||||
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
|
||||
sp<AlarmMonitor> alarmMonitor;
|
||||
@@ -391,7 +391,7 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp2) {
|
||||
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
bucketSizeNs, true, false, {anomalyTracker});
|
||||
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 9 * NS_PER_SEC;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 9 * NS_PER_SEC;
|
||||
tracker.noteStart(DEFAULT_DIMENSION_KEY, true, eventStartTimeNs, ConditionKey());
|
||||
// Anomaly happens in the bucket #1.
|
||||
EXPECT_EQ((long long)(bucketStartTimeNs + 14 * NS_PER_SEC),
|
||||
@@ -402,7 +402,7 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp2) {
|
||||
EXPECT_EQ((long long)(bucketStartTimeNs + 34 * NS_PER_SEC) / NS_PER_SEC,
|
||||
anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
|
||||
uint64_t event2StartTimeNs = bucketStartTimeNs + 22 * NS_PER_SEC;
|
||||
int64_t event2StartTimeNs = bucketStartTimeNs + 22 * NS_PER_SEC;
|
||||
EXPECT_EQ((long long)(bucketStartTimeNs + 34 * NS_PER_SEC) / NS_PER_SEC,
|
||||
anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY));
|
||||
EXPECT_EQ((long long)(bucketStartTimeNs + 35 * NS_PER_SEC),
|
||||
@@ -413,7 +413,7 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp3) {
|
||||
// Test the cases where the refractory period is smaller than the bucket size, longer than
|
||||
// the bucket size, and longer than 2x of the anomaly detection window.
|
||||
for (int j = 0; j < 3; j++) {
|
||||
uint64_t thresholdNs = j * bucketSizeNs + 5 * NS_PER_SEC;
|
||||
int64_t thresholdNs = j * bucketSizeNs + 5 * NS_PER_SEC;
|
||||
for (int i = 0; i <= 7; ++i) {
|
||||
vector<Matcher> dimensionInCondition;
|
||||
Alert alert;
|
||||
@@ -424,8 +424,8 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp3) {
|
||||
alert.set_refractory_period_secs(
|
||||
bucketSizeNs / NS_PER_SEC / 2 + i * bucketSizeNs / NS_PER_SEC);
|
||||
|
||||
uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
uint64_t bucketNum = 101;
|
||||
int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
int64_t bucketNum = 101;
|
||||
|
||||
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
|
||||
sp<AlarmMonitor> alarmMonitor;
|
||||
@@ -436,32 +436,32 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp3) {
|
||||
true, bucketStartTimeNs, bucketNum, bucketStartTimeNs,
|
||||
bucketSizeNs, true, false, {anomalyTracker});
|
||||
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + 9 * NS_PER_SEC;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + 9 * NS_PER_SEC;
|
||||
tracker.noteStart(DEFAULT_DIMENSION_KEY, true, eventStartTimeNs, ConditionKey());
|
||||
EXPECT_EQ((long long)(eventStartTimeNs + thresholdNs),
|
||||
tracker.predictAnomalyTimestampNs(*anomalyTracker, eventStartTimeNs));
|
||||
uint64_t eventStopTimeNs = eventStartTimeNs + thresholdNs + NS_PER_SEC;
|
||||
int64_t eventStopTimeNs = eventStartTimeNs + thresholdNs + NS_PER_SEC;
|
||||
tracker.noteStop(DEFAULT_DIMENSION_KEY, eventStopTimeNs, false);
|
||||
|
||||
uint64_t refractoryPeriodEndSec =
|
||||
int64_t refractoryPeriodEndSec =
|
||||
anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_METRIC_DIMENSION_KEY);
|
||||
EXPECT_EQ((long long)(eventStopTimeNs) / NS_PER_SEC + alert.refractory_period_secs(),
|
||||
EXPECT_EQ(eventStopTimeNs / (int64_t)NS_PER_SEC + alert.refractory_period_secs(),
|
||||
refractoryPeriodEndSec);
|
||||
|
||||
// Acquire and release a wakelock in the next bucket.
|
||||
uint64_t event2StartTimeNs = eventStopTimeNs + bucketSizeNs;
|
||||
int64_t event2StartTimeNs = eventStopTimeNs + bucketSizeNs;
|
||||
tracker.noteStart(DEFAULT_DIMENSION_KEY, true, event2StartTimeNs, ConditionKey());
|
||||
uint64_t event2StopTimeNs = event2StartTimeNs + 4 * NS_PER_SEC;
|
||||
int64_t event2StopTimeNs = event2StartTimeNs + 4 * NS_PER_SEC;
|
||||
tracker.noteStop(DEFAULT_DIMENSION_KEY, event2StopTimeNs, false);
|
||||
|
||||
// Test the alarm prediction works well when seeing another wakelock start event.
|
||||
for (int k = 0; k <= 2; ++k) {
|
||||
uint64_t event3StartTimeNs = event2StopTimeNs + NS_PER_SEC + k * bucketSizeNs;
|
||||
uint64_t alarmTimestampNs =
|
||||
int64_t event3StartTimeNs = event2StopTimeNs + NS_PER_SEC + k * bucketSizeNs;
|
||||
int64_t alarmTimestampNs =
|
||||
tracker.predictAnomalyTimestampNs(*anomalyTracker, event3StartTimeNs);
|
||||
EXPECT_GT(alarmTimestampNs, 0u);
|
||||
EXPECT_GE(alarmTimestampNs, event3StartTimeNs);
|
||||
EXPECT_GE(alarmTimestampNs, refractoryPeriodEndSec * NS_PER_SEC);
|
||||
EXPECT_GE(alarmTimestampNs, refractoryPeriodEndSec *(int64_t) NS_PER_SEC);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -484,9 +484,9 @@ TEST(OringDurationTrackerTest, TestAnomalyDetectionExpiredAlarm) {
|
||||
unordered_map<MetricDimensionKey, vector<DurationBucket>> buckets;
|
||||
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
|
||||
|
||||
uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
uint64_t bucketNum = 0;
|
||||
uint64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
|
||||
int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
int64_t bucketNum = 0;
|
||||
int64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
|
||||
|
||||
sp<AlarmMonitor> alarmMonitor;
|
||||
sp<DurationAnomalyTracker> anomalyTracker =
|
||||
@@ -535,8 +535,8 @@ TEST(OringDurationTrackerTest, TestAnomalyDetectionFiredAlarm) {
|
||||
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
|
||||
ConditionKey conkey;
|
||||
conkey[StringToId("APP_BACKGROUND")] = kConditionKey1;
|
||||
uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
uint64_t bucketSizeNs = 30 * NS_PER_SEC;
|
||||
int64_t bucketStartTimeNs = 10 * NS_PER_SEC;
|
||||
int64_t bucketSizeNs = 30 * NS_PER_SEC;
|
||||
|
||||
sp<AlarmMonitor> alarmMonitor;
|
||||
sp<DurationAnomalyTracker> anomalyTracker =
|
||||
|
||||
@@ -234,7 +234,7 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithUpgrade) {
|
||||
|
||||
valueProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
|
||||
EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
|
||||
shared_ptr<LogEvent> event2 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 59 * NS_PER_SEC);
|
||||
event2->write(1);
|
||||
@@ -242,7 +242,7 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithUpgrade) {
|
||||
event2->init();
|
||||
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
|
||||
EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
|
||||
// Next value should create a new bucket.
|
||||
shared_ptr<LogEvent> event3 = make_shared<LogEvent>(tagId, bucketStartTimeNs + 65 * NS_PER_SEC);
|
||||
@@ -251,7 +251,7 @@ TEST(ValueMetricProducerTest, TestPushedEventsWithUpgrade) {
|
||||
event3->init();
|
||||
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event3);
|
||||
EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ((uint64_t)bucketStartTimeNs + bucketSizeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
}
|
||||
|
||||
TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
|
||||
@@ -294,7 +294,7 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
|
||||
|
||||
valueProducer.notifyAppUpgrade(eventUpgradeTimeNs, "ANY.APP", 1, 1);
|
||||
EXPECT_EQ(1UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ((uint64_t)eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(eventUpgradeTimeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(20L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][0].mValue);
|
||||
|
||||
allData.clear();
|
||||
@@ -305,7 +305,7 @@ TEST(ValueMetricProducerTest, TestPulledValueWithUpgrade) {
|
||||
allData.push_back(event);
|
||||
valueProducer.onDataPulled(allData);
|
||||
EXPECT_EQ(2UL, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
|
||||
EXPECT_EQ((uint64_t)bucket2StartTimeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(bucket2StartTimeNs, valueProducer.mCurrentBucketStartTimeNs);
|
||||
EXPECT_EQ(30L, valueProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY][1].mValue);
|
||||
}
|
||||
|
||||
|
||||
@@ -455,7 +455,7 @@ sp<StatsLogProcessor> CreateStatsLogProcessor(const long timeBaseSec, const Stat
|
||||
[](const sp<IStatsCompanionService>&){});
|
||||
sp<StatsLogProcessor> processor = new StatsLogProcessor(
|
||||
uidMap, anomalyAlarmMonitor, periodicAlarmMonitor, timeBaseSec, [](const ConfigKey&){});
|
||||
processor->OnConfigUpdated(timeBaseSec, key, config);
|
||||
processor->OnConfigUpdated(timeBaseSec * NS_PER_SEC, key, config);
|
||||
return processor;
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user