Anomaly detection is per dimension

Anomalies are now detected and declared per dimension. This means that
declareAnomaly now gets access to the key that is responsible for the
anomaly. Moreover, the refractory period is per dimension, not overall.
So a second anomaly for the same metric but a different dimension can
fire within the first dimension's refractory period. Thus, if app A
misbehaves and app B misbehaves shortly thereafter, they will both be
detected.

Eventually this key will be passed to the subscribers, although this CL
doesn't do anything with it.

Test: adb shell data/nativetest64/statsd_test/statsd_test
Change-Id: Id76856dc44fe9ecf91ac81a423e84f97c81d30ab
This commit is contained in:
Bookatz
2018-01-04 11:43:20 -08:00
parent 26d5b41fdb
commit 1bf94382d0
13 changed files with 330 additions and 172 deletions

View File

@@ -162,45 +162,23 @@ int64_t AnomalyTracker::getSumOverPastBuckets(const HashableDimensionKey& key) c
return 0;
}
bool AnomalyTracker::detectAnomaly(const int64_t& currentBucketNum,
const DimToValMap& currentBucket) {
if (currentBucketNum > mMostRecentBucketNum + 1) {
addPastBucket(nullptr, currentBucketNum - 1);
}
for (auto itr = currentBucket.begin(); itr != currentBucket.end(); itr++) {
if (itr->second + getSumOverPastBuckets(itr->first) > mAlert.trigger_if_sum_gt()) {
return true;
}
}
// In theory, we also need to check the dimensions not in the current bucket. In single-thread
// mode, usually we could avoid the following loops.
for (auto itr = mSumOverPastBuckets.begin(); itr != mSumOverPastBuckets.end(); itr++) {
if (itr->second > mAlert.trigger_if_sum_gt()) {
return true;
}
}
return false;
}
bool AnomalyTracker::detectAnomaly(const int64_t& currentBucketNum, const HashableDimensionKey& key,
const int64_t& currentBucketValue) {
if (currentBucketNum > mMostRecentBucketNum + 1) {
// TODO: This creates a needless 0 entry in mSumOverPastBuckets. Fix this.
addPastBucket(key, 0, currentBucketNum - 1);
}
return mAlert.has_trigger_if_sum_gt()
&& getSumOverPastBuckets(key) + currentBucketValue > mAlert.trigger_if_sum_gt();
}
void AnomalyTracker::declareAnomaly(const uint64_t& timestampNs) {
// TODO: This should also take in the const HashableDimensionKey& key, to pass
// more details to incidentd and to make mRefractoryPeriodEndsSec key-specific.
void AnomalyTracker::declareAnomaly(const uint64_t& timestampNs, const HashableDimensionKey& key) {
// TODO: Why receive timestamp? RefractoryPeriod should always be based on real time right now.
if (isInRefractoryPeriod(timestampNs)) {
if (isInRefractoryPeriod(timestampNs, key)) {
VLOG("Skipping anomaly declaration since within refractory period");
return;
}
// TODO(guardrail): Consider guarding against too short refractory periods.
mLastAnomalyTimestampNs = timestampNs;
mRefractoryPeriodEndsSec[key] = (timestampNs / NS_PER_SEC) + mAlert.refractory_period_secs();
// TODO: If we had access to the bucket_size_millis, consider calling resetStorage()
// if (mAlert.refractory_period_secs() > mNumOfPastBuckets * bucketSizeNs) { resetStorage(); }
@@ -208,7 +186,7 @@ void AnomalyTracker::declareAnomaly(const uint64_t& timestampNs) {
if (!mSubscriptions.empty()) {
if (mAlert.has_id()) {
ALOGI("An anomaly (%llu) has occurred! Informing subscribers.",mAlert.id());
informSubscribers();
informSubscribers(key);
} else {
ALOGI("An anomaly (with no id) has occurred! Not informing any subscribers.");
}
@@ -218,6 +196,7 @@ void AnomalyTracker::declareAnomaly(const uint64_t& timestampNs) {
StatsdStats::getInstance().noteAnomalyDeclared(mConfigKey, mAlert.id());
// TODO: This should also take in the const HashableDimensionKey& key?
android::util::stats_write(android::util::ANOMALY_DETECTED, mConfigKey.GetUid(),
mConfigKey.GetId(), mAlert.id());
}
@@ -227,24 +206,24 @@ void AnomalyTracker::detectAndDeclareAnomaly(const uint64_t& timestampNs,
const HashableDimensionKey& key,
const int64_t& currentBucketValue) {
if (detectAnomaly(currBucketNum, key, currentBucketValue)) {
declareAnomaly(timestampNs);
declareAnomaly(timestampNs, key);
}
}
void AnomalyTracker::detectAndDeclareAnomaly(const uint64_t& timestampNs,
const int64_t& currBucketNum,
const DimToValMap& currentBucket) {
if (detectAnomaly(currBucketNum, currentBucket)) {
declareAnomaly(timestampNs);
bool AnomalyTracker::isInRefractoryPeriod(const uint64_t& timestampNs,
const HashableDimensionKey& key) {
const auto& it = mRefractoryPeriodEndsSec.find(key);
if (it != mRefractoryPeriodEndsSec.end()) {
if ((timestampNs / NS_PER_SEC) <= it->second) {
return true;
} else {
mRefractoryPeriodEndsSec.erase(key);
}
}
return false;
}
bool AnomalyTracker::isInRefractoryPeriod(const uint64_t& timestampNs) const {
return mLastAnomalyTimestampNs >= 0 &&
timestampNs - mLastAnomalyTimestampNs <= mAlert.refractory_period_secs() * NS_PER_SEC;
}
void AnomalyTracker::informSubscribers() {
void AnomalyTracker::informSubscribers(const HashableDimensionKey& key) {
VLOG("informSubscribers called.");
if (mSubscriptions.empty()) {
ALOGE("Attempt to call with no subscribers.");

View File

@@ -52,16 +52,13 @@ public:
const int64_t& bucketNum);
// Returns true if detected anomaly for the existing buckets on one or more dimension keys.
bool detectAnomaly(const int64_t& currBucketNum, const DimToValMap& currentBucket);
bool detectAnomaly(const int64_t& currBucketNum, const HashableDimensionKey& key,
const int64_t& currentBucketValue);
// Informs incidentd about the detected alert.
void declareAnomaly(const uint64_t& timestampNs);
void declareAnomaly(const uint64_t& timestampNs, const HashableDimensionKey& key);
// Detects the alert and informs the incidentd when applicable.
void detectAndDeclareAnomaly(const uint64_t& timestampNs, const int64_t& currBucketNum,
const DimToValMap& currentBucket);
void detectAndDeclareAnomaly(const uint64_t& timestampNs, const int64_t& currBucketNum,
const HashableDimensionKey& key,
const int64_t& currentBucketValue);
@@ -82,9 +79,11 @@ public:
return mAlert.trigger_if_sum_gt();
}
// Helper function to return the timestamp of the last detected anomaly.
inline int64_t getLastAnomalyTimestampNs() const {
return mLastAnomalyTimestampNs;
// Returns the refractory period timestamp (in seconds) for the given key.
// If there is no stored refractory period ending timestamp, returns 0.
uint32_t getRefractoryPeriodEndsSec(const HashableDimensionKey& key) const {
const auto& it = mRefractoryPeriodEndsSec.find(key);
return it != mRefractoryPeriodEndsSec.end() ? it->second : 0;
}
inline int getNumOfPastBuckets() const {
@@ -121,8 +120,11 @@ protected:
// The bucket number of the last added bucket.
int64_t mMostRecentBucketNum = -1;
// The timestamp when the last anomaly was declared.
int64_t mLastAnomalyTimestampNs = -1;
// Map from each dimension to the timestamp that its refractory period (if this anomaly was
// declared for that dimension) ends, in seconds. Only anomalies that occur after this period
// ends will be declared.
// Entries may be, but are not guaranteed to be, removed after the period is finished.
unordered_map<HashableDimensionKey, uint32_t> mRefractoryPeriodEndsSec;
void flushPastBuckets(const int64_t& currBucketNum);
@@ -133,7 +135,7 @@ protected:
// and remove any items with value 0.
void subtractBucketFromSum(const shared_ptr<DimToValMap>& bucket);
bool isInRefractoryPeriod(const uint64_t& timestampNs) const;
bool isInRefractoryPeriod(const uint64_t& timestampNs, const HashableDimensionKey& key);
// Calculates the corresponding bucket index within the circular array.
size_t index(int64_t bucketNum) const;
@@ -142,12 +144,12 @@ protected:
virtual void resetStorage();
// Informs the subscribers that an anomaly has occurred.
void informSubscribers();
void informSubscribers(const HashableDimensionKey& key);
FRIEND_TEST(AnomalyTrackerTest, TestConsecutiveBuckets);
FRIEND_TEST(AnomalyTrackerTest, TestSparseBuckets);
FRIEND_TEST(GaugeMetricProducerTest, TestAnomalyDetection);
FRIEND_TEST(CountMetricProducerTest, TestAnomalyDetection);
FRIEND_TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced);
};
} // namespace statsd

View File

@@ -46,7 +46,7 @@ void DurationAnomalyTracker::declareAnomalyIfAlarmExpired(const HashableDimensio
if (itr->second != nullptr &&
static_cast<uint32_t>(timestampNs / NS_PER_SEC) >= itr->second->timestampSec) {
declareAnomaly(timestampNs);
declareAnomaly(timestampNs, dimensionKey);
stopAlarm(dimensionKey);
}
}
@@ -55,7 +55,7 @@ void DurationAnomalyTracker::startAlarm(const HashableDimensionKey& dimensionKey
const uint64_t& timestampNs) {
uint32_t timestampSec = static_cast<uint32_t>(timestampNs / NS_PER_SEC);
if (isInRefractoryPeriod(timestampNs)) {
if (isInRefractoryPeriod(timestampNs, dimensionKey)) {
VLOG("Skipping setting anomaly alarm since it'd fall in the refractory period");
return;
}
@@ -104,7 +104,7 @@ void DurationAnomalyTracker::informAlarmsFired(const uint64_t& timestampNs,
// Now declare each of these alarms to have fired.
for (const auto& kv : matchedAlarms) {
declareAnomaly(timestampNs /* TODO: , kv.first */);
declareAnomaly(timestampNs, kv.first);
mAlarms.erase(kv.first);
firedAlarms.erase(kv.second); // No one else can also own it, so we're done with it.
}

View File

@@ -50,7 +50,7 @@ public:
const uint64_t& timestampNs);
// Declares an anomaly for each alarm in firedAlarms that belongs to this DurationAnomalyTracker
// and removes it from firedAlarms. Does NOT remove the alarm from the AnomalyMonitor.
// and removes it from firedAlarms.
// TODO: This will actually be called from a different thread, so make it thread-safe!
// This means that almost every function in DurationAnomalyTracker needs to be locked.
// But this should be done at the level of StatsLogProcessor, which needs to lock
@@ -70,10 +70,10 @@ protected:
void resetStorage() override;
FRIEND_TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetection);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetectionExpiredAlarm);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetectionFiredAlarm);
FRIEND_TEST(MaxDurationTrackerTest, TestAnomalyDetection);
FRIEND_TEST(MaxDurationTrackerTest, TestAnomalyDetection);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetection);
};
} // namespace statsd

View File

@@ -84,7 +84,7 @@ private:
FRIEND_TEST(CountMetricProducerTest, TestNonDimensionalEvents);
FRIEND_TEST(CountMetricProducerTest, TestEventsWithNonSlicedCondition);
FRIEND_TEST(CountMetricProducerTest, TestEventsWithSlicedCondition);
FRIEND_TEST(CountMetricProducerTest, TestAnomalyDetection);
FRIEND_TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced);
};
} // namespace statsd

View File

@@ -169,7 +169,8 @@ protected:
std::vector<sp<DurationAnomalyTracker>> mAnomalyTrackers;
FRIEND_TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetection);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetectionExpiredAlarm);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetectionFiredAlarm);
};
} // namespace statsd

View File

@@ -67,7 +67,8 @@ private:
FRIEND_TEST(OringDurationTrackerTest, TestCrossBucketBoundary);
FRIEND_TEST(OringDurationTrackerTest, TestDurationConditionChange);
FRIEND_TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetection);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetectionExpiredAlarm);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetectionFiredAlarm);
};
} // namespace statsd

View File

@@ -54,11 +54,73 @@ std::shared_ptr<DimToValMap> MockBucket(
return bucket;
}
// Returns the value, for the given key, in that bucket, or 0 if not present.
int64_t getBucketValue(const std::shared_ptr<DimToValMap>& bucket,
const HashableDimensionKey& key) {
const auto& itr = bucket->find(key);
if (itr != bucket->end()) {
return itr->second;
}
return 0;
}
// Returns true if keys in trueList are detected as anomalies and keys in falseList are not.
bool detectAnomaliesPass(AnomalyTracker& tracker,
const int64_t& bucketNum,
const std::shared_ptr<DimToValMap>& currentBucket,
const std::set<const HashableDimensionKey>& trueList,
const std::set<const HashableDimensionKey>& falseList) {
for (HashableDimensionKey key : trueList) {
if (!tracker.detectAnomaly(bucketNum, key, getBucketValue(currentBucket, key))) {
return false;
}
}
for (HashableDimensionKey key : falseList) {
if (tracker.detectAnomaly(bucketNum, key, getBucketValue(currentBucket, key))) {
return false;
}
}
return true;
}
// Calls tracker.detectAndDeclareAnomaly on each key in the bucket.
void detectAndDeclareAnomalies(AnomalyTracker& tracker,
const int64_t& bucketNum,
const std::shared_ptr<DimToValMap>& bucket,
const int64_t& eventTimestamp) {
for (const auto& kv : *bucket) {
tracker.detectAndDeclareAnomaly(eventTimestamp, bucketNum, kv.first, kv.second);
}
}
// Asserts that the refractory time for each key in timestamps is the corresponding
// timestamp (in ns) + refractoryPeriodSec.
// If a timestamp value is negative, instead asserts that the refractory period is inapplicable
// (either non-existent or already past).
void checkRefractoryTimes(AnomalyTracker& tracker,
const int64_t& currTimestampNs,
const int32_t& refractoryPeriodSec,
const std::unordered_map<HashableDimensionKey, int64_t>& timestamps) {
for (const auto& kv : timestamps) {
if (kv.second < 0) {
// Make sure that, if there is a refractory period, it is already past.
EXPECT_LT(tracker.getRefractoryPeriodEndsSec(kv.first),
currTimestampNs / NS_PER_SEC + 1)
<< "Failure was at currTimestampNs " << currTimestampNs;
} else {
EXPECT_EQ(tracker.getRefractoryPeriodEndsSec(kv.first),
kv.second / NS_PER_SEC + refractoryPeriodSec)
<< "Failure was at currTimestampNs " << currTimestampNs;
}
}
}
TEST(AnomalyTrackerTest, TestConsecutiveBuckets) {
const int64_t bucketSizeNs = 30 * NS_PER_SEC;
const int32_t refractoryPeriodSec = 2 * bucketSizeNs / NS_PER_SEC;
Alert alert;
alert.set_num_buckets(3);
alert.set_refractory_period_secs(2 * bucketSizeNs / NS_PER_SEC);
alert.set_refractory_period_secs(refractoryPeriodSec);
alert.set_trigger_if_sum_gt(2);
AnomalyTracker anomalyTracker(alert, kConfigKey);
@@ -66,26 +128,31 @@ TEST(AnomalyTrackerTest, TestConsecutiveBuckets) {
HashableDimensionKey keyB = getMockDimensionKey(1, "b");
HashableDimensionKey keyC = getMockDimensionKey(1, "c");
std::shared_ptr<DimToValMap> bucket0 = MockBucket({{keyA, 1}, {keyB, 2}, {keyC, 1}});
int64_t eventTimestamp0 = 10;
std::shared_ptr<DimToValMap> bucket1 = MockBucket({{keyA, 1}});
int64_t eventTimestamp1 = bucketSizeNs + 11;
std::shared_ptr<DimToValMap> bucket2 = MockBucket({{keyB, 1}});
int64_t eventTimestamp2 = 2 * bucketSizeNs + 12;
std::shared_ptr<DimToValMap> bucket3 = MockBucket({{keyA, 2}});
int64_t eventTimestamp3 = 3 * bucketSizeNs + 13;
std::shared_ptr<DimToValMap> bucket4 = MockBucket({{keyB, 1}});
int64_t eventTimestamp4 = 4 * bucketSizeNs + 14;
std::shared_ptr<DimToValMap> bucket5 = MockBucket({{keyA, 2}});
int64_t eventTimestamp5 = 5 * bucketSizeNs + 15;
std::shared_ptr<DimToValMap> bucket6 = MockBucket({{keyA, 2}});
int64_t eventTimestamp6 = 6 * bucketSizeNs + 16;
int64_t eventTimestamp0 = 10 * NS_PER_SEC;
int64_t eventTimestamp1 = bucketSizeNs + 11 * NS_PER_SEC;
int64_t eventTimestamp2 = 2 * bucketSizeNs + 12 * NS_PER_SEC;
int64_t eventTimestamp3 = 3 * bucketSizeNs + 13 * NS_PER_SEC;
int64_t eventTimestamp4 = 4 * bucketSizeNs + 14 * NS_PER_SEC;
int64_t eventTimestamp5 = 5 * bucketSizeNs + 5 * NS_PER_SEC;
int64_t eventTimestamp6 = 6 * bucketSizeNs + 16 * NS_PER_SEC;
std::shared_ptr<DimToValMap> bucket0 = MockBucket({{keyA, 1}, {keyB, 2}, {keyC, 1}});
std::shared_ptr<DimToValMap> bucket1 = MockBucket({{keyA, 1}});
std::shared_ptr<DimToValMap> bucket2 = MockBucket({{keyB, 1}});
std::shared_ptr<DimToValMap> bucket3 = MockBucket({{keyA, 2}});
std::shared_ptr<DimToValMap> bucket4 = MockBucket({{keyB, 5}});
std::shared_ptr<DimToValMap> bucket5 = MockBucket({{keyA, 2}});
std::shared_ptr<DimToValMap> bucket6 = MockBucket({{keyA, 2}});
// Start time with no events.
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0u);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, -1LL);
EXPECT_FALSE(anomalyTracker.detectAnomaly(0, *bucket0));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp0, 0, *bucket0);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, -1L);
// Event from bucket #0 occurs.
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 0, bucket0, {}, {keyA, keyB, keyC}));
detectAndDeclareAnomalies(anomalyTracker, 0, bucket0, eventTimestamp1);
checkRefractoryTimes(anomalyTracker, eventTimestamp0, refractoryPeriodSec,
{{keyA, -1}, {keyB, -1}, {keyC, -1}});
// Adds past bucket #0
anomalyTracker.addPastBucket(bucket0, 0);
@@ -94,9 +161,12 @@ TEST(AnomalyTrackerTest, TestConsecutiveBuckets) {
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 0LL);
EXPECT_FALSE(anomalyTracker.detectAnomaly(1, *bucket1));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp1, 1, *bucket1);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, -1L);
// Event from bucket #1 occurs.
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 1, bucket1, {}, {keyA, keyB, keyC}));
detectAndDeclareAnomalies(anomalyTracker, 1, bucket1, eventTimestamp1);
checkRefractoryTimes(anomalyTracker, eventTimestamp1, refractoryPeriodSec,
{{keyA, -1}, {keyB, -1}, {keyC, -1}});
// Adds past bucket #0 again. The sum does not change.
anomalyTracker.addPastBucket(bucket0, 0);
@@ -105,9 +175,10 @@ TEST(AnomalyTrackerTest, TestConsecutiveBuckets) {
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 0LL);
EXPECT_FALSE(anomalyTracker.detectAnomaly(1, *bucket1));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp1 + 1, 1, *bucket1);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, -1L);
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 1, bucket1, {}, {keyA, keyB, keyC}));
detectAndDeclareAnomalies(anomalyTracker, 1, bucket1, eventTimestamp1 + 1);
checkRefractoryTimes(anomalyTracker, eventTimestamp1, refractoryPeriodSec,
{{keyA, -1}, {keyB, -1}, {keyC, -1}});
// Adds past bucket #1.
anomalyTracker.addPastBucket(bucket1, 1);
@@ -116,9 +187,12 @@ TEST(AnomalyTrackerTest, TestConsecutiveBuckets) {
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyA), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(2, *bucket2));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp2, 2, *bucket2);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp2);
// Event from bucket #2 occurs. New anomaly on keyB.
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 2, bucket2, {keyB}, {keyA, keyC}));
detectAndDeclareAnomalies(anomalyTracker, 2, bucket2, eventTimestamp2);
checkRefractoryTimes(anomalyTracker, eventTimestamp2, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp2}, {keyC, -1}});
// Adds past bucket #1 again. Nothing changes.
anomalyTracker.addPastBucket(bucket1, 1);
@@ -127,9 +201,11 @@ TEST(AnomalyTrackerTest, TestConsecutiveBuckets) {
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyA), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(2, *bucket2));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp2 + 1, 2, *bucket2);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp2);
// Event from bucket #2 occurs (again).
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 2, bucket2, {keyB}, {keyA, keyC}));
detectAndDeclareAnomalies(anomalyTracker, 2, bucket2, eventTimestamp2 + 1);
checkRefractoryTimes(anomalyTracker, eventTimestamp2, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp2}, {keyC, -1}});
// Adds past bucket #2.
anomalyTracker.addPastBucket(bucket2, 2);
@@ -137,10 +213,12 @@ TEST(AnomalyTrackerTest, TestConsecutiveBuckets) {
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyA), 1LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 1LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(3, *bucket3));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp3, 3, *bucket3);
// Within refractory period.
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp2);
// Event from bucket #3 occurs. New anomaly on keyA.
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 3, bucket3, {keyA}, {keyB, keyC}));
detectAndDeclareAnomalies(anomalyTracker, 3, bucket3, eventTimestamp3);
checkRefractoryTimes(anomalyTracker, eventTimestamp3, refractoryPeriodSec,
{{keyA, eventTimestamp3}, {keyB, eventTimestamp2}, {keyC, -1}});
// Adds bucket #3.
anomalyTracker.addPastBucket(bucket3, 3L);
@@ -148,37 +226,46 @@ TEST(AnomalyTrackerTest, TestConsecutiveBuckets) {
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyA), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 1LL);
EXPECT_FALSE(anomalyTracker.detectAnomaly(4, *bucket4));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp4, 4, *bucket4);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp2);
// Event from bucket #4 occurs. New anomaly on keyB.
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 4, bucket4, {keyB}, {keyA, keyC}));
detectAndDeclareAnomalies(anomalyTracker, 4, bucket4, eventTimestamp4);
checkRefractoryTimes(anomalyTracker, eventTimestamp4, refractoryPeriodSec,
{{keyA, eventTimestamp3}, {keyB, eventTimestamp4}, {keyC, -1}});
// Adds bucket #4.
anomalyTracker.addPastBucket(bucket4, 4);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 4L);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyA), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 1LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(5, *bucket5));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp5, 5, *bucket5);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp5);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 5LL);
// Event from bucket #5 occurs. New anomaly on keyA, which is still in refractory.
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 5, bucket5, {keyA, keyB}, {keyC}));
detectAndDeclareAnomalies(anomalyTracker, 5, bucket5, eventTimestamp5);
checkRefractoryTimes(anomalyTracker, eventTimestamp5, refractoryPeriodSec,
{{keyA, eventTimestamp3}, {keyB, eventTimestamp4}, {keyC, -1}});
// Adds bucket #5.
anomalyTracker.addPastBucket(bucket5, 5);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 5L);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyA), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 1LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(6, *bucket6));
// Within refractory period.
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp6, 6, *bucket6);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp5);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 5LL);
// Event from bucket #6 occurs. New anomaly on keyA, which is now out of refractory.
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 6, bucket6, {keyA, keyB}, {keyC}));
detectAndDeclareAnomalies(anomalyTracker, 6, bucket6, eventTimestamp6);
checkRefractoryTimes(anomalyTracker, eventTimestamp6, refractoryPeriodSec,
{{keyA, eventTimestamp6}, {keyB, eventTimestamp4}, {keyC, -1}});
}
TEST(AnomalyTrackerTest, TestSparseBuckets) {
const int64_t bucketSizeNs = 30 * NS_PER_SEC;
const int32_t refractoryPeriodSec = 2 * bucketSizeNs / NS_PER_SEC;
Alert alert;
alert.set_num_buckets(3);
alert.set_refractory_period_secs(2 * bucketSizeNs / NS_PER_SEC);
alert.set_refractory_period_secs(refractoryPeriodSec);
alert.set_trigger_if_sum_gt(2);
AnomalyTracker anomalyTracker(alert, kConfigKey);
@@ -204,9 +291,10 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, -1LL);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_FALSE(anomalyTracker.detectAnomaly(9, *bucket9));
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp1, 9, *bucket9);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, -1);
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 9, bucket9, {}, {keyA, keyB, keyC, keyD}));
detectAndDeclareAnomalies(anomalyTracker, 9, bucket9, eventTimestamp1);
checkRefractoryTimes(anomalyTracker, eventTimestamp1, refractoryPeriodSec,
{{keyA, -1}, {keyB, -1}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
// Add past bucket #9
anomalyTracker.addPastBucket(bucket9, 9);
@@ -215,25 +303,27 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyA), 1LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 2LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(16, *bucket16));
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 16, bucket16, {keyB}, {keyA, keyC, keyD}));
// TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 15L);
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp2, 16, *bucket16);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp2);
detectAndDeclareAnomalies(anomalyTracker, 16, bucket16, eventTimestamp2);
// TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 15L);
checkRefractoryTimes(anomalyTracker, eventTimestamp2, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp2}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
// Add past bucket #16
anomalyTracker.addPastBucket(bucket16, 16);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 16L);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 1UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 4LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(18, *bucket18));
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 18, bucket18, {keyB}, {keyA, keyC, keyD}));
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 1UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 4LL);
// Within refractory period.
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp3, 18, *bucket18);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp2);
detectAndDeclareAnomalies(anomalyTracker, 18, bucket18, eventTimestamp3);
checkRefractoryTimes(anomalyTracker, eventTimestamp3, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp2}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 1UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 4LL);
@@ -243,13 +333,14 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 1LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(20, *bucket20));
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 20, bucket20, {keyB}, {keyA, keyC, keyD}));
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 19L);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 1LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp4, 20, *bucket20);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp4);
detectAndDeclareAnomalies(anomalyTracker, 20, bucket20, eventTimestamp4);
checkRefractoryTimes(anomalyTracker, eventTimestamp4, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp4}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
// Add bucket #18 again. Nothing changes.
anomalyTracker.addPastBucket(bucket18, 18);
@@ -257,13 +348,14 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 1LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_TRUE(anomalyTracker.detectAnomaly(20, *bucket20));
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 20, bucket20, {keyB}, {keyA, keyC, keyD}));
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 1LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp4 + 1, 20, *bucket20);
detectAndDeclareAnomalies(anomalyTracker, 20, bucket20, eventTimestamp4 + 1);
// Within refractory period.
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp4);
checkRefractoryTimes(anomalyTracker, eventTimestamp4 + 1, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp4}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
// Add past bucket #20
anomalyTracker.addPastBucket(bucket20, 20);
@@ -271,32 +363,37 @@ TEST(AnomalyTrackerTest, TestSparseBuckets) {
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 2UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyB), 3LL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyC), 1LL);
EXPECT_FALSE(anomalyTracker.detectAnomaly(25, *bucket25));
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 25, bucket25, {}, {keyA, keyB, keyC, keyD}));
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 24L);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp5, 25, *bucket25);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp4);
// TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
detectAndDeclareAnomalies(anomalyTracker, 25, bucket25, eventTimestamp5);
checkRefractoryTimes(anomalyTracker, eventTimestamp5, refractoryPeriodSec,
{{keyA, -1}, {keyB, eventTimestamp4}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
// Add past bucket #25
anomalyTracker.addPastBucket(bucket25, 25);
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 25L);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 1UL);
// TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 1UL);
EXPECT_EQ(anomalyTracker.getSumOverPastBuckets(keyD), 1LL);
EXPECT_FALSE(anomalyTracker.detectAnomaly(28, *bucket28));
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 28, bucket28, {},
{keyA, keyB, keyC, keyD, keyE}));
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 27L);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp6, 28, *bucket28);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp4);
// TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
detectAndDeclareAnomalies(anomalyTracker, 28, bucket28, eventTimestamp6);
// TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
checkRefractoryTimes(anomalyTracker, eventTimestamp6, refractoryPeriodSec,
{{keyA, -1}, {keyB, -1}, {keyC, -1}, {keyD, -1}, {keyE, -1}});
// Updates current bucket #28.
(*bucket28)[keyE] = 5;
EXPECT_TRUE(anomalyTracker.detectAnomaly(28, *bucket28));
EXPECT_TRUE(detectAnomaliesPass(anomalyTracker, 28, bucket28, {keyE},
{keyA, keyB, keyC, keyD}));
EXPECT_EQ(anomalyTracker.mMostRecentBucketNum, 27L);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
anomalyTracker.detectAndDeclareAnomaly(eventTimestamp6 + 7, 28, *bucket28);
EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
EXPECT_EQ(anomalyTracker.mLastAnomalyTimestampNs, eventTimestamp6 + 7);
// TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
detectAndDeclareAnomalies(anomalyTracker, 28, bucket28, eventTimestamp6 + 7);
// TODO: after detectAnomaly fix: EXPECT_EQ(anomalyTracker.mSumOverPastBuckets.size(), 0UL);
checkRefractoryTimes(anomalyTracker, eventTimestamp6, refractoryPeriodSec,
{{keyA, -1}, {keyB, -1}, {keyC, -1}, {keyD, -1}, {keyE, eventTimestamp6 + 7}});
}
} // namespace statsd

View File

@@ -191,13 +191,14 @@ TEST(CountMetricProducerTest, TestEventsWithSlicedCondition) {
EXPECT_EQ(1LL, bucketInfo.mCount);
}
TEST(CountMetricProducerTest, TestAnomalyDetection) {
TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced) {
Alert alert;
alert.set_id(11);
alert.set_metric_id(1);
alert.set_trigger_if_sum_gt(2);
alert.set_num_buckets(2);
alert.set_refractory_period_secs(1);
const int32_t refPeriodSec = 1;
alert.set_refractory_period_secs(refPeriodSec);
int64_t bucketStartTimeNs = 10000000000;
int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(ONE_MINUTE) * 1000000LL;
@@ -220,7 +221,7 @@ TEST(CountMetricProducerTest, TestAnomalyDetection) {
LogEvent event4(tagId, bucketStartTimeNs + 3 * bucketSizeNs + 1);
LogEvent event5(tagId, bucketStartTimeNs + 3 * bucketSizeNs + 2);
LogEvent event6(tagId, bucketStartTimeNs + 3 * bucketSizeNs + 3);
LogEvent event7(tagId, bucketStartTimeNs + 3 * bucketSizeNs + 3 + NS_PER_SEC);
LogEvent event7(tagId, bucketStartTimeNs + 3 * bucketSizeNs + 2 * NS_PER_SEC);
// Two events in bucket #0.
countProducer.onMatchedLogEvent(1 /*log matcher index*/, event1);
@@ -228,13 +229,13 @@ TEST(CountMetricProducerTest, TestAnomalyDetection) {
EXPECT_EQ(1UL, countProducer.mCurrentSlicedCounter->size());
EXPECT_EQ(2L, countProducer.mCurrentSlicedCounter->begin()->second);
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), -1LL);
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY), 0U);
// One event in bucket #2. No alarm as bucket #0 is trashed out.
countProducer.onMatchedLogEvent(1 /*log matcher index*/, event3);
EXPECT_EQ(1UL, countProducer.mCurrentSlicedCounter->size());
EXPECT_EQ(1L, countProducer.mCurrentSlicedCounter->begin()->second);
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), -1LL);
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY), 0U);
// Two events in bucket #3.
countProducer.onMatchedLogEvent(1 /*log matcher index*/, event4);
@@ -243,12 +244,14 @@ TEST(CountMetricProducerTest, TestAnomalyDetection) {
EXPECT_EQ(1UL, countProducer.mCurrentSlicedCounter->size());
EXPECT_EQ(3L, countProducer.mCurrentSlicedCounter->begin()->second);
// Anomaly at event 6 is within refractory period. The alarm is at event 5 timestamp not event 6
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), (long long)event5.GetTimestampNs());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY),
event5.GetTimestampNs() / NS_PER_SEC + refPeriodSec);
countProducer.onMatchedLogEvent(1 /*log matcher index*/, event7);
EXPECT_EQ(1UL, countProducer.mCurrentSlicedCounter->size());
EXPECT_EQ(4L, countProducer.mCurrentSlicedCounter->begin()->second);
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), (long long)event7.GetTimestampNs());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY),
event7.GetTimestampNs() / NS_PER_SEC + refPeriodSec);
}
} // namespace statsd

View File

@@ -201,6 +201,8 @@ TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
alert.set_metric_id(metricId);
alert.set_trigger_if_sum_gt(25);
alert.set_num_buckets(2);
const int32_t refPeriodSec = 60;
alert.set_refractory_period_secs(refPeriodSec);
sp<AnomalyTracker> anomalyTracker = gaugeProducer.addAnomalyTracker(alert);
int tagId = 1;
@@ -213,10 +215,10 @@ TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(13L,
gaugeProducer.mCurrentSlicedBucket->begin()->second->begin()->second.value_int());
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), -1LL);
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY), 0U);
std::shared_ptr<LogEvent> event2 =
std::make_shared<LogEvent>(tagId, bucketStartTimeNs + bucketSizeNs + 10);
std::make_shared<LogEvent>(tagId, bucketStartTimeNs + bucketSizeNs + 20);
event2->write("some value");
event2->write(15);
event2->init();
@@ -225,19 +227,21 @@ TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(15L,
gaugeProducer.mCurrentSlicedBucket->begin()->second->begin()->second.value_int());
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), (long long)event2->GetTimestampNs());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY),
event2->GetTimestampNs() / NS_PER_SEC + refPeriodSec);
std::shared_ptr<LogEvent> event3 =
std::make_shared<LogEvent>(tagId, bucketStartTimeNs + 2 * bucketSizeNs + 10);
event3->write("some value");
event3->write(24);
event3->write(26);
event3->init();
gaugeProducer.onDataPulled({event3});
EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(24L,
EXPECT_EQ(26L,
gaugeProducer.mCurrentSlicedBucket->begin()->second->begin()->second.value_int());
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), (long long)event3->GetTimestampNs());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY),
event2->GetTimestampNs() / NS_PER_SEC + refPeriodSec);
// The event4 does not have the gauge field. Thus the current bucket value is 0.
std::shared_ptr<LogEvent> event4 =
@@ -247,7 +251,6 @@ TEST(GaugeMetricProducerTest, TestAnomalyDetection) {
gaugeProducer.onDataPulled({event4});
EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_TRUE(gaugeProducer.mCurrentSlicedBucket->begin()->second->empty());
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), (long long)event3->GetTimestampNs());
}
} // namespace statsd

View File

@@ -210,7 +210,8 @@ TEST(MaxDurationTrackerTest, TestAnomalyDetection) {
alert.set_metric_id(metricId);
alert.set_trigger_if_sum_gt(32 * NS_PER_SEC);
alert.set_num_buckets(2);
alert.set_refractory_period_secs(1);
const int32_t refPeriodSec = 1;
alert.set_refractory_period_secs(refPeriodSec);
unordered_map<HashableDimensionKey, vector<DurationBucket>> buckets;
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
@@ -225,15 +226,16 @@ TEST(MaxDurationTrackerTest, TestAnomalyDetection) {
tracker.noteStart(key1, true, eventStartTimeNs, ConditionKey());
tracker.noteStop(key1, eventStartTimeNs + 10, false);
EXPECT_EQ(anomalyTracker->mLastAnomalyTimestampNs, -1);
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 0U);
EXPECT_EQ(10LL, tracker.mDuration);
tracker.noteStart(key2, true, eventStartTimeNs + 20, ConditionKey());
tracker.flushIfNeeded(eventStartTimeNs + 2 * bucketSizeNs + 3 * NS_PER_SEC, &buckets);
tracker.noteStop(key2, eventStartTimeNs + 2 * bucketSizeNs + 3 * NS_PER_SEC, false);
EXPECT_EQ((long long)(4 * NS_PER_SEC + 1LL), tracker.mDuration);
EXPECT_EQ(anomalyTracker->mLastAnomalyTimestampNs,
(long long)(eventStartTimeNs + 2 * bucketSizeNs + 3 * NS_PER_SEC));
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey),
(eventStartTimeNs + 2 * bucketSizeNs) / NS_PER_SEC + 3 + refPeriodSec);
}
} // namespace statsd

View File

@@ -309,13 +309,14 @@ TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp) {
tracker.predictAnomalyTimestampNs(*anomalyTracker, event3StartTimeNs));
}
TEST(OringDurationTrackerTest, TestAnomalyDetection) {
TEST(OringDurationTrackerTest, TestAnomalyDetectionExpiredAlarm) {
Alert alert;
alert.set_id(101);
alert.set_metric_id(1);
alert.set_trigger_if_sum_gt(40 * NS_PER_SEC);
alert.set_num_buckets(2);
alert.set_refractory_period_secs(1);
const int32_t refPeriodSec = 45;
alert.set_refractory_period_secs(refPeriodSec);
unordered_map<HashableDimensionKey, vector<DurationBucket>> buckets;
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
@@ -328,23 +329,86 @@ TEST(OringDurationTrackerTest, TestAnomalyDetection) {
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, true /*nesting*/,
bucketStartTimeNs, bucketSizeNs, false, {anomalyTracker});
tracker.noteStart(DEFAULT_DIMENSION_KEY, true, eventStartTimeNs, ConditionKey());
tracker.noteStop(DEFAULT_DIMENSION_KEY, eventStartTimeNs + 10, false);
EXPECT_EQ(anomalyTracker->mLastAnomalyTimestampNs, -1);
tracker.noteStart(kEventKey1, true, eventStartTimeNs, ConditionKey());
tracker.noteStop(kEventKey1, eventStartTimeNs + 10, false);
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 0U);
EXPECT_TRUE(tracker.mStarted.empty());
EXPECT_EQ(10LL, tracker.mDuration);
EXPECT_EQ(0u, tracker.mStarted.size());
tracker.noteStart(DEFAULT_DIMENSION_KEY, true, eventStartTimeNs + 20, ConditionKey());
tracker.noteStart(kEventKey1, true, eventStartTimeNs + 20, ConditionKey());
EXPECT_EQ(1u, anomalyTracker->mAlarms.size());
EXPECT_EQ((long long)(51ULL * NS_PER_SEC),
(long long)(anomalyTracker->mAlarms.begin()->second->timestampSec * NS_PER_SEC));
// The alarm is set to fire at 51s, and when it does, an anomaly would be declared. However,
// because this is a unit test, the alarm won't actually fire at all. Since the alarm fails
// to fire in time, the anomaly is instead caught when noteStop is called, at around 71s.
tracker.flushIfNeeded(eventStartTimeNs + 2 * bucketSizeNs + 25, &buckets);
tracker.noteStop(DEFAULT_DIMENSION_KEY, eventStartTimeNs + 2 * bucketSizeNs + 25, false);
tracker.noteStop(kEventKey1, eventStartTimeNs + 2 * bucketSizeNs + 25, false);
EXPECT_EQ(anomalyTracker->getSumOverPastBuckets(eventKey), (long long)(bucketSizeNs));
EXPECT_EQ((long long)(eventStartTimeNs + 2 * bucketSizeNs + 25),
anomalyTracker->mLastAnomalyTimestampNs);
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey),
(eventStartTimeNs + 2 * bucketSizeNs + 25) / NS_PER_SEC + refPeriodSec);
}
TEST(OringDurationTrackerTest, TestAnomalyDetectionFiredAlarm) {
Alert alert;
alert.set_id(101);
alert.set_metric_id(1);
alert.set_trigger_if_sum_gt(40 * NS_PER_SEC);
alert.set_num_buckets(2);
const int32_t refPeriodSec = 45;
alert.set_refractory_period_secs(refPeriodSec);
unordered_map<HashableDimensionKey, vector<DurationBucket>> buckets;
sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
ConditionKey conkey;
conkey[StringToId("APP_BACKGROUND")] = kConditionKey1;
uint64_t bucketStartTimeNs = 10 * NS_PER_SEC;
uint64_t eventStartTimeNs = bucketStartTimeNs + NS_PER_SEC + 1;
uint64_t bucketSizeNs = 30 * NS_PER_SEC;
sp<DurationAnomalyTracker> anomalyTracker = new DurationAnomalyTracker(alert, kConfigKey);
OringDurationTracker tracker(kConfigKey, metricId, eventKey, wizard, 1, true /*nesting*/,
bucketStartTimeNs, bucketSizeNs, false, {anomalyTracker});
tracker.noteStart(kEventKey1, true, 15 * NS_PER_SEC, conkey); // start key1
EXPECT_EQ(1u, anomalyTracker->mAlarms.size());
sp<const AnomalyAlarm> alarm = anomalyTracker->mAlarms.begin()->second;
EXPECT_EQ((long long)(55ULL * NS_PER_SEC), (long long)(alarm->timestampSec * NS_PER_SEC));
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 0U);
tracker.noteStop(kEventKey1, 17 * NS_PER_SEC, false); // stop key1 (2 seconds later)
EXPECT_EQ(0u, anomalyTracker->mAlarms.size());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 0U);
tracker.noteStart(kEventKey1, true, 22 * NS_PER_SEC, conkey); // start key1 again
EXPECT_EQ(1u, anomalyTracker->mAlarms.size());
alarm = anomalyTracker->mAlarms.begin()->second;
EXPECT_EQ((long long)(60ULL * NS_PER_SEC), (long long)(alarm->timestampSec * NS_PER_SEC));
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 0U);
tracker.noteStart(kEventKey2, true, 32 * NS_PER_SEC, conkey); // start key2
EXPECT_EQ(1u, anomalyTracker->mAlarms.size());
alarm = anomalyTracker->mAlarms.begin()->second;
EXPECT_EQ((long long)(60ULL * NS_PER_SEC), (long long)(alarm->timestampSec * NS_PER_SEC));
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 0U);
tracker.noteStop(kEventKey1, 47 * NS_PER_SEC, false); // stop key1
EXPECT_EQ(1u, anomalyTracker->mAlarms.size());
alarm = anomalyTracker->mAlarms.begin()->second;
EXPECT_EQ((long long)(60ULL * NS_PER_SEC), (long long)(alarm->timestampSec * NS_PER_SEC));
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 0U);
// Now, at 60s, which is 38s after key1 started again, we have reached 40s of 'on' time.
std::unordered_set<sp<const AnomalyAlarm>, SpHash<AnomalyAlarm>> firedAlarms({alarm});
anomalyTracker->informAlarmsFired(62 * NS_PER_SEC, firedAlarms);
EXPECT_EQ(0u, anomalyTracker->mAlarms.size());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 62U + refPeriodSec);
tracker.noteStop(kEventKey2, 69 * NS_PER_SEC, false); // stop key2
EXPECT_EQ(0u, anomalyTracker->mAlarms.size());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(eventKey), 62U + refPeriodSec);
}
} // namespace statsd

View File

@@ -249,7 +249,8 @@ TEST(ValueMetricProducerTest, TestAnomalyDetection) {
alert.set_metric_id(metricId);
alert.set_trigger_if_sum_gt(130);
alert.set_num_buckets(2);
alert.set_refractory_period_secs(3);
const int32_t refPeriodSec = 3;
alert.set_refractory_period_secs(refPeriodSec);
ValueMetric metric;
metric.set_id(metricId);
@@ -297,23 +298,28 @@ TEST(ValueMetricProducerTest, TestAnomalyDetection) {
// Two events in bucket #0.
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event1);
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event2);
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), -1LL); // Value sum == 30 <= 130.
// Value sum == 30 <= 130.
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY), 0U);
// One event in bucket #2. No alarm as bucket #0 is trashed out.
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event3);
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), -1LL); // Value sum == 130 <= 130.
// Value sum == 130 <= 130.
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY), 0U);
// Three events in bucket #3.
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event4);
// Anomaly at event 4 since Value sum == 131 > 130!
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), (long long)event4->GetTimestampNs());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY),
event4->GetTimestampNs() / NS_PER_SEC + refPeriodSec);
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event5);
// Event 5 is within 3 sec refractory period. Thus last alarm timestamp is still event4.
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), (long long)event4->GetTimestampNs());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY),
event4->GetTimestampNs() / NS_PER_SEC + refPeriodSec);
valueProducer.onMatchedLogEvent(1 /*log matcher index*/, *event6);
// Anomaly at event 6 since Value sum == 160 > 130 and after refractory period.
EXPECT_EQ(anomalyTracker->getLastAnomalyTimestampNs(), (long long)event6->GetTimestampNs());
EXPECT_EQ(anomalyTracker->getRefractoryPeriodEndsSec(DEFAULT_DIMENSION_KEY),
event6->GetTimestampNs() / NS_PER_SEC + refPeriodSec);
}
} // namespace statsd