From 61e27abf280906e125c7b6ff1a6b7cf90f960b2e Mon Sep 17 00:00:00 2001 From: Chalard Jean Date: Wed, 12 Dec 2018 17:56:37 +0900 Subject: [PATCH 1/4] [MS03] Add the contract for the IPMS database. Test: Some more boilerplate that doesn't exactly need tests as such. The important thing is that the database can store and retrieve data, not that it creates as specific file or schema. Bug: 116512211 Change-Id: Ic27e706f15754b34d7bc26626a92d895a15a083d --- .../android/net/ipmemorystore/Status.java | 2 + .../ipmemorystore/IpMemoryStoreDatabase.java | 143 ++++++++++++++++++ .../ipmemorystore/IpMemoryStoreService.java | 44 +++++- .../IpMemoryStoreServiceTest.java | 6 + 4 files changed, 190 insertions(+), 5 deletions(-) create mode 100644 services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreDatabase.java diff --git a/core/java/android/net/ipmemorystore/Status.java b/core/java/android/net/ipmemorystore/Status.java index 5b016ec55ae1a..2cec135322adf 100644 --- a/core/java/android/net/ipmemorystore/Status.java +++ b/core/java/android/net/ipmemorystore/Status.java @@ -26,6 +26,8 @@ import android.annotation.NonNull; public class Status { public static final int SUCCESS = 0; + public static final int ERROR_DATABASE_CANNOT_BE_OPENED = -1; + public final int resultCode; public Status(final int resultCode) { diff --git a/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreDatabase.java b/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreDatabase.java new file mode 100644 index 0000000000000..eaab6507e8d4c --- /dev/null +++ b/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreDatabase.java @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.android.server.net.ipmemorystore; + +import android.annotation.NonNull; +import android.content.Context; +import android.database.sqlite.SQLiteDatabase; +import android.database.sqlite.SQLiteOpenHelper; + +/** + * Encapsulating class for using the SQLite database backing the memory store. + * + * This class groups together the contracts and the SQLite helper used to + * use the database. + * + * @hide + */ +public class IpMemoryStoreDatabase { + /** + * Contract class for the Network Attributes table. + */ + public static class NetworkAttributesContract { + public static final String TABLENAME = "NetworkAttributes"; + + public static final String COLNAME_L2KEY = "l2Key"; + public static final String COLTYPE_L2KEY = "TEXT NOT NULL"; + + public static final String COLNAME_EXPIRYDATE = "expiryDate"; + // Milliseconds since the Epoch, in true Java style + public static final String COLTYPE_EXPIRYDATE = "BIGINT"; + + public static final String COLNAME_ASSIGNEDV4ADDRESS = "assignedV4Address"; + public static final String COLTYPE_ASSIGNEDV4ADDRESS = "INTEGER"; + + // Please note that the group hint is only a *hint*, hence its name. The client can offer + // this information to nudge the grouping in the decision it thinks is right, but it can't + // decide for the memory store what is the same L3 network. 
+ public static final String COLNAME_GROUPHINT = "groupHint"; + public static final String COLTYPE_GROUPHINT = "TEXT"; + + public static final String COLNAME_DNSADDRESSES = "dnsAddresses"; + // Stored in marshalled form as is + public static final String COLTYPE_DNSADDRESSES = "BLOB"; + + public static final String COLNAME_MTU = "mtu"; + public static final String COLTYPE_MTU = "INTEGER"; + + public static final String CREATE_TABLE = "CREATE TABLE IF NOT EXISTS " + + TABLENAME + " (" + + COLNAME_L2KEY + " " + COLTYPE_L2KEY + " PRIMARY KEY NOT NULL, " + + COLNAME_EXPIRYDATE + " " + COLTYPE_EXPIRYDATE + ", " + + COLNAME_ASSIGNEDV4ADDRESS + " " + COLTYPE_ASSIGNEDV4ADDRESS + ", " + + COLNAME_GROUPHINT + " " + COLTYPE_GROUPHINT + ", " + + COLNAME_DNSADDRESSES + " " + COLTYPE_DNSADDRESSES + ", " + + COLNAME_MTU + " " + COLTYPE_MTU + ")"; + public static final String DROP_TABLE = "DROP TABLE IF EXISTS " + TABLENAME; + } + + /** + * Contract class for the Private Data table. + */ + public static class PrivateDataContract { + public static final String TABLENAME = "PrivateData"; + + public static final String COLNAME_L2KEY = "l2Key"; + public static final String COLTYPE_L2KEY = "TEXT NOT NULL"; + + public static final String COLNAME_CLIENT = "client"; + public static final String COLTYPE_CLIENT = "TEXT NOT NULL"; + + public static final String COLNAME_DATANAME = "dataName"; + public static final String COLTYPE_DATANAME = "TEXT NOT NULL"; + + public static final String COLNAME_DATA = "data"; + public static final String COLTYPE_DATA = "BLOB NOT NULL"; + + public static final String CREATE_TABLE = "CREATE TABLE IF NOT EXISTS " + + TABLENAME + " (" + + COLNAME_L2KEY + " " + COLTYPE_L2KEY + ", " + + COLNAME_CLIENT + " " + COLTYPE_CLIENT + ", " + + COLNAME_DATANAME + " " + COLTYPE_DATANAME + ", " + + COLNAME_DATA + " " + COLTYPE_DATA + ", " + + "PRIMARY KEY (" + + COLNAME_L2KEY + ", " + + COLNAME_CLIENT + ", " + + COLNAME_DATANAME + "))"; + public static final String DROP_TABLE 
= "DROP TABLE IF EXISTS " + TABLENAME; + } + + // To save memory when the DB is not used, close it after 30s of inactivity. This is + // determined manually based on what feels right. + private static final long IDLE_CONNECTION_TIMEOUT_MS = 30_000; + + /** The SQLite DB helper */ + public static class DbHelper extends SQLiteOpenHelper { + // Update this whenever changing the schema. + private static final int SCHEMA_VERSION = 1; + private static final String DATABASE_FILENAME = "IpMemoryStore.db"; + + public DbHelper(@NonNull final Context context) { + super(context, DATABASE_FILENAME, null, SCHEMA_VERSION); + setIdleConnectionTimeout(IDLE_CONNECTION_TIMEOUT_MS); + } + + /** Called when the database is created */ + public void onCreate(@NonNull final SQLiteDatabase db) { + db.execSQL(NetworkAttributesContract.CREATE_TABLE); + db.execSQL(PrivateDataContract.CREATE_TABLE); + } + + /** Called when the database is upgraded */ + public void onUpgrade(@NonNull final SQLiteDatabase db, final int oldVersion, + final int newVersion) { + // No upgrade supported yet. + db.execSQL(NetworkAttributesContract.DROP_TABLE); + db.execSQL(PrivateDataContract.DROP_TABLE); + onCreate(db); + } + + /** Called when the database is downgraded */ + public void onDowngrade(@NonNull final SQLiteDatabase db, final int oldVersion, + final int newVersion) { + // Downgrades always nuke all data and recreate an empty table. 
+ db.execSQL(NetworkAttributesContract.DROP_TABLE); + db.execSQL(PrivateDataContract.DROP_TABLE); + onCreate(db); + } + } +} diff --git a/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreService.java b/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreService.java index c9759bf6170fc..98dc9884b2f56 100644 --- a/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreService.java +++ b/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreService.java @@ -19,6 +19,8 @@ package com.android.server.net.ipmemorystore; import android.annotation.NonNull; import android.annotation.Nullable; import android.content.Context; +import android.database.SQLException; +import android.database.sqlite.SQLiteDatabase; import android.net.IIpMemoryStore; import android.net.ipmemorystore.Blob; import android.net.ipmemorystore.IOnBlobRetrievedListener; @@ -27,6 +29,7 @@ import android.net.ipmemorystore.IOnNetworkAttributesRetrieved; import android.net.ipmemorystore.IOnSameNetworkResponseListener; import android.net.ipmemorystore.IOnStatusListener; import android.net.ipmemorystore.NetworkAttributesParcelable; +import android.util.Log; /** * Implementation for the IP memory store. @@ -37,10 +40,41 @@ import android.net.ipmemorystore.NetworkAttributesParcelable; * @hide */ public class IpMemoryStoreService extends IIpMemoryStore.Stub { - final Context mContext; + private static final String TAG = IpMemoryStoreService.class.getSimpleName(); + @NonNull + final Context mContext; + @Nullable + final SQLiteDatabase mDb; + + /** + * Construct an IpMemoryStoreService object. + * This constructor will block on disk access to open the database. + * @param context the context to access storage with. 
+ */ public IpMemoryStoreService(@NonNull final Context context) { + // Note that constructing the service will access the disk and block + // for some time, but it should make no difference to the clients. Because + // the interface is one-way, clients fire and forget requests, and the callback + // will get called eventually in any case, and the framework will wait for the + // service to be created to deliver subsequent requests. + // Avoiding this would mean the mDb member can't be final, which means the service would + // have to test for nullity, care for failure, and allow for a wait at every single access, + // which would make the code a lot more complex and require all methods to possibly block. mContext = context; + SQLiteDatabase db; + final IpMemoryStoreDatabase.DbHelper helper = new IpMemoryStoreDatabase.DbHelper(context); + try { + db = helper.getWritableDatabase(); + if (null == db) Log.e(TAG, "Unexpected null return of getWriteableDatabase"); + } catch (final SQLException e) { + Log.e(TAG, "Can't open the Ip Memory Store database", e); + db = null; + } catch (final Exception e) { + Log.wtf(TAG, "Impossible exception Ip Memory Store database", e); + db = null; + } + mDb = db; } /** @@ -61,7 +95,7 @@ public class IpMemoryStoreService extends IIpMemoryStore.Stub { public void storeNetworkAttributes(@NonNull final String l2Key, @NonNull final NetworkAttributesParcelable attributes, @Nullable final IOnStatusListener listener) { - // TODO : implement this + // TODO : implement this. } /** @@ -79,7 +113,7 @@ public class IpMemoryStoreService extends IIpMemoryStore.Stub { public void storeBlob(@NonNull final String l2Key, @NonNull final String clientId, @NonNull final String name, @NonNull final Blob data, @Nullable final IOnStatusListener listener) { - // TODO : implement this + // TODO : implement this. 
} /** @@ -99,7 +133,7 @@ public class IpMemoryStoreService extends IIpMemoryStore.Stub { @Override public void findL2Key(@NonNull final NetworkAttributesParcelable attributes, @NonNull final IOnL2KeyResponseListener listener) { - // TODO : implement this + // TODO : implement this. } /** @@ -114,7 +148,7 @@ public class IpMemoryStoreService extends IIpMemoryStore.Stub { @Override public void isSameNetwork(@NonNull final String l2Key1, @NonNull final String l2Key2, @NonNull final IOnSameNetworkResponseListener listener) { - // TODO : implement this + // TODO : implement this. } /** diff --git a/tests/net/java/com/android/server/net/ipmemorystore/IpMemoryStoreServiceTest.java b/tests/net/java/com/android/server/net/ipmemorystore/IpMemoryStoreServiceTest.java index 859a54d29321e..e63c3b02d1c3d 100644 --- a/tests/net/java/com/android/server/net/ipmemorystore/IpMemoryStoreServiceTest.java +++ b/tests/net/java/com/android/server/net/ipmemorystore/IpMemoryStoreServiceTest.java @@ -16,6 +16,9 @@ package com.android.server.net.ipmemorystore; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doReturn; + import android.content.Context; import android.support.test.filters.SmallTest; import android.support.test.runner.AndroidJUnit4; @@ -26,6 +29,8 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import java.io.File; + /** Unit tests for {@link IpMemoryStoreServiceTest}. */ @SmallTest @RunWith(AndroidJUnit4.class) @@ -36,6 +41,7 @@ public class IpMemoryStoreServiceTest { @Before public void setUp() { MockitoAnnotations.initMocks(this); + doReturn(new File("/tmp/test.db")).when(mMockContext).getDatabasePath(anyString()); } @Test From 0150bd9283228cc279d4f69e48f8fa1e9d693c36 Mon Sep 17 00:00:00 2001 From: Chalard Jean Date: Thu, 13 Dec 2018 23:32:01 +0900 Subject: [PATCH 2/4] [MS04] Add an executor to the memory store. 
Test: atest IpMemoryStoreServiceTest atest IpMemoryStore atest ParcelableTests Bug: 116512211 Change-Id: I67797b3ff83fa9de4c813035204d4542d1fcc14e --- .../ipmemorystore/IpMemoryStoreService.java | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreService.java b/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreService.java index 98dc9884b2f56..55a72190eff4b 100644 --- a/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreService.java +++ b/services/ipmemorystore/java/com/android/server/net/ipmemorystore/IpMemoryStoreService.java @@ -31,6 +31,9 @@ import android.net.ipmemorystore.IOnStatusListener; import android.net.ipmemorystore.NetworkAttributesParcelable; import android.util.Log; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + /** * Implementation for the IP memory store. * This component offers specialized services for network components to store and retrieve @@ -41,11 +44,14 @@ import android.util.Log; */ public class IpMemoryStoreService extends IIpMemoryStore.Stub { private static final String TAG = IpMemoryStoreService.class.getSimpleName(); + private static final int MAX_CONCURRENT_THREADS = 4; @NonNull final Context mContext; @Nullable final SQLiteDatabase mDb; + @NonNull + final ExecutorService mExecutor; /** * Construct an IpMemoryStoreService object. @@ -75,6 +81,37 @@ public class IpMemoryStoreService extends IIpMemoryStore.Stub { db = null; } mDb = db; + // The work-stealing thread pool executor will spawn threads as needed up to + // the max only when there is no free thread available. 
This generally behaves + // exactly like one would expect it intuitively : + // - When work arrives, it will spawn a new thread iff there are no available threads + // - When there is no work to do it will shutdown threads after a while (the while + // being equal to 2 seconds (not configurable) when max threads are spun up and + // twice as much for every one less thread) + // - When all threads are busy the work is enqueued and waits for any worker + // to become available. + // Because the stealing pool is made for very heavily parallel execution of + // small tasks that spawn others, it creates a queue per thread that in this + // case is overhead. However, the three behaviors above make it a superior + // choice to cached or fixedThreadPoolExecutor, neither of which can actually + // enqueue a task waiting for a thread to be free. This can probably be solved + // with judicious subclassing of ThreadPoolExecutor, but that's a lot of dangerous + // complexity for little benefit in this case. + mExecutor = Executors.newWorkStealingPool(MAX_CONCURRENT_THREADS); + } + + /** + * Shutdown the memory store service, cancelling running tasks and dropping queued tasks. + * + * This is provided to give a way to clean up, and is meant to be available in case of an + * emergency shutdown. + */ + public void shutdown() { + // By contrast with ExecutorService#shutdown, ExecutorService#shutdownNow tries + // to cancel the existing tasks, and does not wait for completion. It does not + // guarantee the threads can be terminated in any given amount of time. + mExecutor.shutdownNow(); + if (mDb != null) mDb.close(); } /** From 8fe503f192ffb405cc5983f46c4db8e49a5f09a0 Mon Sep 17 00:00:00 2001 From: Chalard Jean Date: Fri, 14 Dec 2018 00:59:53 +0900 Subject: [PATCH 3/4] [MS05] Pretty print the data classes of IPMS. Test: A commando of monkeys has been dispatched to make sure this pretty-printing is very, very pretty. 
Also one human has made sure of this by printing instances to the log. Bug: 116512211 Change-Id: I7df77a997a982a0b3d444502cc69de5544f70736 --- .../net/ipmemorystore/NetworkAttributes.java | 49 +++++++++++++++++++ .../ipmemorystore/SameL3NetworkResponse.java | 15 ++++++ .../android/net/ipmemorystore/Status.java | 10 ++++ .../java/android/net/ipmemorystore/Utils.java | 44 +++++++++++++++++ 4 files changed, 118 insertions(+) create mode 100644 core/java/android/net/ipmemorystore/Utils.java diff --git a/core/java/android/net/ipmemorystore/NetworkAttributes.java b/core/java/android/net/ipmemorystore/NetworkAttributes.java index d7e5b2761e587..b932d2197f854 100644 --- a/core/java/android/net/ipmemorystore/NetworkAttributes.java +++ b/core/java/android/net/ipmemorystore/NetworkAttributes.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.StringJoiner; /** * A POD object to represent attributes of a single L2 network entry. 
@@ -207,4 +208,52 @@ public class NetworkAttributes { public int hashCode() { return Objects.hash(assignedV4Address, groupHint, dnsAddresses, mtu); } + + /** Pretty print */ + @Override + public String toString() { + final StringJoiner resultJoiner = new StringJoiner(" ", "{", "}"); + final ArrayList nullFields = new ArrayList<>(); + + if (null != assignedV4Address) { + resultJoiner.add("assignedV4Addr :"); + resultJoiner.add(assignedV4Address.toString()); + } else { + nullFields.add("assignedV4Addr"); + } + + if (null != groupHint) { + resultJoiner.add("groupHint :"); + resultJoiner.add(groupHint); + } else { + nullFields.add("groupHint"); + } + + if (null != dnsAddresses) { + resultJoiner.add("dnsAddr : ["); + for (final InetAddress addr : dnsAddresses) { + resultJoiner.add(addr.getHostAddress()); + } + resultJoiner.add("]"); + } else { + nullFields.add("dnsAddr"); + } + + if (null != mtu) { + resultJoiner.add("mtu :"); + resultJoiner.add(mtu.toString()); + } else { + nullFields.add("mtu"); + } + + if (!nullFields.isEmpty()) { + resultJoiner.add("; Null fields : ["); + for (final String field : nullFields) { + resultJoiner.add(field); + } + resultJoiner.add("]"); + } + + return resultJoiner.toString(); + } } diff --git a/core/java/android/net/ipmemorystore/SameL3NetworkResponse.java b/core/java/android/net/ipmemorystore/SameL3NetworkResponse.java index 0cb37e9f75b40..d040dcc3d6088 100644 --- a/core/java/android/net/ipmemorystore/SameL3NetworkResponse.java +++ b/core/java/android/net/ipmemorystore/SameL3NetworkResponse.java @@ -128,4 +128,19 @@ public class SameL3NetworkResponse { public int hashCode() { return Objects.hash(l2Key1, l2Key2, confidence); } + + @Override + /** Pretty print */ + public String toString() { + switch (getNetworkSameness()) { + case NETWORK_SAME: + return "\"" + l2Key1 + "\" same L3 network as \"" + l2Key2 + "\""; + case NETWORK_DIFFERENT: + return "\"" + l2Key1 + "\" different L3 network from \"" + l2Key2 + "\""; + case 
NETWORK_NEVER_CONNECTED: + return "\"" + l2Key1 + "\" can't be tested against \"" + l2Key2 + "\""; + default: + return "Buggy sameness value ? \"" + l2Key1 + "\", \"" + l2Key2 + "\""; + } + } } diff --git a/core/java/android/net/ipmemorystore/Status.java b/core/java/android/net/ipmemorystore/Status.java index 2cec135322adf..95e504224ac74 100644 --- a/core/java/android/net/ipmemorystore/Status.java +++ b/core/java/android/net/ipmemorystore/Status.java @@ -49,4 +49,14 @@ public class Status { public boolean isSuccess() { return SUCCESS == resultCode; } + + /** Pretty print */ + @Override + public String toString() { + switch (resultCode) { + case SUCCESS: return "SUCCESS"; + case ERROR_DATABASE_CANNOT_BE_OPENED: return "DATABASE CANNOT BE OPENED"; + default: return "Unknown value ?!"; + } + } } diff --git a/core/java/android/net/ipmemorystore/Utils.java b/core/java/android/net/ipmemorystore/Utils.java new file mode 100644 index 0000000000000..73d8c83acdd98 --- /dev/null +++ b/core/java/android/net/ipmemorystore/Utils.java @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package android.net.ipmemorystore; + +import android.annotation.NonNull; + +/** {@hide} */ +public class Utils { + /** Pretty print */ + public static String blobToString(final Blob blob) { + final StringBuilder sb = new StringBuilder("Blob : ["); + if (blob.data.length <= 24) { + appendByteArray(sb, blob.data, 0, blob.data.length); + } else { + appendByteArray(sb, blob.data, 0, 16); + sb.append("..."); + appendByteArray(sb, blob.data, blob.data.length - 8, blob.data.length); + } + sb.append("]"); + return sb.toString(); + } + + // Adds the hex representation of the array between the specified indices (inclusive, exclusive) + private static void appendByteArray(@NonNull final StringBuilder sb, @NonNull final byte[] ar, + final int from, final int to) { + for (int i = from; i < to; ++i) { + sb.append(String.format("%02X", ar[i])); + } + } +} From 5e5c764762233f6eff9d11658a4563f770ed88cb Mon Sep 17 00:00:00 2001 From: Chalard Jean Date: Tue, 18 Dec 2018 18:47:12 +0900 Subject: [PATCH 4/4] [MS06] Add the relevance utils. This models the relevance of the network. Test: This contains new tests, and they pass. 
Bug: 116512211 Change-Id: I09167532c4041a12abf749454138d56a764b22a6 --- .../net/ipmemorystore/RelevanceUtils.java | 307 ++++++++++++++++++ .../ipmemorystore/RelevanceUtilsTests.java | 149 +++++++++ 2 files changed, 456 insertions(+) create mode 100644 services/ipmemorystore/java/com/android/server/net/ipmemorystore/RelevanceUtils.java create mode 100644 tests/net/java/com/android/server/net/ipmemorystore/RelevanceUtilsTests.java diff --git a/services/ipmemorystore/java/com/android/server/net/ipmemorystore/RelevanceUtils.java b/services/ipmemorystore/java/com/android/server/net/ipmemorystore/RelevanceUtils.java new file mode 100644 index 0000000000000..aa454008958d4 --- /dev/null +++ b/services/ipmemorystore/java/com/android/server/net/ipmemorystore/RelevanceUtils.java @@ -0,0 +1,307 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.android.server.net.ipmemorystore; + +import com.android.internal.annotations.VisibleForTesting; + +/** + * A class containing the logic around the relevance value for + * IP Memory Store. + * + * @hide + */ +public class RelevanceUtils { + /** + * The relevance is a decaying value that gets lower and lower until it + * reaches 0 after some time passes. 
It follows an exponential decay law, + * dropping slowly at first then faster and faster, because a network is + * likely to be visited again if it was visited not long ago, and the longer + * it hasn't been visited the more likely it is that it won't be visited + * again. For example, a network visited on holiday should stay fresh for + * the duration of the holiday and persist for a while, but after the venue + * hasn't been visited for a while it should quickly be discarded. What + * should accelerate forgetting the network is extended periods without + * visits, so that occasional venues get discarded but regular visits keep + * the network relevant, even if the visits are infrequent. + * + * This function must be stable by iteration, meaning that adjusting the same value + * for different dates iteratively multiple times should give the same result. + * Formally, if f is the decay function that associates a relevance x at a date d1 + * to the value at ulterior date d3, then for any date d2 between d1 and d3 : + * f(x, d3 - d1) = f(f(x, d3 - d2), d2 - d1). Intuitively, this property simply + * means it should be the same to compute and store back the value after two months, + * or to do it once after one month, store it back, and do it again after another + * months has passed. + * The pair of the relevance and date define the entire curve, so any pair + * of values on the curve will define the same curve. Setting one of them to a + * constant, so as not to have to store it, means the other one will always suffice + * to describe the curve. For example, only storing the date for a known, constant + * value of the relevance is an efficient way of remembering this information (and + * to compare relevances together, as f is monotonically decreasing). 
+ * + *** Choosing the function : + * Functions of the kind described above are standard exponential decay functions + * like the ones that govern atomic decay where the value at any given date can be + * computed uniformly from the value at a previous date and the time elapsed since + * that date. It is simple to picture this kind of function as one where after a + * given period of time called the half-life, the relevance value will have been + * halved. Decay of this kind is expressed in function of the previous value by + * functions like + * f(x, t) = x * F ^ (t / L) + * ...where x is the value, t is the elapsed time, L is the half-life (or more + * generally the F-th-life) and F the decay factor (typically 0.5, hence why L is + * usually called the half-life). The ^ symbol here is used for exponentiation. + * Or, starting at a given M for t = 0 : + * f(t) = M * F ^ (t / L) + * + * Because a line in the store needs to become irrelevant at some point but + * this class of functions never go to 0, a minimum cutoff has to be chosen to + * represent irrelevance. The simpler way of doing this is to simply add this + * minimum cutoff to the computation before and removing it after. + * Thus the function becomes : + * f(x, t) = ((x + K) * F ^ (t / L)) - K + * ...where K is the minimum cutoff, L the half-life, and F the factor between + * the original x and x after its half-life. Strictly speaking using the word + * "half-life" implies that F = 0.5, but the relation works for any value of F. + * + * It is easy enough to check that this function satisfies the stability + * relation that was given above for any value of F, L and K, which become + * parameters that can be defined at will. + * + * relevance + * 1.0 | + * |\ + * | \ + * | \ (this graph rendered with L = 75 days and K = 1/40) + * 0.75| ', + * | \ + * | '. + * | \. + * | \ + * 0.5 | '\ + * | ''. + * | ''. + * | ''. + * 0.25| '''.. + * | '''.. + * | ''''.... + * | '''''.......... 
+ * 0 +-------------------------------------------------------''''''''''---- + * 0 50 100 150 200 250 300 350 400 days + * + *** Choosing the parameters + * The maximum M is an arbitrary parameter that simply scales the curve. + * The tradeoff for M is pretty simple : if the relevance is going to be an + * integer, the bigger M is the more precision there is in the relevance. + * However, values of M that are easy for humans to read are preferable to + * help debugging, and a suitably low value may be enough to ensure there + * won't be integer overflows in intermediate computations. + * A value of 1_000_000 probably is plenty for precision, while still in the + * low range of what ints can represent. + * + * F and L are parameters to be chosen arbitrarily and have an impact on how + * fast the relevance will be decaying at first, keeping in mind that + * the 400 days value and the cap stay the same. In simpler words, F and L + * define the steepness of the curve. + * To keep things simple (and familiar) F is arbitrarily chosen to be 0.5, and + * L is set to 200 days visually to achieve the desired effect. Refer to the + * illustration above to get a feel of how that feels. + * + * Moreover, the memory store works on an assumption that the relevance should + * be capped, and that an entry with capped relevance should decay in 400 days. + * This is on premises that the networks a device will need to remember the + * longest should be networks visited about once a year. 
+ * For this reason, the relevance is at the maximum M 400 days before expiry : + * f(M, 400 days) = 0 + * From replacing this with the value of the function, K can then be derived + * from the values of M, F and L : + * (M + K) * F ^ (t / L) - K = 0 + * K = M * F ^ (400 days / L) / (1 - F ^ (400 days / L)) + * Replacing with actual values this gives : + * K = 1_000_000 * 0.5 ^ (400 / 200) / (1 - 0.5 ^ (400 / 200)) + * = 1_000_000 / 3 ≈ 333_333.3 + * This ensures the function has the desired profile, the desired value at + * cap, and the desired value at expiry. + * + *** Useful relations + * Let's define the expiry time for any given relevance x as the interval of + * time such as : + * f(x, expiry) = 0 + * which can be rewritten + * ((x + K) * F ^ (expiry / L)) = K + * ...giving an expression of the expiry in function of the relevance x as + * expiry = L * logF(K / (x + K)) + * Conversely the relevance x can be expressed in function of the expiry as + * x = K / F ^ (expiry / L) - K + * These relations are useful in utility functions. + * + *** Bumping things up + * The last issue therefore is to decide how to bump up the relevance. The + * simple approach is to simply lift up the curve a little bit by a constant + * normalized amount, delaying the time of expiry. For example increasing + * the relevance by an amount I gives : + * x2 = x1 + I + * x2 and x1 correspond to two different expiry times expiry2 and expiry1, + * and replacing x1 and x2 in the relation above with their expression in + * function of the expiry comes : + * K / F ^ (expiry2 / L) - K = K / F ^ (expiry1 / L) - K + I + * which resolves to : + * expiry2 = L * logF(K / (I + K / F ^ (expiry1 / L))) + * + * In this implementation, the bump is defined as 1/25th of the cap for + * the relevance. This means a network will be remembered for the maximum + * period of 400 days if connected 25 times in succession not accounting + * for decay. 
Of course decay actually happens so it will take more than 25 + * connections for any given network to actually reach the cap, but because + * decay is slow at first, it is a good estimate of how fast cap happens. + * + * Specifically, it gives the following four results : + * - A network that a device connects to once hits irrelevance about 32.7 days after + * it was first registered if never connected again. + * - A network that a device connects to once a day at a fixed hour will hit the cap + * on the 27th connection. + * - A network that a device connects to once a week at a fixed hour will hit the cap + * on the 57th connection. + * - A network that a device connects to every day for 7 straight days then never again + * expires 144 days after the last connection. + * These metrics tend to match pretty well the requirements. + */ + + // TODO : make these constants configurable at runtime. Don't forget to build it so that + // changes will wipe the database, migrate the values, or otherwise make sure the relevance + // values are still meaningful. + + // How long, in milliseconds, is a capped relevance valid for, or in other + // words how many milliseconds after its relevance was set to RELEVANCE_CAP does + // any given line expire. 400 days. + @VisibleForTesting + public static final long CAPPED_RELEVANCE_LIFETIME_MS = 400L * 24 * 60 * 60 * 1000; + + // The constant that represents a normalized 1.0 value for the relevance. In other words, + // the cap for the relevance. This is referred to as M in the explanation above. + @VisibleForTesting + public static final int CAPPED_RELEVANCE = 1_000_000; + + // The decay factor. After a half-life, the relevance will have decayed by this value. + // This is referred to as F in the explanation above. + private static final double DECAY_FACTOR = 0.5; + + // The half-life. After this time, the relevance will have decayed by a factor DECAY_FACTOR. + // This is referred to as L in the explanation above. 
+ private static final long HALF_LIFE_MS = 200L * 24 * 60 * 60 * 1000;
+
+ // The constant offset (change of frame) shifting the decay curve down so the relevance
+ // reaches exactly 0 at expiry. This is referred to as K in the explanation above.
+ private static final double IRRELEVANCE_FLOOR =
+ CAPPED_RELEVANCE * powF((double) CAPPED_RELEVANCE_LIFETIME_MS / HALF_LIFE_MS)
+ / (1 - powF((double) CAPPED_RELEVANCE_LIFETIME_MS / HALF_LIFE_MS));
+
+ // How much to bump the relevance by every time a line is written to.
+ @VisibleForTesting
+ public static final int RELEVANCE_BUMP = CAPPED_RELEVANCE / 25;
+
+ // Java doesn't include a function for the logarithm in an arbitrary base, so implement it
+ private static final double LOG_DECAY_FACTOR = Math.log(DECAY_FACTOR);
+ private static double logF(final double value) {
+ return Math.log(value) / LOG_DECAY_FACTOR;
+ }
+
+ // Utility function to get a power of the decay factor, to simplify the code.
+ private static double powF(final double value) {
+ return Math.pow(DECAY_FACTOR, value);
+ }
+
+ /**
+ * Compute the value of the relevance now given an expiry date.
+ *
+ * @param expiry the date at which the column in the database expires.
+ * @return the adjusted value of the relevance for this moment in time.
+ */
+ public static int computeRelevanceForNow(final long expiry) {
+ return computeRelevanceForTargetDate(expiry, System.currentTimeMillis());
+ }
+
+ /**
+ * Compute the value of the relevance at a given date from an expiry date.
+ *
+ * Because relevance decays with time, a relevance in the past corresponds to
+ * a different relevance later.
+ *
+ * Relevance is always a positive value. 0 means not relevant at all.
+ *
+ * See the explanation at the top of this file to get the justification for this
+ * computation.
+ *
+ * @param expiry the date at which the column in the database expires.
+ * @param target the target date to adjust the relevance to.
+ * @return the adjusted value of the relevance for the target moment.
+ */ + public static int computeRelevanceForTargetDate(final long expiry, final long target) { + final long delay = expiry - target; + if (delay >= CAPPED_RELEVANCE_LIFETIME_MS) return CAPPED_RELEVANCE; + if (delay <= 0) return 0; + return (int) (IRRELEVANCE_FLOOR / powF((float) delay / HALF_LIFE_MS) - IRRELEVANCE_FLOOR); + } + + /** + * Compute the expiry duration adjusted up for a new fresh write. + * + * Every time data is written to the memory store for a given line, the + * relevance is bumped up by a certain amount, which will boost the priority + * of this line for computation of group attributes, and delay (possibly + * indefinitely, if the line is accessed regularly) forgetting the data stored + * in that line. + * As opposed to bumpExpiryDate, this function uses a duration from now to expiry. + * + * See the explanation at the top of this file for a justification of this computation. + * + * @param oldExpiryDuration the old expiry duration in milliseconds from now. + * @return the expiry duration representing a bumped up relevance value. + */ + public static long bumpExpiryDuration(final long oldExpiryDuration) { + // L * logF(K / (I + K / F ^ (expiry1 / L))), as documented above + final double divisionFactor = powF(((double) oldExpiryDuration) / HALF_LIFE_MS); + final double oldRelevance = IRRELEVANCE_FLOOR / divisionFactor; + final long newDuration = + (long) (HALF_LIFE_MS * logF(IRRELEVANCE_FLOOR / (RELEVANCE_BUMP + oldRelevance))); + return Math.min(newDuration, CAPPED_RELEVANCE_LIFETIME_MS); + } + + /** + * Compute the new expiry date adjusted up for a new fresh write. + * + * Every time data is written to the memory store for a given line, the + * relevance is bumped up by a certain amount, which will boost the priority + * of this line for computation of group attributes, and delay (possibly + * indefinitely, if the line is accessed regularly) forgetting the data stored + * in that line. 
+ * As opposed to bumpExpiryDuration, this function takes the old timestamp and returns the + * new timestamp. + * + * {@see bumpExpiryDuration}, and keep in mind that the bump depends on when this is called, + * because the relevance decays exponentially, therefore bumping up a high relevance (for a + * date far in the future) is less potent than bumping up a low relevance (for a date in + * a close future). + * + * @param oldExpiryDate the old date of expiration. + * @return the new expiration date after the relevance bump. + */ + public static long bumpExpiryDate(final long oldExpiryDate) { + final long now = System.currentTimeMillis(); + final long newDuration = bumpExpiryDuration(oldExpiryDate - now); + return now + newDuration; + } +} diff --git a/tests/net/java/com/android/server/net/ipmemorystore/RelevanceUtilsTests.java b/tests/net/java/com/android/server/net/ipmemorystore/RelevanceUtilsTests.java new file mode 100644 index 0000000000000..8d367e2fc387c --- /dev/null +++ b/tests/net/java/com/android/server/net/ipmemorystore/RelevanceUtilsTests.java @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.android.server.net.ipmemorystore;
+
+import static com.android.server.net.ipmemorystore.RelevanceUtils.CAPPED_RELEVANCE;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import android.support.test.filters.SmallTest;
+import android.support.test.runner.AndroidJUnit4;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+/** Unit tests for {@link RelevanceUtils}. */
+@SmallTest
+@RunWith(AndroidJUnit4.class)
+public class RelevanceUtilsTests {
+ @Test
+ public void testComputeRelevanceForTargetDate() {
+ final long dayInMillis = 24L * 60 * 60 * 1000;
+ final long base = 1_000_000L; // any given point in time
+ // Relevance when the network expires in 1000 years must be capped
+ assertEquals(CAPPED_RELEVANCE, RelevanceUtils.computeRelevanceForTargetDate(
+ base + 1000L * dayInMillis, base));
+ // Relevance when expiry is before the date must be 0
+ assertEquals(0, RelevanceUtils.computeRelevanceForTargetDate(base - 1, base));
+ // Make sure the relevance for a given target date is higher if the expiry is further
+ // in the future
+ assertTrue(RelevanceUtils.computeRelevanceForTargetDate(base + 100 * dayInMillis, base)
+ < RelevanceUtils.computeRelevanceForTargetDate(base + 150 * dayInMillis, base));
+
+ // Make sure the relevance falls slower as the expiry is closing in. This is to ensure
+ // the decay is indeed exponential (fast at first, then slowing as expiry nears).
+ final int relevanceAtExpiry = RelevanceUtils.computeRelevanceForTargetDate(base, base); + final int relevance50DaysBeforeExpiry = + RelevanceUtils.computeRelevanceForTargetDate(base + 50 * dayInMillis, base); + final int relevance100DaysBeforeExpiry = + RelevanceUtils.computeRelevanceForTargetDate(base + 100 * dayInMillis, base); + final int relevance150DaysBeforeExpiry = + RelevanceUtils.computeRelevanceForTargetDate(base + 150 * dayInMillis, base); + assertEquals(0, relevanceAtExpiry); + assertTrue(relevance50DaysBeforeExpiry - relevanceAtExpiry + < relevance100DaysBeforeExpiry - relevance50DaysBeforeExpiry); + assertTrue(relevance100DaysBeforeExpiry - relevance50DaysBeforeExpiry + < relevance150DaysBeforeExpiry - relevance100DaysBeforeExpiry); + } + + @Test + public void testIncreaseRelevance() { + long expiry = System.currentTimeMillis(); + + final long firstBump = RelevanceUtils.bumpExpiryDate(expiry); + // Though a few milliseconds might have elapsed, the first bump should push the duration + // to days in the future, so unless this test takes literal days between these two lines, + // this should always pass. + assertTrue(firstBump > expiry); + + expiry = 0; + long lastDifference = Long.MAX_VALUE; + // The relevance should be capped in at most this many steps. Otherwise, fail. + final int steps = 1000; + for (int i = 0; i < steps; ++i) { + final long newExpiry = RelevanceUtils.bumpExpiryDuration(expiry); + if (newExpiry == expiry) { + // The relevance should be capped. Make sure it is, then exit without failure. + assertEquals(newExpiry, RelevanceUtils.CAPPED_RELEVANCE_LIFETIME_MS); + return; + } + // Make sure the new expiry is further in the future than last time. + assertTrue(newExpiry > expiry); + // Also check that it was not bumped as much as the last bump, because the + // decay must be exponential. 
+ assertTrue(newExpiry - expiry < lastDifference);
+ lastDifference = newExpiry - expiry;
+ expiry = newExpiry;
+ }
+ fail("Relevance failed to go to the maximum value after " + steps + " bumps");
+ }
+
+ @Test
+ public void testContinuity() {
+ final long expiry = System.currentTimeMillis();
+
+ // Relevance at or beyond the capped lifetime before expiry should be the cap.
+ final int relevanceBeforeMaxLifetime = RelevanceUtils.computeRelevanceForTargetDate(expiry,
+ expiry - (RelevanceUtils.CAPPED_RELEVANCE_LIFETIME_MS + 1_000_000));
+ assertEquals(relevanceBeforeMaxLifetime, CAPPED_RELEVANCE);
+ final int relevanceForMaxLifetime = RelevanceUtils.computeRelevanceForTargetDate(expiry,
+ expiry - RelevanceUtils.CAPPED_RELEVANCE_LIFETIME_MS);
+ assertEquals(relevanceForMaxLifetime, CAPPED_RELEVANCE);
+
+ // If the max relevance is reached at the cap lifetime, one millisecond less than this
+ // should be very close. Strictly speaking this is a bit brittle, but it should be
+ // good enough for the purposes of the memory store.
+ final int relevanceForOneMillisecLessThanCap = RelevanceUtils.computeRelevanceForTargetDate(
+ expiry, expiry - RelevanceUtils.CAPPED_RELEVANCE_LIFETIME_MS + 1);
+ assertTrue(relevanceForOneMillisecLessThanCap <= CAPPED_RELEVANCE);
+ assertTrue(relevanceForOneMillisecLessThanCap >= CAPPED_RELEVANCE - 10);
+
+ // Likewise the relevance one millisecond before expiry should be very close to 0. It's
+ // fine if it rounds down to 0.
+ final int relevanceOneMillisecBeforeExpiry = RelevanceUtils.computeRelevanceForTargetDate( + expiry, expiry - 1); + assertTrue(relevanceOneMillisecBeforeExpiry <= 10); + assertTrue(relevanceOneMillisecBeforeExpiry >= 0); + + final int relevanceAtExpiry = RelevanceUtils.computeRelevanceForTargetDate(expiry, expiry); + assertEquals(relevanceAtExpiry, 0); + final int relevanceAfterExpiry = RelevanceUtils.computeRelevanceForTargetDate(expiry, + expiry + 1_000_000); + assertEquals(relevanceAfterExpiry, 0); + } + + // testIncreaseRelevance makes sure bumping the expiry continuously always yields a + // monotonically increasing date as a side effect, but this tests that the relevance (as + // opposed to the expiry date) increases monotonically with increasing periods. + @Test + public void testMonotonicity() { + // Hopefully the relevance is granular enough to give a different value for every one + // of this number of steps. + final int steps = 40; + final long expiry = System.currentTimeMillis(); + + int lastRelevance = -1; + for (int i = 0; i < steps; ++i) { + final long date = expiry - i * (RelevanceUtils.CAPPED_RELEVANCE_LIFETIME_MS / steps); + final int relevance = RelevanceUtils.computeRelevanceForTargetDate(expiry, date); + assertTrue(relevance > lastRelevance); + lastRelevance = relevance; + } + } +}