diff --git a/src/main/java/com/android/volley/toolbox/CacheHeader.java b/src/main/java/com/android/volley/toolbox/CacheHeader.java deleted file mode 100644 index 5d05e78b..00000000 --- a/src/main/java/com/android/volley/toolbox/CacheHeader.java +++ /dev/null @@ -1,205 +0,0 @@ -package com.android.volley.toolbox; - -import androidx.annotation.Nullable; -import com.android.volley.Cache; -import com.android.volley.Header; -import com.android.volley.VolleyLog; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; - -/** Handles holding onto the cache headers for an entry. */ -class CacheHeader { - /** Magic number for current version of cache file format. */ - private static final int CACHE_MAGIC = 0x20150306; - - /** Bits required to write 6 longs and 1 int. */ - private static final int HEADER_SIZE = 52; - - /** - * The size of the data identified by this CacheHeader on disk (both header and data). - * - *
<p>
Must be set by the caller after it has been calculated. - * - *
<p>
This is not serialized to disk. - */ - long size; - - /** The key that identifies the cache entry. */ - final String key; - - /** ETag for cache coherence. */ - @Nullable final String etag; - - /** Date of this response as reported by the server. */ - final long serverDate; - - /** The last modified date for the requested object. */ - final long lastModified; - - /** TTL for this record. */ - final long ttl; - - /** Soft TTL for this record. */ - final long softTtl; - - /** Headers from the response resulting in this cache entry. */ - final List
<Header>
allResponseHeaders; - - private CacheHeader( - String key, - String etag, - long serverDate, - long lastModified, - long ttl, - long softTtl, - List<Header>
allResponseHeaders) { - this.key = key; - this.etag = "".equals(etag) ? null : etag; - this.serverDate = serverDate; - this.lastModified = lastModified; - this.ttl = ttl; - this.softTtl = softTtl; - this.allResponseHeaders = allResponseHeaders; - } - - /** - * Instantiates a new CacheHeader object. - * - * @param key The key that identifies the cache entry - * @param entry The cache entry. - */ - CacheHeader(String key, Cache.Entry entry) { - this( - key, - entry.etag, - entry.serverDate, - entry.lastModified, - entry.ttl, - entry.softTtl, - getAllResponseHeaders(entry)); - } - - private static List<Header>
getAllResponseHeaders(Cache.Entry entry) { - // If the entry contains all the response headers, use that field directly. - if (entry.allResponseHeaders != null) { - return entry.allResponseHeaders; - } - - // Legacy fallback - copy headers from the map. - return HttpHeaderParser.toAllHeaderList(entry.responseHeaders); - } - - /** - * Reads the header from a CountingInputStream and returns a CacheHeader object. - * - * @param is The InputStream to read from. - * @throws IOException if fails to read header - */ - static CacheHeader readHeader(DiskBasedCache.CountingInputStream is) throws IOException { - int magic = DiskBasedCacheUtility.readInt(is); - if (magic != CACHE_MAGIC) { - // don't bother deleting, it'll get pruned eventually - throw new IOException(); - } - String key = DiskBasedCacheUtility.readString(is); - String etag = DiskBasedCacheUtility.readString(is); - long serverDate = DiskBasedCacheUtility.readLong(is); - long lastModified = DiskBasedCacheUtility.readLong(is); - long ttl = DiskBasedCacheUtility.readLong(is); - long softTtl = DiskBasedCacheUtility.readLong(is); - List<Header>
allResponseHeaders = DiskBasedCacheUtility.readHeaderList(is); - return new CacheHeader( - key, etag, serverDate, lastModified, ttl, softTtl, allResponseHeaders); - } - - /** - * Reads the header from a ByteBuffer and returns a CacheHeader object. - * - * @param buffer Buffer to get header info from. - * @throws IOException if fails to read header - */ - @Nullable - static CacheHeader readHeader(final ByteBuffer buffer) { - try { - int magic = buffer.getInt(); - if (magic != CACHE_MAGIC) { - return null; - } - String key = DiskBasedCacheUtility.readString(buffer); - String etag = DiskBasedCacheUtility.readString(buffer); - long serverDate = buffer.getLong(); - long lastModified = buffer.getLong(); - long ttl = buffer.getLong(); - long softTtl = buffer.getLong(); - List<Header>
allResponseHeaders = DiskBasedCacheUtility.readHeaderList(buffer); - return new CacheHeader( - key, etag, serverDate, lastModified, ttl, softTtl, allResponseHeaders); - } catch (IOException e) { - VolleyLog.e(e, "Failed to read CacheHeader"); - return null; - } catch (BufferUnderflowException e) { - VolleyLog.e(e, "Ran out of room while reading from the buffer"); - return null; - } - } - - /** Creates a cache entry for the specified data. */ - Cache.Entry toCacheEntry(byte[] data) { - Cache.Entry e = new Cache.Entry(); - e.data = data; - e.etag = etag; - e.serverDate = serverDate; - e.lastModified = lastModified; - e.ttl = ttl; - e.softTtl = softTtl; - e.responseHeaders = HttpHeaderParser.toHeaderMap(allResponseHeaders); - e.allResponseHeaders = Collections.unmodifiableList(allResponseHeaders); - return e; - } - - /** Writes the contents of this CacheHeader to the specified OutputStream. */ - boolean writeHeader(OutputStream os) { - try { - DiskBasedCacheUtility.writeInt(os, CACHE_MAGIC); - DiskBasedCacheUtility.writeString(os, key); - DiskBasedCacheUtility.writeString(os, etag == null ? "" : etag); - DiskBasedCacheUtility.writeLong(os, serverDate); - DiskBasedCacheUtility.writeLong(os, lastModified); - DiskBasedCacheUtility.writeLong(os, ttl); - DiskBasedCacheUtility.writeLong(os, softTtl); - DiskBasedCacheUtility.writeHeaderList(allResponseHeaders, os); - os.flush(); - return true; - } catch (IOException e) { - VolleyLog.d("%s", e.toString()); - return false; - } - } - - /** Writes the contents of this CacheHeader to the specified ByteBuffer. */ - void writeHeader(ByteBuffer buffer) throws IOException { - buffer.putInt(CACHE_MAGIC); - DiskBasedCacheUtility.writeString(buffer, key); - DiskBasedCacheUtility.writeString(buffer, etag); - buffer.putLong(serverDate); - buffer.putLong(lastModified); - buffer.putLong(ttl); - buffer.putLong(softTtl); - DiskBasedCacheUtility.writeHeaderList(allResponseHeaders, buffer); - } - - /** Gets the size of the header in bytes. */ - int getHeaderSize() throws IOException { - int size = 0; - size += key.getBytes("UTF-8").length; - if (etag != null) { - size += etag.getBytes("UTF-8").length; - } - size += DiskBasedCacheUtility.headerListSize(allResponseHeaders); - return size + HEADER_SIZE; - } -} diff --git a/src/main/java/com/android/volley/toolbox/DiskBasedAsyncCache.java b/src/main/java/com/android/volley/toolbox/DiskBasedAsyncCache.java deleted file mode 100644 index 18112a9d..00000000 --- a/src/main/java/com/android/volley/toolbox/DiskBasedAsyncCache.java +++ /dev/null @@ -1,462 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.android.volley.toolbox; - -import android.os.Build; -import android.text.TextUtils; -import androidx.annotation.Nullable; -import androidx.annotation.RequiresApi; -import com.android.volley.AsyncCache; -import com.android.volley.Cache; -import com.android.volley.VolleyLog; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.AsynchronousFileChannel; -import java.nio.channels.CompletionHandler; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardOpenOption; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; - -/** - * AsyncCache implementation that uses Java NIO's AsynchronousFileChannel to perform asynchronous - * disk reads and writes. - */ -@RequiresApi(Build.VERSION_CODES.O) -public class DiskBasedAsyncCache extends AsyncCache { - - /** Map of the Key, CacheHeader pairs */ - private final Map mEntries = new LinkedHashMap<>(16, .75f, true); - - /** The supplier for the root directory to use for the cache. */ - private final FileSupplier mRootDirectorySupplier; - - /** Total amount of space currently used by the cache in bytes. */ - private long mTotalSize = 0; - - /** The maximum size of the cache in bytes. */ - private final int mMaxCacheSizeInBytes; - - /** - * Constructs an instance of the DiskBasedAsyncCache at the specified directory. - * - * @param rootDirectorySupplier The root directory supplier of the cache. - */ - private DiskBasedAsyncCache(FileSupplier rootDirectorySupplier, int maxCacheSizeInBytes) { - mRootDirectorySupplier = rootDirectorySupplier; - mMaxCacheSizeInBytes = maxCacheSizeInBytes; - } - - /** Returns the cache entry with the specified key if it exists, null otherwise. */ - @Override - public void get(final String key, final OnGetCompleteCallback callback) { - final CacheHeader entry = mEntries.get(key); - // if the entry does not exist, return null. - if (entry == null) { - callback.onGetComplete(null); - return; - } - final File file = DiskBasedCacheUtility.getFileForKey(key, mRootDirectorySupplier); - Path path = Paths.get(file.getPath()); - - // channel we can close after IOException - AsynchronousFileChannel channel = null; - try { - final AsynchronousFileChannel afc = - AsynchronousFileChannel.open(path, StandardOpenOption.READ); - channel = afc; - final int size = (int) file.length(); - final ByteBuffer buffer = ByteBuffer.allocate(size); - afc.read( - /* destination= */ buffer, - /* position= */ 0, - /* attachment= */ null, - new CompletionHandler() { - @Override - public void completed(Integer result, Void v) { - closeChannel(afc, "completed read"); - if (size != result) { - VolleyLog.e( - "File changed while reading: %s", file.getAbsolutePath()); - deleteFileAndInvokeCallback(key, callback, file); - return; - } - buffer.flip(); - CacheHeader entryOnDisk = CacheHeader.readHeader(buffer); - if (entryOnDisk == null) { - // BufferUnderflowException was thrown while reading header - deleteFileAndInvokeCallback(key, callback, file); - } else if (!TextUtils.equals(key, entryOnDisk.key)) { - // File shared by two keys and holds data for a different entry! 
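// A sketch of why this key check exists (it mirrors DiskBasedCacheUtility.getFilenameForKey,
// defined later in this diff): file names are derived from the hash codes of the two halves
// of the key, so two distinct keys can map to the same file, and only the key recorded in
// the on-disk header identifies the real owner.
//
//   static String filenameFor(String key) {   // hypothetical standalone copy of the scheme
//       int half = key.length() / 2;
//       return String.valueOf(key.substring(0, half).hashCode())
//               + String.valueOf(key.substring(half).hashCode());
//   }
//
// When filenameFor(a).equals(filenameFor(b)) for a != b, the last writer owns the file,
// which is exactly what the TextUtils.equals(key, entryOnDisk.key) guard detects.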
- VolleyLog.d( - "%s: key=%s, found=%s", - file.getAbsolutePath(), key, entryOnDisk.key); - deleteFileAndInvokeCallback(key, callback, file); - } else { - byte[] data = new byte[buffer.remaining()]; - buffer.get(data); - callback.onGetComplete(entry.toCacheEntry(data)); - } - } - - @Override - public void failed(Throwable exc, Void ignore) { - closeChannel(afc, "failed read"); - VolleyLog.e(exc, "Failed to read file %s", file.getAbsolutePath()); - deleteFileAndInvokeCallback(key, callback, file); - } - }); - } catch (IOException e) { - VolleyLog.e(e, "Failed to read file %s", file.getAbsolutePath()); - closeChannel(channel, "IOException"); - deleteFileAndInvokeCallback(key, callback, file); - } - } - - /** Puts the cache entry with a specified key into the cache. */ - @Override - public void put(final String key, Cache.Entry entry, final OnWriteCompleteCallback callback) { - if (DiskBasedCacheUtility.wouldBePruned( - mTotalSize, entry.data.length, mMaxCacheSizeInBytes)) { - return; - } - - final File file = DiskBasedCacheUtility.getFileForKey(key, mRootDirectorySupplier); - Path path = Paths.get(file.getPath()); - - // channel we can close after IOException - AsynchronousFileChannel channel = null; - try { - final AsynchronousFileChannel afc = - AsynchronousFileChannel.open( - path, StandardOpenOption.WRITE, StandardOpenOption.CREATE); - channel = afc; - final CacheHeader header = new CacheHeader(key, entry); - int headerSize = header.getHeaderSize(); - final int size = entry.data.length + headerSize; - ByteBuffer buffer = ByteBuffer.allocate(size); - header.writeHeader(buffer); - buffer.put(entry.data); - buffer.flip(); - afc.write( - /* source= */ buffer, - /* position= */ 0, - /* attachment= */ null, - new CompletionHandler() { - @Override - public void completed(Integer resultLen, Void ignore) { - if (closeChannel(afc, "completed write")) { - if (resultLen != size) { - VolleyLog.e( - "File changed while writing: %s", - file.getAbsolutePath()); - deleteFile(file); - callback.onWriteComplete(); - return; - } - header.size = resultLen; - mTotalSize = - DiskBasedCacheUtility.putEntry( - key, header, mTotalSize, mEntries); - mTotalSize = - DiskBasedCacheUtility.pruneIfNeeded( - mTotalSize, - mMaxCacheSizeInBytes, - mEntries, - mRootDirectorySupplier); - } else { - deleteFile(file); - } - - callback.onWriteComplete(); - } - - @Override - public void failed(Throwable throwable, Void ignore) { - VolleyLog.e( - throwable, "Failed to read file %s", file.getAbsolutePath()); - deleteFile(file); - callback.onWriteComplete(); - closeChannel(afc, "failed read"); - } - }); - } catch (IOException e) { - if (closeChannel(channel, "IOException")) { - deleteFile(file); - initializeIfRootDirectoryDeleted(); - } - callback.onWriteComplete(); - } - } - - /** Clears the cache. Deletes all cached files from disk. */ - @Override - public void clear(OnWriteCompleteCallback callback) { - File[] files = mRootDirectorySupplier.get().listFiles(); - if (files != null) { - for (File file : files) { - deleteFile(file); - } - } - mEntries.clear(); - mTotalSize = 0; - VolleyLog.d("Cache cleared."); - callback.onWriteComplete(); - } - - /** - * Initializes the cache. We are suppressing warnings, since we create the futures above and - * there is no chance this fails. 
- */ - @Override - @SuppressWarnings("FutureReturnValueIgnored") - public void initialize(final OnWriteCompleteCallback callback) { - File rootDirectory = mRootDirectorySupplier.get(); - if (!rootDirectory.exists()) { - createCacheDirectory(rootDirectory); - callback.onWriteComplete(); - return; - } - File[] files = rootDirectory.listFiles(); - if (files == null) { - callback.onWriteComplete(); - return; - } - List> reads = new ArrayList<>(); - for (File file : files) { - Path path = file.toPath(); - AsynchronousFileChannel channel = null; - final int entrySize = (int) file.length(); - final ByteBuffer buffer = ByteBuffer.allocate(entrySize); - final CompletableFuture fileRead = new CompletableFuture<>(); - try { - channel = AsynchronousFileChannel.open(path, StandardOpenOption.READ); - final AsynchronousFileChannel afc = channel; - afc.read( - buffer, - 0, - file, - new CompletionHandler() { - @Override - public void completed(Integer result, File file) { - if (entrySize != result) { - VolleyLog.e( - "File changed while reading: %s", - file.getAbsolutePath()); - deleteFile(file); - fileRead.complete(null); - return; - } - buffer.flip(); - CacheHeader entry = CacheHeader.readHeader(buffer); - if (entry != null) { - closeChannel(afc, "after successful read"); - entry.size = entrySize; - mTotalSize = - DiskBasedCacheUtility.putEntry( - entry.key, entry, mTotalSize, mEntries); - } else { - closeChannel(afc, "after failed read"); - deleteFile(file); - } - fileRead.complete(null); - } - - @Override - public void failed(Throwable throwable, File file) { - closeChannel(afc, "after failed read"); - deleteFile(file); - fileRead.complete(null); - } - }); - } catch (IOException e) { - closeChannel(channel, "IOException in initialize"); - deleteFile(file); - } - reads.add(fileRead); - } - CompletableFuture voidCompletableFuture = - CompletableFuture.allOf(reads.toArray(new CompletableFuture[0])); - - voidCompletableFuture.thenRun( - new Runnable() { - @Override - public void run() { - callback.onWriteComplete(); - } - }); - } - - /** Invalidates an entry in the cache. */ - @Override - public void invalidate( - final String key, final boolean fullExpire, final OnWriteCompleteCallback callback) { - Cache.Entry entry = null; - get( - key, - new OnGetCompleteCallback() { - @Override - public void onGetComplete(@Nullable Cache.Entry entry) { - if (entry == null) { - callback.onWriteComplete(); - } else { - entry.softTtl = 0; - if (fullExpire) { - entry.ttl = 0; - } - put( - key, - entry, - new OnWriteCompleteCallback() { - @Override - public void onWriteComplete() { - callback.onWriteComplete(); - } - }); - } - } - }); - } - - /** Removes an entry from the cache. */ - @Override - public void remove(String key, OnWriteCompleteCallback callback) { - deleteFile(DiskBasedCacheUtility.getFileForKey(key, mRootDirectorySupplier)); - mTotalSize = DiskBasedCacheUtility.removeEntry(key, mTotalSize, mEntries); - callback.onWriteComplete(); - } - - /** Re-initialize the cache if the directory was deleted. */ - private void initializeIfRootDirectoryDeleted() { - if (mRootDirectorySupplier.get().exists()) { - return; - } - VolleyLog.d("Re-initializing cache after external clearing."); - mEntries.clear(); - mTotalSize = 0; - createCacheDirectory(mRootDirectorySupplier.get()); - } - - /** - * Closes the asynchronous file channel. - * - * @param afc Channel that is being closed. - * @param endOfMessage End of error message that logs where the close is happening. 
- * @return Returns true if the channel is successfully closed or the channel was null, false if - * closing results in an IOException. - */ - private boolean closeChannel(@Nullable AsynchronousFileChannel afc, String endOfMessage) { - if (afc == null) { - return true; - } - try { - afc.close(); - return true; - } catch (IOException e) { - VolleyLog.e(e, "failed to close file after %s", endOfMessage); - return false; - } - } - - /** Deletes the specified file, and reinitializes the root if it was deleted. */ - private void deleteFile(File file) { - boolean deleted = file.delete(); - if (!deleted) { - VolleyLog.d("Could not clean up file %s", file.getAbsolutePath()); - } - } - - /** Attempts to create the root directory, logging if it was unable to. */ - private void createCacheDirectory(File rootDirectory) { - if (!rootDirectory.mkdirs()) { - VolleyLog.e("Unable to create cache dir %s", rootDirectory.getAbsolutePath()); - } - } - - /** - * Deletes the file, removes the entry from the map, and calls OnGetComplete with a null value. - * - * @param key of the file to be removed. - * @param callback to be called after removing. - * @param file to be deleted. - */ - private void deleteFileAndInvokeCallback( - String key, OnGetCompleteCallback callback, File file) { - deleteFile(file); - mTotalSize = DiskBasedCacheUtility.removeEntry(key, mTotalSize, mEntries); - callback.onGetComplete(null); - } - - /** - * Builder is used to build an instance of {@link DiskBasedAsyncCache} from values configured by - * the setters. - */ - public static class Builder { - @Nullable private FileSupplier rootDirectorySupplier = null; - @Nullable private File rootDirectory = null; - private int maxCacheSizeInBytes = DiskBasedCacheUtility.DEFAULT_DISK_USAGE_BYTES; - - /** - * Sets the root directory of the cache. Must be called if {@link - * Builder#setRootDirectorySupplier(FileSupplier)} is not. - */ - public Builder setRootDirectory(File rootDirectory) { - this.rootDirectory = rootDirectory; - return this; - } - - /** - * Sets the root directory supplier of the cache. Must be called if {@link - * Builder#setRootDirectory(File)} is not. - */ - public Builder setRootDirectorySupplier(FileSupplier rootDirectorySupplier) { - this.rootDirectorySupplier = rootDirectorySupplier; - return this; - } - - /** - * Sets the max size of the cache in bytes. Will default to {@link - * DiskBasedCacheUtility#DEFAULT_DISK_USAGE_BYTES} if not called. - */ - public Builder setMaxCacheSizeInBytes(int maxCacheSizeInBytes) { - this.maxCacheSizeInBytes = maxCacheSizeInBytes; - return this; - } - - /** Builds a DiskBasedAsyncCache from the provided parameters.
*/ - public DiskBasedAsyncCache build() { - if (rootDirectory == null && rootDirectorySupplier == null) { - throw new IllegalArgumentException("Must set either file or supplier"); - } - if (rootDirectorySupplier == null) { - rootDirectorySupplier = - new FileSupplier() { - @Override - public File get() { - return rootDirectory; - } - }; - } - return new DiskBasedAsyncCache(rootDirectorySupplier, maxCacheSizeInBytes); - } - } -} diff --git a/src/main/java/com/android/volley/toolbox/DiskBasedCache.java b/src/main/java/com/android/volley/toolbox/DiskBasedCache.java index ade5dadf..d4310e0a 100644 --- a/src/main/java/com/android/volley/toolbox/DiskBasedCache.java +++ b/src/main/java/com/android/volley/toolbox/DiskBasedCache.java @@ -16,13 +16,16 @@ package com.android.volley.toolbox; +import android.os.SystemClock; import android.text.TextUtils; import androidx.annotation.VisibleForTesting; import com.android.volley.Cache; +import com.android.volley.Header; import com.android.volley.VolleyLog; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.DataInputStream; +import java.io.EOFException; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; @@ -31,7 +34,11 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; /** @@ -54,6 +61,15 @@ public class DiskBasedCache implements Cache { /** The maximum size of the cache in bytes. */ private final int mMaxCacheSizeInBytes; + /** Default maximum disk usage in bytes. */ + private static final int DEFAULT_DISK_USAGE_BYTES = 5 * 1024 * 1024; + + /** High water mark percentage for the cache */ + @VisibleForTesting static final float HYSTERESIS_FACTOR = 0.9f; + + /** Magic number for current version of cache file format. */ + private static final int CACHE_MAGIC = 0x20150306; + /** * Constructs an instance of the DiskBasedCache at the specified directory. * @@ -93,7 +109,7 @@ public DiskBasedCache(FileSupplier rootDirectorySupplier, int maxCacheSizeInByte * @param rootDirectory The root directory of the cache. */ public DiskBasedCache(File rootDirectory) { - this(rootDirectory, DiskBasedCacheUtility.DEFAULT_DISK_USAGE_BYTES); + this(rootDirectory, DEFAULT_DISK_USAGE_BYTES); } /** @@ -103,7 +119,7 @@ public DiskBasedCache(File rootDirectory) { * @param rootDirectorySupplier The supplier for the root directory of the cache. */ public DiskBasedCache(FileSupplier rootDirectorySupplier) { - this(rootDirectorySupplier, DiskBasedCacheUtility.DEFAULT_DISK_USAGE_BYTES); + this(rootDirectorySupplier, DEFAULT_DISK_USAGE_BYTES); } /** Clears the cache. Deletes all cached files from disk. */ @@ -128,7 +144,7 @@ public synchronized Entry get(String key) { if (entry == null) { return null; } - File file = DiskBasedCacheUtility.getFileForKey(key, mRootDirectorySupplier); + File file = getFileForKey(key); try { CountingInputStream cis = new CountingInputStream( @@ -140,7 +156,7 @@ public synchronized Entry get(String key) { VolleyLog.d( "%s: key=%s, found=%s", file.getAbsolutePath(), key, entryOnDisk.key); // Remove key whose contents on disk have been replaced. 
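// A minimal sketch of the bookkeeping behind the removal below, using the removeEntry
// helper re-added later in this hunk: the in-memory index keeps a running byte total, so
// dropping a stale entry must also subtract its recorded on-disk size.
//
//   private void removeEntry(String key) {
//       CacheHeader removed = mEntries.remove(key);
//       if (removed != null) {
//           mTotalSize -= removed.size;   // size covers header plus data as written
//       }
//   }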
- mTotalSize = DiskBasedCacheUtility.removeEntry(key, mTotalSize, mEntries); + removeEntry(key); return null; } byte[] data = streamToBytes(cis, cis.bytesRemaining()); @@ -183,8 +199,7 @@ public synchronized void initialize() { try { CacheHeader entry = CacheHeader.readHeader(cis); entry.size = entrySize; - mTotalSize = - DiskBasedCacheUtility.putEntry(entry.key, entry, mTotalSize, mEntries); + putEntry(entry.key, entry); } finally { // Any IOException thrown here is handled by the below catch block by design. //noinspection ThrowFromFinallyBlock @@ -218,12 +233,15 @@ public synchronized void invalidate(String key, boolean fullExpire) { /** Puts the entry with the specified key into the cache. */ @Override public synchronized void put(String key, Entry entry) { - if (DiskBasedCacheUtility.wouldBePruned( - mTotalSize, entry.data.length, mMaxCacheSizeInBytes)) { + // If adding this entry would trigger a prune, but pruning would cause the new entry to be + // deleted, then skip writing the entry in the first place, as this is just churn. + // Note that we don't include the cache header overhead in this calculation for simplicity, + // so putting entries which are just below the threshold may still cause this churn. + if (mTotalSize + entry.data.length > mMaxCacheSizeInBytes + && entry.data.length > mMaxCacheSizeInBytes * HYSTERESIS_FACTOR) { return; } - - File file = DiskBasedCacheUtility.getFileForKey(key, mRootDirectorySupplier); + File file = getFileForKey(key); try { BufferedOutputStream fos = new BufferedOutputStream(createOutputStream(file)); CacheHeader e = new CacheHeader(key, entry); @@ -236,10 +254,8 @@ public synchronized void put(String key, Entry entry) { fos.write(entry.data); fos.close(); e.size = file.length(); - mTotalSize = DiskBasedCacheUtility.putEntry(key, e, mTotalSize, mEntries); - mTotalSize = - DiskBasedCacheUtility.pruneIfNeeded( - mTotalSize, mMaxCacheSizeInBytes, mEntries, mRootDirectorySupplier); + putEntry(key, e); + pruneIfNeeded(); } catch (IOException e) { boolean deleted = file.delete(); if (!deleted) { @@ -252,21 +268,31 @@ public synchronized void put(String key, Entry entry) { /** Removes the specified key from the cache if it exists. */ @Override public synchronized void remove(String key) { - boolean deleted = DiskBasedCacheUtility.getFileForKey(key, mRootDirectorySupplier).delete(); - mTotalSize = DiskBasedCacheUtility.removeEntry(key, mTotalSize, mEntries); + boolean deleted = getFileForKey(key).delete(); + removeEntry(key); if (!deleted) { VolleyLog.d( "Could not delete cache entry for key=%s, filename=%s", - key, DiskBasedCacheUtility.getFilenameForKey(key)); + key, getFilenameForKey(key)); } } - /** Represents a supplier for {@link File}s. */ - public interface FileSupplier extends com.android.volley.toolbox.FileSupplier {} + /** + * Creates a pseudo-unique filename for the specified cache key. + * + * @param key The key to generate a file name for. + * @return A pseudo-unique filename. + */ + private String getFilenameForKey(String key) { + int firstHalfLength = key.length() / 2; + String localFilename = String.valueOf(key.substring(0, firstHalfLength).hashCode()); + localFilename += String.valueOf(key.substring(firstHalfLength).hashCode()); + return localFilename; + } /** Returns a file object for the given cache key. 
*/ public File getFileForKey(String key) { - return new File(mRootDirectorySupplier.get(), DiskBasedCacheUtility.getFilenameForKey(key)); + return new File(mRootDirectorySupplier.get(), getFilenameForKey(key)); } /** Re-initialize the cache if the directory was deleted. */ @@ -279,6 +305,75 @@ private void initializeIfRootDirectoryDeleted() { } } + /** Represents a supplier for {@link File}s. */ + public interface FileSupplier { + File get(); + } + + /** Prunes the cache to fit the maximum size. */ + private void pruneIfNeeded() { + if (mTotalSize < mMaxCacheSizeInBytes) { + return; + } + if (VolleyLog.DEBUG) { + VolleyLog.v("Pruning old cache entries."); + } + + long before = mTotalSize; + int prunedFiles = 0; + long startTime = SystemClock.elapsedRealtime(); + + Iterator> iterator = mEntries.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + CacheHeader e = entry.getValue(); + boolean deleted = getFileForKey(e.key).delete(); + if (deleted) { + mTotalSize -= e.size; + } else { + VolleyLog.d( + "Could not delete cache entry for key=%s, filename=%s", + e.key, getFilenameForKey(e.key)); + } + iterator.remove(); + prunedFiles++; + + if (mTotalSize < mMaxCacheSizeInBytes * HYSTERESIS_FACTOR) { + break; + } + } + + if (VolleyLog.DEBUG) { + VolleyLog.v( + "pruned %d files, %d bytes, %d ms", + prunedFiles, (mTotalSize - before), SystemClock.elapsedRealtime() - startTime); + } + } + + /** + * Puts the entry with the specified key into the cache. + * + * @param key The key to identify the entry by. + * @param entry The entry to cache. + */ + private void putEntry(String key, CacheHeader entry) { + if (!mEntries.containsKey(key)) { + mTotalSize += entry.size; + } else { + CacheHeader oldEntry = mEntries.get(key); + mTotalSize += (entry.size - oldEntry.size); + } + mEntries.put(key, entry); + } + + /** Removes the entry identified by 'key' from the cache. */ + private void removeEntry(String key) { + CacheHeader removed = mEntries.remove(key); + if (removed != null) { + mTotalSize -= removed.size; + } + } + /** * Reads length bytes from CountingInputStream into byte array. * @@ -286,6 +381,7 @@ private void initializeIfRootDirectoryDeleted() { * @param length number of bytes to read * @throws IOException if fails to read all bytes */ + @VisibleForTesting static byte[] streamToBytes(CountingInputStream cis, long length) throws IOException { long maxLength = cis.bytesRemaining(); // Length cannot be negative or greater than bytes remaining, and must not overflow int. @@ -307,6 +403,140 @@ OutputStream createOutputStream(File file) throws FileNotFoundException { return new FileOutputStream(file); } + /** Handles holding onto the cache headers for an entry. */ + @VisibleForTesting + static class CacheHeader { + /** + * The size of the data identified by this CacheHeader on disk (both header and data). + * + *
<p>
Must be set by the caller after it has been calculated. + * + *
<p>
This is not serialized to disk. + */ + long size; + + /** The key that identifies the cache entry. */ + final String key; + + /** ETag for cache coherence. */ + final String etag; + + /** Date of this response as reported by the server. */ + final long serverDate; + + /** The last modified date for the requested object. */ + final long lastModified; + + /** TTL for this record. */ + final long ttl; + + /** Soft TTL for this record. */ + final long softTtl; + + /** Headers from the response resulting in this cache entry. */ + final List
<Header>
allResponseHeaders; + + private CacheHeader( + String key, + String etag, + long serverDate, + long lastModified, + long ttl, + long softTtl, + List<Header>
allResponseHeaders) { + this.key = key; + this.etag = "".equals(etag) ? null : etag; + this.serverDate = serverDate; + this.lastModified = lastModified; + this.ttl = ttl; + this.softTtl = softTtl; + this.allResponseHeaders = allResponseHeaders; + } + + /** + * Instantiates a new CacheHeader object. + * + * @param key The key that identifies the cache entry + * @param entry The cache entry. + */ + CacheHeader(String key, Entry entry) { + this( + key, + entry.etag, + entry.serverDate, + entry.lastModified, + entry.ttl, + entry.softTtl, + getAllResponseHeaders(entry)); + } + + private static List<Header>
getAllResponseHeaders(Entry entry) { + // If the entry contains all the response headers, use that field directly. + if (entry.allResponseHeaders != null) { + return entry.allResponseHeaders; + } + + // Legacy fallback - copy headers from the map. + return HttpHeaderParser.toAllHeaderList(entry.responseHeaders); + } + + /** + * Reads the header from a CountingInputStream and returns a CacheHeader object. + * + * @param is The InputStream to read from. + * @throws IOException if fails to read header + */ + static CacheHeader readHeader(CountingInputStream is) throws IOException { + int magic = readInt(is); + if (magic != CACHE_MAGIC) { + // don't bother deleting, it'll get pruned eventually + throw new IOException(); + } + String key = readString(is); + String etag = readString(is); + long serverDate = readLong(is); + long lastModified = readLong(is); + long ttl = readLong(is); + long softTtl = readLong(is); + List<Header>
allResponseHeaders = readHeaderList(is); + return new CacheHeader( + key, etag, serverDate, lastModified, ttl, softTtl, allResponseHeaders); + } + + /** Creates a cache entry for the specified data. */ + Entry toCacheEntry(byte[] data) { + Entry e = new Entry(); + e.data = data; + e.etag = etag; + e.serverDate = serverDate; + e.lastModified = lastModified; + e.ttl = ttl; + e.softTtl = softTtl; + e.responseHeaders = HttpHeaderParser.toHeaderMap(allResponseHeaders); + e.allResponseHeaders = Collections.unmodifiableList(allResponseHeaders); + return e; + } + + /** Writes the contents of this CacheHeader to the specified OutputStream. */ + boolean writeHeader(OutputStream os) { + try { + writeInt(os, CACHE_MAGIC); + writeString(os, key); + writeString(os, etag == null ? "" : etag); + writeLong(os, serverDate); + writeLong(os, lastModified); + writeLong(os, ttl); + writeLong(os, softTtl); + writeHeaderList(allResponseHeaders, os); + os.flush(); + return true; + } catch (IOException e) { + VolleyLog.d("%s", e.toString()); + return false; + } + } + } + @VisibleForTesting static class CountingInputStream extends FilterInputStream { private final long length; @@ -344,4 +574,104 @@ long bytesRemaining() { return length - bytesRead; } } + + /* + * Homebrewed simple serialization system used for reading and writing cache + * headers on disk. Once upon a time, this used the standard Java + * Object{Input,Output}Stream, but the default implementation relies heavily + * on reflection (even for standard types) and generates a ton of garbage. + * + * TODO: Replace by standard DataInput and DataOutput in next cache version. + */ + + /** + * Simple wrapper around {@link InputStream#read()} that throws EOFException instead of + * returning -1. + */ + private static int read(InputStream is) throws IOException { + int b = is.read(); + if (b == -1) { + throw new EOFException(); + } + return b; + } + + static void writeInt(OutputStream os, int n) throws IOException { + os.write((n >> 0) & 0xff); + os.write((n >> 8) & 0xff); + os.write((n >> 16) & 0xff); + os.write((n >> 24) & 0xff); + } + + static int readInt(InputStream is) throws IOException { + int n = 0; + n |= (read(is) << 0); + n |= (read(is) << 8); + n |= (read(is) << 16); + n |= (read(is) << 24); + return n; + } + + static void writeLong(OutputStream os, long n) throws IOException { + os.write((byte) (n >>> 0)); + os.write((byte) (n >>> 8)); + os.write((byte) (n >>> 16)); + os.write((byte) (n >>> 24)); + os.write((byte) (n >>> 32)); + os.write((byte) (n >>> 40)); + os.write((byte) (n >>> 48)); + os.write((byte) (n >>> 56)); + } + + static long readLong(InputStream is) throws IOException { + long n = 0; + n |= ((read(is) & 0xFFL) << 0); + n |= ((read(is) & 0xFFL) << 8); + n |= ((read(is) & 0xFFL) << 16); + n |= ((read(is) & 0xFFL) << 24); + n |= ((read(is) & 0xFFL) << 32); + n |= ((read(is) & 0xFFL) << 40); + n |= ((read(is) & 0xFFL) << 48); + n |= ((read(is) & 0xFFL) << 56); + return n; + } + + static void writeString(OutputStream os, String s) throws IOException { + byte[] b = s.getBytes("UTF-8"); + writeLong(os, b.length); + os.write(b, 0, b.length); + } + + static String readString(CountingInputStream cis) throws IOException { + long n = readLong(cis); + byte[] b = streamToBytes(cis, n); + return new String(b, "UTF-8"); + } + + static void writeHeaderList(List
<Header> headers, OutputStream os) throws IOException { + if (headers != null) { + writeInt(os, headers.size()); + for (Header header : headers) { + writeString(os, header.getName()); + writeString(os, header.getValue()); + } + } else { + writeInt(os, 0); + } + } + + static List<Header>
readHeaderList(CountingInputStream cis) throws IOException { + int size = readInt(cis); + if (size < 0) { + throw new IOException("readHeaderList size=" + size); + } + List<Header>
result = + (size == 0) ? Collections.<Header>emptyList() : new ArrayList<Header>
(); + for (int i = 0; i < size; i++) { + String name = readString(cis).intern(); + String value = readString(cis).intern(); + result.add(new Header(name, value)); + } + return result; + } } diff --git a/src/main/java/com/android/volley/toolbox/DiskBasedCacheUtility.java b/src/main/java/com/android/volley/toolbox/DiskBasedCacheUtility.java deleted file mode 100644 index afef874e..00000000 --- a/src/main/java/com/android/volley/toolbox/DiskBasedCacheUtility.java +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.android.volley.toolbox; - -import android.os.SystemClock; -import androidx.annotation.Nullable; -import com.android.volley.Header; -import com.android.volley.VolleyLog; -import com.android.volley.toolbox.DiskBasedCache.CountingInputStream; -import java.io.EOFException; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - -class DiskBasedCacheUtility { - - /** Default maximum disk usage in bytes. */ - static final int DEFAULT_DISK_USAGE_BYTES = 5 * 1024 * 1024; - - /** High water mark percentage for the cache */ - static final float HYSTERESIS_FACTOR = 0.9f; - - /** - * Creates a pseudo-unique filename for the specified cache key. - * - * @param key The key to generate a file name for. - * @return A pseudo-unique filename. - */ - static String getFilenameForKey(String key) { - int firstHalfLength = key.length() / 2; - String localFilename = String.valueOf(key.substring(0, firstHalfLength).hashCode()); - localFilename += String.valueOf(key.substring(firstHalfLength).hashCode()); - return localFilename; - } - - /** Returns a file object for the given cache key. */ - public static File getFileForKey(String key, FileSupplier rootDirectorySupplier) { - return new File(rootDirectorySupplier.get(), getFilenameForKey(key)); - } - - static boolean wouldExceedCacheSize(long newTotalSize, long maxCacheSize) { - return newTotalSize > maxCacheSize; - } - - static boolean doesDataExceedHighWaterMark(long dataLength, long maxCacheSize) { - return dataLength > maxCacheSize * HYSTERESIS_FACTOR; - } - - /** - * If adding this entry would trigger a prune, but pruning would cause the new entry to be - * deleted, then skip writing the entry in the first place, as this is just churn. Note that we - * don't include the cache header overhead in this calculation for simplicity, so putting - * entries which are just below the threshold may still cause this churn. - * - * @param totalSize totalSize of the cache - * @param entryLength length of entry being put into cache - * @param maxCacheSize max size of the cache - * @return true if adding the entry would trigger a prune. 
- */ - static boolean wouldBePruned(long totalSize, int entryLength, int maxCacheSize) { - return wouldExceedCacheSize(totalSize + entryLength, maxCacheSize) - && doesDataExceedHighWaterMark(entryLength, maxCacheSize); - } - - /** - * Prunes the cache if needed. This method modifies the entries map by removing the pruned - * entries. - * - * @param totalSize The total size of the cache. - * @param maxCacheSizeInBytes Maximum size of the cache. - * @param entries Map of the entries in the cache. - * @param rootDirectorySupplier The supplier for the root directory to use for the cache. - * @return The updated totalSize. - */ - static long pruneIfNeeded( - long totalSize, - int maxCacheSizeInBytes, - Map entries, - FileSupplier rootDirectorySupplier) { - if (!wouldExceedCacheSize(totalSize, maxCacheSizeInBytes)) { - return totalSize; - } - if (VolleyLog.DEBUG) { - VolleyLog.v("Pruning old cache entries."); - } - - long before = totalSize; - int prunedFiles = 0; - long startTime = SystemClock.elapsedRealtime(); - - Iterator> iterator = entries.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - CacheHeader e = entry.getValue(); - boolean deleted = getFileForKey(e.key, rootDirectorySupplier).delete(); - if (deleted) { - totalSize -= e.size; - } else { - VolleyLog.d( - "Could not delete cache entry for key=%s, filename=%s", - e.key, getFilenameForKey(e.key)); - } - iterator.remove(); - prunedFiles++; - - if (!doesDataExceedHighWaterMark(totalSize, maxCacheSizeInBytes)) { - break; - } - } - - if (VolleyLog.DEBUG) { - VolleyLog.v( - "pruned %d files, %d bytes, %d ms", - prunedFiles, (totalSize - before), SystemClock.elapsedRealtime() - startTime); - } - return totalSize; - } - - /** - * Puts the entry with the specified key into the cache. This method updates the entries map - * with the key, entry pair. - * - * @param key The key to identify the entry by. - * @param entry The entry to cache. - * @param totalSize The total size of the cache. - * @param entries Map of the entries in the cache. - * @return The updated totalSize. - */ - static long putEntry( - String key, CacheHeader entry, long totalSize, Map entries) { - if (!entries.containsKey(key)) { - totalSize += entry.size; - } else { - CacheHeader oldEntry = entries.get(key); - totalSize += (entry.size - oldEntry.size); - } - entries.put(key, entry); - return totalSize; - } - - /** Removes the entry identified by 'key' from the cache. */ - static long removeEntry(String key, long totalSize, Map entries) { - CacheHeader removed = entries.remove(key); - if (removed != null) { - totalSize -= removed.size; - } - return totalSize; - } - - /* - * Homebrewed simple serialization system used for reading and writing cache - * headers on disk. Once upon a time, this used the standard Java - * Object{Input,Output}Stream, but the default implementation relies heavily - * on reflection (even for standard types) and generates a ton of garbage. - * - * TODO: Replace by standard DataInput and DataOutput in next cache version. - */ - - /** - * Simple wrapper around {@link InputStream#read()} that throws EOFException instead of - * returning -1. 
- */ - private static int read(InputStream is) throws IOException { - int b = is.read(); - if (b == -1) { - throw new EOFException(); - } - return b; - } - - static void writeInt(OutputStream os, int n) throws IOException { - os.write(n & 0xff); - os.write((n >> 8) & 0xff); - os.write((n >> 16) & 0xff); - os.write((n >> 24) & 0xff); - } - - static int readInt(InputStream is) throws IOException { - int n = 0; - n |= read(is); - n |= (read(is) << 8); - n |= (read(is) << 16); - n |= (read(is) << 24); - return n; - } - - static void writeLong(OutputStream os, long n) throws IOException { - os.write((byte) n); - os.write((byte) (n >>> 8)); - os.write((byte) (n >>> 16)); - os.write((byte) (n >>> 24)); - os.write((byte) (n >>> 32)); - os.write((byte) (n >>> 40)); - os.write((byte) (n >>> 48)); - os.write((byte) (n >>> 56)); - } - - static long readLong(InputStream is) throws IOException { - long n = 0; - n |= (read(is) & 0xFFL); - n |= ((read(is) & 0xFFL) << 8); - n |= ((read(is) & 0xFFL) << 16); - n |= ((read(is) & 0xFFL) << 24); - n |= ((read(is) & 0xFFL) << 32); - n |= ((read(is) & 0xFFL) << 40); - n |= ((read(is) & 0xFFL) << 48); - n |= ((read(is) & 0xFFL) << 56); - return n; - } - - static void writeString(OutputStream os, String s) throws IOException { - byte[] b = s.getBytes("UTF-8"); - writeLong(os, b.length); - os.write(b, 0, b.length); - } - - static void writeString(ByteBuffer buffer, @Nullable String s) throws IOException { - // if the string is null, put the length as 0. - if (s == null) { - buffer.putLong(0); - return; - } - byte[] b = s.getBytes("UTF-8"); - buffer.putLong(b.length); - buffer.put(b); - } - - static String readString(CountingInputStream cis) throws IOException { - long n = readLong(cis); - byte[] b = DiskBasedCache.streamToBytes(cis, n); - return new String(b, "UTF-8"); - } - - static String readString(ByteBuffer buffer) throws IOException { - int length = (int) buffer.getLong(); - byte[] array = new byte[length]; - for (int i = 0; i < length; i++) { - array[i] = buffer.get(); - } - return new String(array, "UTF-8"); - } - - static void writeHeaderList(@Nullable List
<Header> headers, OutputStream os) - throws IOException { - if (headers != null) { - writeInt(os, headers.size()); - for (Header header : headers) { - writeString(os, header.getName()); - writeString(os, header.getValue()); - } - } else { - writeInt(os, 0); - } - } - - static void writeHeaderList(@Nullable List<Header>
<Header> headers, ByteBuffer buffer) - throws IOException { - if (headers != null) { - buffer.putInt(headers.size()); - for (Header header : headers) { - writeString(buffer, header.getName()); - writeString(buffer, header.getValue()); - } - } else { - buffer.putInt(0); - } - } - - static List<Header>
readHeaderList(CountingInputStream cis) throws IOException { - int size = readInt(cis); - if (size < 0) { - throw new IOException("readHeaderList size=" + size); - } - List<Header>
result = - (size == 0) ? Collections.<Header>emptyList() : new ArrayList<Header>
(); - for (int i = 0; i < size; i++) { - String name = readString(cis).intern(); - String value = readString(cis).intern(); - result.add(new Header(name, value)); - } - return result; - } - - static List<Header>
readHeaderList(ByteBuffer buffer) throws IOException { - int size = buffer.getInt(); - if (size < 0) { - throw new IOException("readHeaderList size=" + size); - } - List<Header>
result = new ArrayList<Header>
(); - for (int i = 0; i < size; i++) { - String name = readString(buffer); - String value = readString(buffer); - result.add(new Header(name, value)); - } - return result; - } - - static int headerListSize(@Nullable List<Header>
headers) throws IOException { - if (headers == null) { - return 4; - } - int bytes = 4; - - for (Header header : headers) { - bytes += header.getName().getBytes("UTF-8").length; - bytes += header.getValue().getBytes("UTF-8").length; - bytes += 16; // two longs denoting length of strings - } - - return bytes; - } -} diff --git a/src/test/java/com/android/volley/toolbox/DiskBasedAsyncCacheTest.java b/src/test/java/com/android/volley/toolbox/DiskBasedAsyncCacheTest.java deleted file mode 100644 index b6e92c02..00000000 --- a/src/test/java/com/android/volley/toolbox/DiskBasedAsyncCacheTest.java +++ /dev/null @@ -1,463 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.android.volley.toolbox; - -import static org.hamcrest.Matchers.arrayWithSize; -import static org.hamcrest.Matchers.emptyArray; -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; - -import androidx.annotation.Nullable; -import com.android.volley.AsyncCache; -import com.android.volley.Cache; -import com.android.volley.utils.CacheTestUtils; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.AsynchronousFileChannel; -import java.nio.file.StandardOpenOption; -import java.util.HashMap; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.robolectric.RobolectricTestRunner; -import org.robolectric.annotation.Config; - -@RunWith(RobolectricTestRunner.class) -@Config(sdk = 26) -public class DiskBasedAsyncCacheTest { - - private static final int MAX_SIZE = 1024 * 1024; - - private DiskBasedAsyncCache cache; - - @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); - - @Rule public ExpectedException exception = ExpectedException.none(); - - @Before - public void setup() throws IOException, ExecutionException, InterruptedException { - final CompletableFuture future = new CompletableFuture<>(); - AsyncCache.OnWriteCompleteCallback futureCallback = - new AsyncCache.OnWriteCompleteCallback() { - @Override - public void onWriteComplete() { - future.complete(null); - } - }; - // Initialize empty cache - cache = - new DiskBasedAsyncCache.Builder() - .setRootDirectory(temporaryFolder.getRoot()) - .setMaxCacheSizeInBytes(MAX_SIZE) - .build(); - cache.initialize(futureCallback); - future.get(); - } - - @Test - public void testEmptyInitialize() throws ExecutionException, InterruptedException { - assertNull(getEntry("key").get()); - } - - @Test - public void testPutGetZeroBytes() throws ExecutionException, InterruptedException { - final Cache.Entry entry = new Cache.Entry(); - - entry.data = new byte[0]; 
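// These tests bridge the callback-based AsyncCache API to CompletableFuture so each
// operation can be awaited; a minimal sketch of the pattern used by the putEntry/getEntry
// helpers at the bottom of this file:
//
//   final CompletableFuture<Cache.Entry> got = new CompletableFuture<>();
//   cache.get("key", new AsyncCache.OnGetCompleteCallback() {
//       @Override
//       public void onGetComplete(@Nullable Cache.Entry e) {
//           got.complete(e);          // unblocks the awaiting test thread
//       }
//   });
//   Cache.Entry result = got.get();   // blocks until the async read completes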
- entry.serverDate = 1234567L; - entry.lastModified = 13572468L; - entry.ttl = 9876543L; - entry.softTtl = 8765432L; - entry.etag = "etag"; - entry.responseHeaders = new HashMap<>(); - entry.responseHeaders.put("fruit", "banana"); - entry.responseHeaders.put("color", "yellow"); - - putEntry("my-magical-key", entry).get(); - - CacheTestUtils.assertThatEntriesAreEqual(getEntry("my-magical-key").get(), entry); - assertNull(getEntry("unknown-key").get()); - } - - @Test - public void testTooLargeEntry() throws ExecutionException, InterruptedException { - Cache.Entry entry = - CacheTestUtils.randomData( - MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("oversize") + 1); - putEntry("oversize", entry).get(); - - assertNull(getEntry("oversize").get()); - } - - @Test - public void testMaxSizeEntry() throws ExecutionException, InterruptedException { - Cache.Entry entry = - CacheTestUtils.randomData( - MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("maxsize") - 1); - putEntry("maxsize", entry).get(); - - CacheTestUtils.assertThatEntriesAreEqual(getEntry("maxsize").get(), entry); - } - - @Test - public void testTrimAtThreshold() throws ExecutionException, InterruptedException { - // Start with the largest possible entry. - Cache.Entry entry = - CacheTestUtils.randomData(MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("maxsize")); - putEntry("maxsize", entry).get(); - - CacheTestUtils.assertThatEntriesAreEqual(getEntry("maxsize").get(), entry); - - // Now any new entry should cause the first one to be cleared. - entry = CacheTestUtils.randomData(0); - putEntry("bit", entry).get(); - - assertNull(getEntry("maxsize").get()); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("bit").get(), entry); - } - - @Test - public void testTrimWithMultipleEvictions_underHysteresisThreshold() - throws ExecutionException, InterruptedException { - final Cache.Entry entry1 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry1") - 1); - final Cache.Entry entry2 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry2") - 1); - final Cache.Entry entry3 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry3") - 1); - - putEntry("entry1", entry1).get(); - putEntry("entry2", entry2).get(); - putEntry("entry3", entry3).get(); - - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry1").get(), entry1); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry2").get(), entry2); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry3").get(), entry3); - - final Cache.Entry entry = - CacheTestUtils.randomData( - (int) (DiskBasedCacheUtility.HYSTERESIS_FACTOR * MAX_SIZE) - - CacheTestUtils.getEntrySizeOnDisk("max")); - - putEntry("max", entry).get(); - - assertNull(getEntry("entry1").get()); - assertNull(getEntry("entry2").get()); - assertNull(getEntry("entry3").get()); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("max").get(), entry); - } - - @Test - public void testTrimWithMultipleEvictions_atHysteresisThreshold() - throws ExecutionException, InterruptedException { - final Cache.Entry entry1 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry1") - 1); - final Cache.Entry entry2 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry2") - 1); - final Cache.Entry entry3 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry3") - 1); - - putEntry("entry1", entry1).get(); - putEntry("entry2", entry2).get(); - 
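// Rough size accounting for this test, assuming getEntrySizeOnDisk returns the full
// per-key header overhead: entry1..entry3 each occupy just under MAX_SIZE / 3 on disk, so
// the cache sits at roughly MAX_SIZE, while the "max" entry below lands at
// HYSTERESIS_FACTOR * MAX_SIZE + 1 bytes. The prune loop evicts in LRU order until the
// total drops below the 0.9 high-water mark, so it removes all three entries and then
// "max" itself, which alone still exceeds the mark; every subsequent get() is null.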
putEntry("entry3", entry3).get(); - - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry1").get(), entry1); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry2").get(), entry2); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry3").get(), entry3); - - final Cache.Entry entry = - CacheTestUtils.randomData( - (int) (DiskBasedCacheUtility.HYSTERESIS_FACTOR * MAX_SIZE) - - CacheTestUtils.getEntrySizeOnDisk("max") - + 1); - - putEntry("max", entry).get(); - - assertNull(getEntry("entry1").get()); - assertNull(getEntry("entry2").get()); - assertNull(getEntry("entry3").get()); - assertNull(getEntry("max").get()); - } - - @Test - public void testTrimWithPartialEvictions() throws ExecutionException, InterruptedException { - final Cache.Entry entry1 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry1") - 1); - final Cache.Entry entry2 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry2") - 1); - final Cache.Entry entry3 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry3") - 1); - final Cache.Entry entry4 = - CacheTestUtils.randomData( - (MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("entry4") - 1) / 2); - - putEntry("entry1", entry1).get(); - putEntry("entry2", entry2).get(); - putEntry("entry3", entry3).get(); - - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry1").get(), entry1); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry2").get(), entry2); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry3").get(), entry3); - - putEntry("entry4", entry4).get(); - - assertNull(getEntry("entry1").get()); - assertNull(getEntry("entry2").get()); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry3").get(), entry3); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("entry4").get(), entry4); - } - - @Test - public void testGetBadMagic() throws IOException, ExecutionException, InterruptedException { - // Cache something - Cache.Entry entry = CacheTestUtils.randomData(1023); - putEntry("key", entry).get(); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("key").get(), entry); - - // Overwrite the magic header - File cacheFolder = temporaryFolder.getRoot(); - File file = cacheFolder.listFiles()[0]; - FileOutputStream fos = new FileOutputStream(file); - DiskBasedCacheUtility.writeInt(fos, 0); - fos.close(); - assertNull(getEntry("key").get()); - assertThat(listCachedFiles(), is(emptyArray())); // should this be?? 
- } - - @Test - public void testGetWrongKey() throws IOException, ExecutionException, InterruptedException { - // Cache something - Cache.Entry entry = CacheTestUtils.randomData(1023); - putEntry("key", entry).get(); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("key").get(), entry); - - // Access the cached file - File cacheFolder = temporaryFolder.getRoot(); - File file = cacheFolder.listFiles()[0]; - - // Write a new header to file associated with key - AsynchronousFileChannel afc = - AsynchronousFileChannel.open(file.toPath(), StandardOpenOption.WRITE); - CacheHeader wrongHeader = new CacheHeader("bad", entry); - ByteBuffer buffer = ByteBuffer.allocate(59); - wrongHeader.writeHeader(buffer); - buffer.flip(); - Future operation = afc.write(buffer, 0); - operation.get(); - afc.close(); - - // key is gone, and file is deleted - assertNull(getEntry("key").get()); - assertThat(listCachedFiles(), is(arrayWithSize(0))); - - // Note: file is now a zombie because its key does not map to its name - } - - @Test - public void testPutRemoveGet() throws ExecutionException, InterruptedException { - Cache.Entry entry = CacheTestUtils.randomData(511); - putEntry("key", entry).get(); - - CacheTestUtils.assertThatEntriesAreEqual(getEntry("key").get(), entry); - - removeEntry("key").get(); - assertNull(getEntry("key").get()); - assertThat(listCachedFiles(), is(emptyArray())); - } - - @Test - public void testPutClearGet() throws ExecutionException, InterruptedException { - Cache.Entry entry = CacheTestUtils.randomData(511); - putEntry("key", entry).get(); - - CacheTestUtils.assertThatEntriesAreEqual(getEntry("key").get(), entry); - - clearEntries().get(); - assertNull(getEntry("key").get()); - assertThat(listCachedFiles(), is(emptyArray())); - } - - @Test - public void testReinitialize() throws ExecutionException, InterruptedException { - Cache.Entry entry = CacheTestUtils.randomData(1023); - putEntry("key", entry).get(); - - final AsyncCache copy = - new DiskBasedAsyncCache.Builder() - .setRootDirectory(temporaryFolder.getRoot()) - .setMaxCacheSizeInBytes(MAX_SIZE) - .build(); - final CompletableFuture getEntry = new CompletableFuture<>(); - copy.initialize( - new AsyncCache.OnWriteCompleteCallback() { - @Override - public void onWriteComplete() { - copy.get( - "key", - new AsyncCache.OnGetCompleteCallback() { - @Override - public void onGetComplete(@Nullable Cache.Entry entry) { - getEntry.complete(entry); - } - }); - } - }); - CacheTestUtils.assertThatEntriesAreEqual(getEntry.get(), entry); - } - - @Test - public void testInvalidate() throws ExecutionException, InterruptedException { - Cache.Entry entry = CacheTestUtils.randomData(32); - entry.softTtl = 8765432L; - entry.ttl = 9876543L; - putEntry("key", entry).get(); - - invalidateEntry("key", /* fullExpire= */ false).get(); - entry.softTtl = 0; // expired - CacheTestUtils.assertThatEntriesAreEqual(getEntry("key").get(), entry); - } - - @Test - public void testInvalidateFullExpire() throws ExecutionException, InterruptedException { - Cache.Entry entry = CacheTestUtils.randomData(32); - entry.softTtl = 8765432L; - entry.ttl = 9876543L; - putEntry("key", entry).get(); - - invalidateEntry("key", /* fullExpire= */ true).get(); - entry.softTtl = 0; // expired - entry.ttl = 0; // expired - CacheTestUtils.assertThatEntriesAreEqual(getEntry("key").get(), entry); - } - - @Test - public void testManyResponseHeaders() throws ExecutionException, InterruptedException { - Cache.Entry entry = new Cache.Entry(); - entry.data = new byte[0]; - 
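// Back-of-the-envelope size for this stress entry, following headerListSize's accounting
// (name bytes + value bytes + 16 for the two string-length longs, per header): 0xFFFF
// headers with short numeric names and empty values come to roughly 65535 * 21 bytes,
// about 1.3 MB of header data attached to a zero-byte body, exercising header
// serialization well past typical response sizes.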
entry.responseHeaders = new HashMap<>(); - for (int i = 0; i < 0xFFFF; i++) { - entry.responseHeaders.put(Integer.toString(i), ""); - } - putEntry("key", entry).get(); - } - - @Test - public void initializeIfRootDirectoryDeleted() throws ExecutionException, InterruptedException { - temporaryFolder.delete(); - - Cache.Entry entry = CacheTestUtils.randomData(101); - putEntry("key1", entry).get(); - - assertNull(getEntry("key1").get()); - - // confirm that we can now store entries - putEntry("key2", entry).get(); - CacheTestUtils.assertThatEntriesAreEqual(getEntry("key2").get(), entry); - } - - /* Test helpers */ - - /** Puts entry into the cache, and returns a CompletableFuture after putting the entry. */ - private CompletableFuture<Void> putEntry(final String key, Cache.Entry entry) { - final CompletableFuture<Void> put = new CompletableFuture<>(); - cache.put( - key, - entry, - new AsyncCache.OnWriteCompleteCallback() { - @Override - public void onWriteComplete() { - put.complete(null); - } - }); - return put; - } - - /** Gets an entry from the cache, and returns a CompletableFuture containing the entry. */ - private CompletableFuture<Cache.Entry> getEntry(final String key) { - final CompletableFuture<Cache.Entry> get = new CompletableFuture<>(); - cache.get( - key, - new AsyncCache.OnGetCompleteCallback() { - @Override - public void onGetComplete(@Nullable Cache.Entry entry) { - get.complete(entry); - } - }); - return get; - } - - private CompletableFuture<Void> removeEntry(final String key) { - final CompletableFuture<Void> remove = new CompletableFuture<>(); - cache.remove( - key, - new AsyncCache.OnWriteCompleteCallback() { - @Override - public void onWriteComplete() { - remove.complete(null); - } - }); - return remove; - } - - private CompletableFuture<Void> invalidateEntry(final String key, final boolean fullExpire) { - final CompletableFuture<Void> remove = new CompletableFuture<>(); - cache.invalidate( - key, - fullExpire, - new AsyncCache.OnWriteCompleteCallback() { - @Override - public void onWriteComplete() { - remove.complete(null); - } - }); - return remove; - } - - private CompletableFuture<Void> clearEntries() { - final CompletableFuture<Void> clear = new CompletableFuture<>(); - cache.clear( - new AsyncCache.OnWriteCompleteCallback() { - @Override - public void onWriteComplete() { - clear.complete(null); - } - }); - return clear; - } - - private File[] listCachedFiles() { - return temporaryFolder.getRoot().listFiles(); - } -} diff --git a/src/test/java/com/android/volley/toolbox/DiskBasedCacheTest.java b/src/test/java/com/android/volley/toolbox/DiskBasedCacheTest.java index e57dc75c..db6e4913 100644 --- a/src/test/java/com/android/volley/toolbox/DiskBasedCacheTest.java +++ b/src/test/java/com/android/volley/toolbox/DiskBasedCacheTest.java @@ -18,9 +18,11 @@ import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.emptyArray; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; @@ -31,7 +33,9 @@ import static org.mockito.Mockito.verify; import com.android.volley.Cache; -import com.android.volley.utils.CacheTestUtils; +import com.android.volley.Header; +import com.android.volley.toolbox.DiskBasedCache.CacheHeader; +import 
com.android.volley.toolbox.DiskBasedCache.CountingInputStream; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.EOFException; @@ -40,7 +44,10 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; +import java.util.Random; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -77,7 +84,7 @@ public void teardown() { @Test public void testEmptyInitialize() { - assertNull(cache.get("key")); + assertThat(cache.get("key"), is(nullValue())); } @Test @@ -94,60 +101,60 @@ public void testPutGetZeroBytes() { entry.responseHeaders.put("color", "yellow"); cache.put("my-magical-key", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("my-magical-key"), entry); - assertNull(cache.get("unknown-key")); + assertThatEntriesAreEqual(cache.get("my-magical-key"), entry); + assertThat(cache.get("unknown-key"), is(nullValue())); } @Test public void testPutRemoveGet() { - Cache.Entry entry = CacheTestUtils.randomData(511); + Cache.Entry entry = randomData(511); cache.put("key", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("key"), entry); + assertThatEntriesAreEqual(cache.get("key"), entry); cache.remove("key"); - assertNull(cache.get("key")); + assertThat(cache.get("key"), is(nullValue())); assertThat(listCachedFiles(), is(emptyArray())); } @Test public void testPutClearGet() { - Cache.Entry entry = CacheTestUtils.randomData(511); + Cache.Entry entry = randomData(511); cache.put("key", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("key"), entry); + assertThatEntriesAreEqual(cache.get("key"), entry); cache.clear(); - assertNull(cache.get("key")); + assertThat(cache.get("key"), is(nullValue())); assertThat(listCachedFiles(), is(emptyArray())); } @Test public void testReinitialize() { - Cache.Entry entry = CacheTestUtils.randomData(1023); + Cache.Entry entry = randomData(1023); cache.put("key", entry); Cache copy = new DiskBasedCache(temporaryFolder.getRoot(), MAX_SIZE); copy.initialize(); - CacheTestUtils.assertThatEntriesAreEqual(copy.get("key"), entry); + assertThatEntriesAreEqual(copy.get("key"), entry); } @Test public void testInvalidate() { - Cache.Entry entry = CacheTestUtils.randomData(32); + Cache.Entry entry = randomData(32); entry.softTtl = 8765432L; entry.ttl = 9876543L; cache.put("key", entry); cache.invalidate("key", false); entry.softTtl = 0; // expired - CacheTestUtils.assertThatEntriesAreEqual(cache.get("key"), entry); + assertThatEntriesAreEqual(cache.get("key"), entry); } @Test public void testInvalidateFullExpire() { - Cache.Entry entry = CacheTestUtils.randomData(32); + Cache.Entry entry = randomData(32); entry.softTtl = 8765432L; entry.ttl = 9876543L; cache.put("key", entry); @@ -155,186 +162,157 @@ public void testInvalidateFullExpire() { cache.invalidate("key", true); entry.softTtl = 0; // expired entry.ttl = 0; // expired - CacheTestUtils.assertThatEntriesAreEqual(cache.get("key"), entry); + assertThatEntriesAreEqual(cache.get("key"), entry); } @Test public void testTooLargeEntry() { - Cache.Entry entry = - CacheTestUtils.randomData( - MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("oversize") + 1); + Cache.Entry entry = randomData(MAX_SIZE - getEntrySizeOnDisk("oversize")); cache.put("oversize", entry); - assertNull(cache.get("oversize")); + assertThat(cache.get("oversize"), is(nullValue())); } @Test public void testMaxSizeEntry() { - Cache.Entry entry = - 
CacheTestUtils.randomData( - MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("maxsize") - 1); + Cache.Entry entry = randomData(MAX_SIZE - getEntrySizeOnDisk("maxsize") - 1); cache.put("maxsize", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("maxsize"), entry); + assertThatEntriesAreEqual(cache.get("maxsize"), entry); } @Test public void testTrimAtThreshold() { // Start with the largest possible entry. - Cache.Entry entry = - CacheTestUtils.randomData(MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("maxsize")); + Cache.Entry entry = randomData(MAX_SIZE - getEntrySizeOnDisk("maxsize") - 1); cache.put("maxsize", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("maxsize"), entry); + assertThatEntriesAreEqual(cache.get("maxsize"), entry); // Now any new entry should cause the first one to be cleared. - entry = CacheTestUtils.randomData(0); + entry = randomData(0); cache.put("bit", entry); - assertNull(cache.get("maxsize")); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("bit"), entry); + assertThat(cache.get("maxsize"), is(nullValue())); + assertThatEntriesAreEqual(cache.get("bit"), entry); } @Test public void testTrimWithMultipleEvictions_underHysteresisThreshold() { - Cache.Entry entry1 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry1") - 1); + Cache.Entry entry1 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry1") - 1); cache.put("entry1", entry1); - Cache.Entry entry2 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry2") - 1); + Cache.Entry entry2 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry2") - 1); cache.put("entry2", entry2); - Cache.Entry entry3 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry3") - 1); + Cache.Entry entry3 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry3") - 1); cache.put("entry3", entry3); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry1"), entry1); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry2"), entry2); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry3"), entry3); + assertThatEntriesAreEqual(cache.get("entry1"), entry1); + assertThatEntriesAreEqual(cache.get("entry2"), entry2); + assertThatEntriesAreEqual(cache.get("entry3"), entry3); Cache.Entry entry = - CacheTestUtils.randomData( - (int) (DiskBasedCacheUtility.HYSTERESIS_FACTOR * MAX_SIZE) - - CacheTestUtils.getEntrySizeOnDisk("max")); + randomData( + (int) (DiskBasedCache.HYSTERESIS_FACTOR * MAX_SIZE) + - getEntrySizeOnDisk("max")); cache.put("max", entry); - assertNull(cache.get("entry1")); - assertNull(cache.get("entry2")); - assertNull(cache.get("entry3")); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("max"), entry); + assertThat(cache.get("entry1"), is(nullValue())); + assertThat(cache.get("entry2"), is(nullValue())); + assertThat(cache.get("entry3"), is(nullValue())); + assertThatEntriesAreEqual(cache.get("max"), entry); } @Test public void testTrimWithMultipleEvictions_atHysteresisThreshold() { - Cache.Entry entry1 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry1") - 1); + Cache.Entry entry1 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry1") - 1); cache.put("entry1", entry1); - Cache.Entry entry2 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry2") - 1); + Cache.Entry entry2 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry2") - 1); cache.put("entry2", entry2); - Cache.Entry entry3 = - 
CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry3") - 1); + Cache.Entry entry3 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry3") - 1); cache.put("entry3", entry3); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry1"), entry1); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry2"), entry2); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry3"), entry3); + assertThatEntriesAreEqual(cache.get("entry1"), entry1); + assertThatEntriesAreEqual(cache.get("entry2"), entry2); + assertThatEntriesAreEqual(cache.get("entry3"), entry3); Cache.Entry entry = - CacheTestUtils.randomData( - (int) (DiskBasedCacheUtility.HYSTERESIS_FACTOR * MAX_SIZE) - - CacheTestUtils.getEntrySizeOnDisk("max") + randomData( + (int) (DiskBasedCache.HYSTERESIS_FACTOR * MAX_SIZE) + - getEntrySizeOnDisk("max") + 1); cache.put("max", entry); - assertNull(cache.get("entry1")); - assertNull(cache.get("entry2")); - assertNull(cache.get("entry3")); - assertNull(cache.get("max")); + assertThat(cache.get("entry1"), is(nullValue())); + assertThat(cache.get("entry2"), is(nullValue())); + assertThat(cache.get("entry3"), is(nullValue())); + assertThat(cache.get("max"), is(nullValue())); } @Test public void testTrimWithPartialEvictions() { - Cache.Entry entry1 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry1") - 1); + Cache.Entry entry1 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry1") - 1); cache.put("entry1", entry1); - Cache.Entry entry2 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry2") - 1); + Cache.Entry entry2 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry2") - 1); cache.put("entry2", entry2); - Cache.Entry entry3 = - CacheTestUtils.randomData( - MAX_SIZE / 3 - CacheTestUtils.getEntrySizeOnDisk("entry3") - 1); + Cache.Entry entry3 = randomData(MAX_SIZE / 3 - getEntrySizeOnDisk("entry3") - 1); cache.put("entry3", entry3); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry1"), entry1); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry2"), entry2); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry3"), entry3); + assertThatEntriesAreEqual(cache.get("entry1"), entry1); + assertThatEntriesAreEqual(cache.get("entry2"), entry2); + assertThatEntriesAreEqual(cache.get("entry3"), entry3); - Cache.Entry entry4 = - CacheTestUtils.randomData( - (MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("entry4") - 1) / 2); + Cache.Entry entry4 = randomData((MAX_SIZE - getEntrySizeOnDisk("entry4") - 1) / 2); cache.put("entry4", entry4); - assertNull(cache.get("entry1")); - assertNull(cache.get("entry2")); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry3"), entry3); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry4"), entry4); + assertThat(cache.get("entry1"), is(nullValue())); + assertThat(cache.get("entry2"), is(nullValue())); + assertThatEntriesAreEqual(cache.get("entry3"), entry3); + assertThatEntriesAreEqual(cache.get("entry4"), entry4); } @Test public void testLargeEntryDoesntClearCache() { // Writing a large entry to an empty cache should succeed - Cache.Entry largeEntry = - CacheTestUtils.randomData( - MAX_SIZE - CacheTestUtils.getEntrySizeOnDisk("largeEntry") - 1); + Cache.Entry largeEntry = randomData(MAX_SIZE - getEntrySizeOnDisk("largeEntry") - 1); cache.put("largeEntry", largeEntry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("largeEntry"), largeEntry); + assertThatEntriesAreEqual(cache.get("largeEntry"), largeEntry); // 
Reset and fill up ~half the cache. cache.clear(); - Cache.Entry entry = - CacheTestUtils.randomData( - MAX_SIZE / 2 - CacheTestUtils.getEntrySizeOnDisk("entry") - 1); + Cache.Entry entry = randomData(MAX_SIZE / 2 - getEntrySizeOnDisk("entry") - 1); cache.put("entry", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry"), entry); + assertThatEntriesAreEqual(cache.get("entry"), entry); // Writing the large entry should no-op, because otherwise the pruning algorithm would clear // the whole cache, since the large entry is above the hysteresis threshold. cache.put("largeEntry", largeEntry); - assertNull(cache.get("largeEntry")); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("entry"), entry); + assertThat(cache.get("largeEntry"), is(nullValue())); + assertThatEntriesAreEqual(cache.get("entry"), entry); } @Test @SuppressWarnings("TryFinallyCanBeTryWithResources") public void testGetBadMagic() throws IOException { // Cache something - Cache.Entry entry = CacheTestUtils.randomData(1023); + Cache.Entry entry = randomData(1023); cache.put("key", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("key"), entry); + assertThatEntriesAreEqual(cache.get("key"), entry); // Overwrite the magic header File cacheFolder = temporaryFolder.getRoot(); File file = cacheFolder.listFiles()[0]; FileOutputStream fos = new FileOutputStream(file); try { - DiskBasedCacheUtility.writeInt(fos, 0); // overwrite magic + DiskBasedCache.writeInt(fos, 0); // overwrite magic } finally { //noinspection ThrowFromFinallyBlock fos.close(); } - assertNull(cache.get("key")); + assertThat(cache.get("key"), is(nullValue())); assertThat(listCachedFiles(), is(emptyArray())); } @@ -342,9 +320,9 @@ public void testGetBadMagic() throws IOException { @SuppressWarnings("TryFinallyCanBeTryWithResources") public void testGetWrongKey() throws IOException { // Cache something - Cache.Entry entry = CacheTestUtils.randomData(1023); + Cache.Entry entry = randomData(1023); cache.put("key", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("key"), entry); + assertThatEntriesAreEqual(cache.get("key"), entry); // Access the cached file File cacheFolder = temporaryFolder.getRoot(); @@ -360,7 +338,7 @@ public void testGetWrongKey() throws IOException { } // key is gone, but file is still there - assertNull(cache.get("key")); + assertThat(cache.get("key"), is(nullValue())); assertThat(listCachedFiles(), is(arrayWithSize(1))); // Note: file is now a zombie because its key does not map to its name @@ -369,8 +347,8 @@ public void testGetWrongKey() throws IOException { @Test public void testStreamToBytesNegativeLength() throws IOException { byte[] data = new byte[1]; - DiskBasedCache.CountingInputStream cis = - new DiskBasedCache.CountingInputStream(new ByteArrayInputStream(data), data.length); + CountingInputStream cis = + new CountingInputStream(new ByteArrayInputStream(data), data.length); exception.expect(IOException.class); DiskBasedCache.streamToBytes(cis, -1); } @@ -378,8 +356,8 @@ public void testStreamToBytesNegativeLength() throws IOException { @Test public void testStreamToBytesExcessiveLength() throws IOException { byte[] data = new byte[1]; - DiskBasedCache.CountingInputStream cis = - new DiskBasedCache.CountingInputStream(new ByteArrayInputStream(data), data.length); + CountingInputStream cis = + new CountingInputStream(new ByteArrayInputStream(data), data.length); exception.expect(IOException.class); DiskBasedCache.streamToBytes(cis, 2); } @@ -387,13 +365,39 @@ public void 
testStreamToBytesExcessiveLength() throws IOException { @Test public void testStreamToBytesOverflow() throws IOException { byte[] data = new byte[0]; - DiskBasedCache.CountingInputStream cis = - new DiskBasedCache.CountingInputStream( - new ByteArrayInputStream(data), 0x100000000L); + CountingInputStream cis = + new CountingInputStream(new ByteArrayInputStream(data), 0x100000000L); exception.expect(IOException.class); DiskBasedCache.streamToBytes(cis, 0x100000000L); // int value is 0 } + @Test + public void testReadHeaderListWithNegativeSize() throws IOException { + // If a cached header list is corrupted and begins with a negative size, + // verify that readHeaderList will throw an IOException. + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DiskBasedCache.writeInt(baos, -1); // negative size + CountingInputStream cis = + new CountingInputStream( + new ByteArrayInputStream(baos.toByteArray()), Integer.MAX_VALUE); + // Expect IOException due to negative size + exception.expect(IOException.class); + DiskBasedCache.readHeaderList(cis); + } + + @Test + public void testReadHeaderListWithGinormousSize() throws IOException { + // If a cached header list is corrupted and begins with 2GB size, verify + // that readHeaderList will throw EOFException rather than OutOfMemoryError. + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DiskBasedCache.writeInt(baos, Integer.MAX_VALUE); // 2GB size + CountingInputStream cis = + new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()), baos.size()); + // Expect EOFException when end of stream is reached + exception.expect(EOFException.class); + DiskBasedCache.readHeaderList(cis); + } + @Test public void testFileIsDeletedWhenWriteHeaderFails() throws IOException { // Create DataOutputStream that throws IOException @@ -405,23 +409,23 @@ public void testFileIsDeletedWhenWriteHeaderFails() throws IOException { doReturn(mockedOutputStream).when(readonly).createOutputStream(any(File.class)); // Attempt to write - readonly.put("key", CacheTestUtils.randomData(1111)); + readonly.put("key", randomData(1111)); // write is called at least once because each linked stream flushes when closed verify(mockedOutputStream, atLeastOnce()).write(anyInt()); - assertNull(cache.get("key")); + assertThat(readonly.get("key"), is(nullValue())); assertThat(listCachedFiles(), is(emptyArray())); // Note: original cache will try (without success) to read from file - assertNull(cache.get("key")); + assertThat(cache.get("key"), is(nullValue())); } @Test public void testIOExceptionInInitialize() throws IOException { // Cache a few kilobytes - cache.put("kilobyte", CacheTestUtils.randomData(1024)); - cache.put("kilobyte2", CacheTestUtils.randomData(1024)); - cache.put("kilobyte3", CacheTestUtils.randomData(1024)); + cache.put("kilobyte", randomData(1024)); + cache.put("kilobyte2", randomData(1024)); + cache.put("kilobyte3", randomData(1024)); // Create DataInputStream that throws IOException InputStream mockedInputStream = spy(InputStream.class); @@ -436,15 +440,15 @@ public void testIOExceptionInInitialize() throws IOException { broken.initialize(); // Everything is gone - assertNull(broken.get("kilobyte")); - assertNull(broken.get("kilobyte2")); - assertNull(broken.get("kilobyte3")); + assertThat(broken.get("kilobyte"), is(nullValue())); + assertThat(broken.get("kilobyte2"), is(nullValue())); + assertThat(broken.get("kilobyte3"), is(nullValue())); assertThat(listCachedFiles(), is(emptyArray())); // Verify that original cache can cope with missing files 
- assertNull(cache.get("kilobyte")); - assertNull(cache.get("kilobyte2")); - assertNull(cache.get("kilobyte3")); + assertThat(cache.get("kilobyte"), is(nullValue())); + assertThat(cache.get("kilobyte2"), is(nullValue())); + assertThat(cache.get("kilobyte3"), is(nullValue())); } @Test @@ -465,9 +469,9 @@ public void testCountingInputStreamByteCount() throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); //noinspection ThrowFromFinallyBlock try { - DiskBasedCacheUtility.writeInt(out, 1); - DiskBasedCacheUtility.writeLong(out, -1L); - DiskBasedCacheUtility.writeString(out, "hamburger"); + DiskBasedCache.writeInt(out, 1); + DiskBasedCache.writeLong(out, -1L); + DiskBasedCache.writeString(out, "hamburger"); } finally { //noinspection ThrowFromFinallyBlock out.close(); @@ -475,15 +479,14 @@ public void testCountingInputStreamByteCount() throws IOException { long bytesWritten = out.size(); // Read the bytes and compare the counts - DiskBasedCache.CountingInputStream cis = - new DiskBasedCache.CountingInputStream( - new ByteArrayInputStream(out.toByteArray()), bytesWritten); + CountingInputStream cis = + new CountingInputStream(new ByteArrayInputStream(out.toByteArray()), bytesWritten); try { assertThat(cis.bytesRemaining(), is(bytesWritten)); assertThat(cis.bytesRead(), is(0L)); - assertThat(DiskBasedCacheUtility.readInt(cis), is(1)); - assertThat(DiskBasedCacheUtility.readLong(cis), is(-1L)); - assertThat(DiskBasedCacheUtility.readString(cis), is("hamburger")); + assertThat(DiskBasedCache.readInt(cis), is(1)); + assertThat(DiskBasedCache.readLong(cis), is(-1L)); + assertThat(DiskBasedCache.readString(cis), is("hamburger")); assertThat(cis.bytesRead(), is(bytesWritten)); assertThat(cis.bytesRemaining(), is(0L)); } finally { @@ -498,7 +501,86 @@ public void testCountingInputStreamByteCount() throws IOException { public void testEmptyReadThrowsEOF() throws IOException { ByteArrayInputStream empty = new ByteArrayInputStream(new byte[] {}); exception.expect(EOFException.class); - DiskBasedCacheUtility.readInt(empty); + DiskBasedCache.readInt(empty); + } + + @Test + public void serializeInt() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DiskBasedCache.writeInt(baos, 0); + DiskBasedCache.writeInt(baos, 19791214); + DiskBasedCache.writeInt(baos, -20050711); + DiskBasedCache.writeInt(baos, Integer.MIN_VALUE); + DiskBasedCache.writeInt(baos, Integer.MAX_VALUE); + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + assertEquals(DiskBasedCache.readInt(bais), 0); + assertEquals(DiskBasedCache.readInt(bais), 19791214); + assertEquals(DiskBasedCache.readInt(bais), -20050711); + assertEquals(DiskBasedCache.readInt(bais), Integer.MIN_VALUE); + assertEquals(DiskBasedCache.readInt(bais), Integer.MAX_VALUE); + } + + @Test + public void serializeLong() throws Exception { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DiskBasedCache.writeLong(baos, 0); + DiskBasedCache.writeLong(baos, 31337); + DiskBasedCache.writeLong(baos, -4160); + DiskBasedCache.writeLong(baos, 4295032832L); + DiskBasedCache.writeLong(baos, -4314824046L); + DiskBasedCache.writeLong(baos, Long.MIN_VALUE); + DiskBasedCache.writeLong(baos, Long.MAX_VALUE); + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + assertEquals(DiskBasedCache.readLong(bais), 0); + assertEquals(DiskBasedCache.readLong(bais), 31337); + assertEquals(DiskBasedCache.readLong(bais), -4160); + assertEquals(DiskBasedCache.readLong(bais), 4295032832L); + 
assertEquals(DiskBasedCache.readLong(bais), -4314824046L); + assertEquals(DiskBasedCache.readLong(bais), Long.MIN_VALUE); + assertEquals(DiskBasedCache.readLong(bais), Long.MAX_VALUE); + } + + @Test + public void serializeString() throws Exception { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DiskBasedCache.writeString(baos, ""); + DiskBasedCache.writeString(baos, "This is a string."); + DiskBasedCache.writeString(baos, "ファイカス"); + CountingInputStream cis = + new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()), baos.size()); + assertEquals(DiskBasedCache.readString(cis), ""); + assertEquals(DiskBasedCache.readString(cis), "This is a string."); + assertEquals(DiskBasedCache.readString(cis), "ファイカス"); + } + + @Test + public void serializeHeaders() throws Exception { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + List<Header>
empty = new ArrayList<>(); + DiskBasedCache.writeHeaderList(empty, baos); + DiskBasedCache.writeHeaderList(null, baos); + List<Header>
twoThings = new ArrayList<>(); + twoThings.add(new Header("first", "thing")); + twoThings.add(new Header("second", "item")); + DiskBasedCache.writeHeaderList(twoThings, baos); + List<Header>
emptyKey = new ArrayList<>(); + emptyKey.add(new Header("", "value")); + DiskBasedCache.writeHeaderList(emptyKey, baos); + List<Header>
emptyValue = new ArrayList<>(); + emptyValue.add(new Header("key", "")); + DiskBasedCache.writeHeaderList(emptyValue, baos); + List<Header>
sameKeys = new ArrayList<>(); + sameKeys.add(new Header("key", "value")); + sameKeys.add(new Header("key", "value2")); + DiskBasedCache.writeHeaderList(sameKeys, baos); + CountingInputStream cis = + new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()), baos.size()); + assertEquals(DiskBasedCache.readHeaderList(cis), empty); + assertEquals(DiskBasedCache.readHeaderList(cis), empty); // null reads back empty + assertEquals(DiskBasedCache.readHeaderList(cis), twoThings); + assertEquals(DiskBasedCache.readHeaderList(cis), emptyKey); + assertEquals(DiskBasedCache.readHeaderList(cis), emptyValue); + assertEquals(DiskBasedCache.readHeaderList(cis), sameKeys); } @Test @@ -509,6 +591,7 @@ public void publicMethods() throws Exception { DiskBasedCache.class.getConstructor(DiskBasedCache.FileSupplier.class, int.class)); assertNotNull(DiskBasedCache.class.getConstructor(File.class)); assertNotNull(DiskBasedCache.class.getConstructor(DiskBasedCache.FileSupplier.class)); + assertNotNull(DiskBasedCache.class.getMethod("getFileForKey", String.class)); } @@ -516,19 +599,48 @@ public void publicMethods() throws Exception { public void initializeIfRootDirectoryDeleted() { temporaryFolder.delete(); - Cache.Entry entry = CacheTestUtils.randomData(101); + Cache.Entry entry = randomData(101); cache.put("key1", entry); - assertNull(cache.get("key1")); + assertThat(cache.get("key1"), is(nullValue())); // confirm that we can now store entries cache.put("key2", entry); - CacheTestUtils.assertThatEntriesAreEqual(cache.get("key2"), entry); + assertThatEntriesAreEqual(cache.get("key2"), entry); } /* Test helpers */ + private void assertThatEntriesAreEqual(Cache.Entry actual, Cache.Entry expected) { + assertThat(actual.data, is(equalTo(expected.data))); + assertThat(actual.etag, is(equalTo(expected.etag))); + assertThat(actual.lastModified, is(equalTo(expected.lastModified))); + assertThat(actual.responseHeaders, is(equalTo(expected.responseHeaders))); + assertThat(actual.serverDate, is(equalTo(expected.serverDate))); + assertThat(actual.softTtl, is(equalTo(expected.softTtl))); + assertThat(actual.ttl, is(equalTo(expected.ttl))); + } + + private Cache.Entry randomData(int length) { + Cache.Entry entry = new Cache.Entry(); + byte[] data = new byte[length]; + new Random(42).nextBytes(data); // explicit seed for reproducible results + entry.data = data; + return entry; + } + private File[] listCachedFiles() { return temporaryFolder.getRoot().listFiles(); } + + private int getEntrySizeOnDisk(String key) { + // Header size is: + // 4 bytes for magic int + // 8 + len(key) bytes for key (long length) + // 8 bytes for etag (long length + 0 characters) + // 32 bytes for serverDate, lastModified, ttl, and softTtl longs + // 4 bytes for length of header list int + // == 56 + len(key) bytes total. + return 56 + key.length(); + } } diff --git a/src/test/java/com/android/volley/toolbox/DiskBasedCacheUtilityTest.java b/src/test/java/com/android/volley/toolbox/DiskBasedCacheUtilityTest.java deleted file mode 100644 index fa12210e..00000000 --- a/src/test/java/com/android/volley/toolbox/DiskBasedCacheUtilityTest.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.android.volley.toolbox; - -import static org.junit.Assert.assertEquals; - -import com.android.volley.Header; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.EOFException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.runner.RunWith; -import org.robolectric.RobolectricTestRunner; -import org.robolectric.annotation.Config; - -@RunWith(RobolectricTestRunner.class) -@Config(sdk = 16) -public class DiskBasedCacheUtilityTest { - - @Rule public ExpectedException exception = ExpectedException.none(); - - @Test - public void testReadHeaderListWithNegativeSize() throws IOException { - // If a cached header list is corrupted and begins with a negative size, - // verify that readHeaderList will throw an IOException. - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DiskBasedCacheUtility.writeInt(baos, -1); // negative size - DiskBasedCache.CountingInputStream cis = - new DiskBasedCache.CountingInputStream( - new ByteArrayInputStream(baos.toByteArray()), Integer.MAX_VALUE); - // Expect IOException due to negative size - exception.expect(IOException.class); - DiskBasedCacheUtility.readHeaderList(cis); - } - - @Test - public void testReadHeaderListWithGinormousSize() throws IOException { - // If a cached header list is corrupted and begins with 2GB size, verify - // that readHeaderList will throw EOFException rather than OutOfMemoryError. 
- ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DiskBasedCacheUtility.writeInt(baos, Integer.MAX_VALUE); // 2GB size - DiskBasedCache.CountingInputStream cis = - new DiskBasedCache.CountingInputStream( - new ByteArrayInputStream(baos.toByteArray()), baos.size()); - // Expect EOFException when end of stream is reached - exception.expect(EOFException.class); - DiskBasedCacheUtility.readHeaderList(cis); - } - - @Test - public void serializeInt() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DiskBasedCacheUtility.writeInt(baos, 0); - DiskBasedCacheUtility.writeInt(baos, 19791214); - DiskBasedCacheUtility.writeInt(baos, -20050711); - DiskBasedCacheUtility.writeInt(baos, Integer.MIN_VALUE); - DiskBasedCacheUtility.writeInt(baos, Integer.MAX_VALUE); - ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); - assertEquals(DiskBasedCacheUtility.readInt(bais), 0); - assertEquals(DiskBasedCacheUtility.readInt(bais), 19791214); - assertEquals(DiskBasedCacheUtility.readInt(bais), -20050711); - assertEquals(DiskBasedCacheUtility.readInt(bais), Integer.MIN_VALUE); - assertEquals(DiskBasedCacheUtility.readInt(bais), Integer.MAX_VALUE); - } - - @Test - public void serializeLong() throws Exception { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DiskBasedCacheUtility.writeLong(baos, 0); - DiskBasedCacheUtility.writeLong(baos, 31337); - DiskBasedCacheUtility.writeLong(baos, -4160); - DiskBasedCacheUtility.writeLong(baos, 4295032832L); - DiskBasedCacheUtility.writeLong(baos, -4314824046L); - DiskBasedCacheUtility.writeLong(baos, Long.MIN_VALUE); - DiskBasedCacheUtility.writeLong(baos, Long.MAX_VALUE); - ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); - assertEquals(DiskBasedCacheUtility.readLong(bais), 0); - assertEquals(DiskBasedCacheUtility.readLong(bais), 31337); - assertEquals(DiskBasedCacheUtility.readLong(bais), -4160); - assertEquals(DiskBasedCacheUtility.readLong(bais), 4295032832L); - assertEquals(DiskBasedCacheUtility.readLong(bais), -4314824046L); - assertEquals(DiskBasedCacheUtility.readLong(bais), Long.MIN_VALUE); - assertEquals(DiskBasedCacheUtility.readLong(bais), Long.MAX_VALUE); - } - - @Test - public void serializeString() throws Exception { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DiskBasedCacheUtility.writeString(baos, ""); - DiskBasedCacheUtility.writeString(baos, "This is a string."); - DiskBasedCacheUtility.writeString(baos, "ファイカス"); - DiskBasedCache.CountingInputStream cis = - new DiskBasedCache.CountingInputStream( - new ByteArrayInputStream(baos.toByteArray()), baos.size()); - assertEquals(DiskBasedCacheUtility.readString(cis), ""); - assertEquals(DiskBasedCacheUtility.readString(cis), "This is a string."); - assertEquals(DiskBasedCacheUtility.readString(cis), "ファイカス"); - } - - @Test - public void serializeHeaders() throws Exception { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - List<Header>
empty = new ArrayList<>(); - DiskBasedCacheUtility.writeHeaderList(empty, baos); - DiskBasedCacheUtility.writeHeaderList(null, baos); - List<Header>
twoThings = new ArrayList<>(); - twoThings.add(new Header("first", "thing")); - twoThings.add(new Header("second", "item")); - DiskBasedCacheUtility.writeHeaderList(twoThings, baos); - List<Header>
emptyKey = new ArrayList<>(); - emptyKey.add(new Header("", "value")); - DiskBasedCacheUtility.writeHeaderList(emptyKey, baos); - List<Header>
emptyValue = new ArrayList<>(); - emptyValue.add(new Header("key", "")); - DiskBasedCacheUtility.writeHeaderList(emptyValue, baos); - List<Header>
sameKeys = new ArrayList<>(); - sameKeys.add(new Header("key", "value")); - sameKeys.add(new Header("key", "value2")); - DiskBasedCacheUtility.writeHeaderList(sameKeys, baos); - DiskBasedCache.CountingInputStream cis = - new DiskBasedCache.CountingInputStream( - new ByteArrayInputStream(baos.toByteArray()), baos.size()); - assertEquals(DiskBasedCacheUtility.readHeaderList(cis), empty); - assertEquals(DiskBasedCacheUtility.readHeaderList(cis), empty); // null reads back empty - assertEquals(DiskBasedCacheUtility.readHeaderList(cis), twoThings); - assertEquals(DiskBasedCacheUtility.readHeaderList(cis), emptyKey); - assertEquals(DiskBasedCacheUtility.readHeaderList(cis), emptyValue); - assertEquals(DiskBasedCacheUtility.readHeaderList(cis), sameKeys); - } -}
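
A note on the byte layout the serialize* tests above pin down: ints and longs are written little-endian (least significant byte first), and strings as a long byte count followed by the UTF-8 bytes. That is where getEntrySizeOnDisk's "8 + len(key)" term comes from, and why testGetWrongKey can size its header buffer at exactly 59 bytes for the key "bad" (56 + 3). The sketch below is a self-contained model of that layout for reference, not Volley's actual code; the real helpers are package-private and the class name here is invented.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;

    /** Sketch of the little-endian stream format the tests round-trip. */
    final class StreamFormatSketch {
        static void writeInt(OutputStream os, int n) throws IOException {
            // Little-endian: least significant byte first.
            os.write(n & 0xff);
            os.write((n >> 8) & 0xff);
            os.write((n >> 16) & 0xff);
            os.write((n >> 24) & 0xff);
        }

        static int readInt(InputStream is) throws IOException {
            int n = 0;
            for (int shift = 0; shift < 32; shift += 8) {
                int b = is.read();
                if (b == -1) throw new EOFException();
                n |= b << shift;
            }
            return n;
        }

        static void writeLong(OutputStream os, long n) throws IOException {
            for (int shift = 0; shift < 64; shift += 8) {
                os.write((int) (n >>> shift) & 0xff);
            }
        }

        static long readLong(InputStream is) throws IOException {
            long n = 0;
            for (int shift = 0; shift < 64; shift += 8) {
                int b = is.read();
                if (b == -1) throw new EOFException();
                n |= (long) b << shift;
            }
            return n;
        }

        // Strings: a long byte count, then the UTF-8 bytes (readString, omitted
        // here, reads the count back and then exactly that many bytes).
        static void writeString(OutputStream os, String s) throws IOException {
            byte[] b = s.getBytes(StandardCharsets.UTF_8);
            writeLong(os, b.length);
            os.write(b);
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            writeInt(baos, 0x20150306); // the cache magic number
            writeString(baos, "key");
            ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
            System.out.println(Integer.toHexString(readInt(bais))); // prints 20150306
        }
    }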
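
Similarly, the three trim tests fix the eviction contract around DiskBasedCache.HYSTERESIS_FACTOR (0.9f): once the running total reaches the maximum, entries are dropped oldest-first until the total falls below 0.9 x max. A simplified bookkeeping model, assuming the prune-after-insert ordering implied by the assertions (class and field names here are invented, and the real pruning also deletes files and handles delete failures):

    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.Map;

    /** Simplified model of the hysteresis pruning the trim tests exercise. */
    final class PruneModel {
        static final float HYSTERESIS_FACTOR = 0.9f; // matches DiskBasedCache
        private final long maxSizeBytes;
        private long totalSize;
        // Insertion-ordered, so the oldest entries are evicted first.
        private final Map<String, Long> sizes = new LinkedHashMap<>();

        PruneModel(long maxSizeBytes) {
            this.maxSizeBytes = maxSizeBytes;
        }

        void put(String key, long sizeOnDisk) {
            sizes.put(key, sizeOnDisk);
            totalSize += sizeOnDisk;
            pruneIfNeeded();
        }

        boolean contains(String key) {
            return sizes.containsKey(key);
        }

        private void pruneIfNeeded() {
            if (totalSize < maxSizeBytes) {
                return; // under budget, nothing to do
            }
            Iterator<Map.Entry<String, Long>> it = sizes.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Long> oldest = it.next();
                totalSize -= oldest.getValue();
                it.remove();
                // Evict until comfortably below the limit. A new entry that
                // still leaves the total at or above 0.9 * max after everything
                // else is gone gets evicted too, which is the difference between
                // the under- and at-hysteresis-threshold tests.
                if (totalSize < maxSizeBytes * HYSTERESIS_FACTOR) {
                    break;
                }
            }
        }
    }

With MAX_SIZE = 1024 * 1024, as in Volley's test fixture, 0.9f * MAX_SIZE is 943718.4, so an on-disk total of exactly (int) (HYSTERESIS_FACTOR * MAX_SIZE) stops the loop and "max" survives, while the "+ 1" variant keeps evicting until the cache is empty.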