diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml
new file mode 100644
index 00000000..9c2f3384
--- /dev/null
+++ b/.github/workflows/build-report.yml
@@ -0,0 +1,62 @@
+# Copyright © 2024 Cask Data, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This workflow will build a Java project with Maven
+# For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven
+# Note: Any changes to this workflow would be used only after merging into develop
+name: Build Unit Tests Report
+
+on:
+  workflow_run:
+    workflows:
+      - Build with unit tests
+    types:
+      - completed
+
+permissions:
+  actions: read    # Allows reading workflow run information
+  statuses: write  # Required if the action updates commit statuses
+  checks: write    # Required if it updates GitHub Checks API
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    # Skip report generation when the triggering build run was itself skipped.
+    if: ${{ github.event.workflow_run.conclusion != 'skipped' }}
+
+    steps:
+      # Mirrors the triggering workflow run's status onto this run.
+      # Pinned 1.0.0 version
+      - uses: marocchino/action-workflow_run-status@54b6e87d6cb552fc5f36dbe9a722a6048725917a
+
+      # workflow_run events do not share artifacts implicitly, so the
+      # triggering run's id and a token must be passed explicitly.
+      - name: Download artifact
+        uses: actions/download-artifact@v4
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          run-id: ${{ github.event.workflow_run.id }}
+          path: artifacts/
+
+      - name: Surefire Report
+        # Pinned 3.5.2 version
+        uses: mikepenz/action-junit-report@16a9560bd02f11e7e3bf6b3e2ef6bba6c9d07c32
+        if: always()
+        with:
+          report_paths: '**/target/surefire-reports/TEST-*.xml'
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          detailed_summary: true
+          # Attach the check to the commit that triggered the original build,
+          # not to this workflow_run event's own SHA.
+          commit: ${{ github.event.workflow_run.head_sha }}
+          check_name: Build Test Report
+
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 00000000..34580676
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,72 @@
+# Copyright © 2020 Cask Data, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This workflow will build a Java project with Maven
+# For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven
+# Note: Any changes to this workflow would be used only after merging into develop
+name: Build with unit tests
+
+on:
+  push:
+    branches: [ develop, release/** ]
+  pull_request:
+    branches: [ develop, release/** ]
+    types: [opened, synchronize, reopened, labeled]
+
+jobs:
+  build:
+    runs-on: k8s-runner-build
+
+    # We allow builds:
+    # 1) When it's a merge into a branch
+    # 2) For PRs that are labeled as build and
+    #    - It's a code change
+    #    - A build label was just added
+    # A bit complex, but prevents builds when other labels are manipulated
+    if: >
+      github.event_name == 'push'
+      || (contains(github.event.pull_request.labels.*.name, 'build')
+        && (github.event.action != 'labeled' || github.event.label.name == 'build')
+      )
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          # This workflow is triggered by push/pull_request, so the
+          # workflow_run context is empty here; check out the PR head SHA,
+          # falling back to the pushed SHA.
+          ref: ${{ github.event.pull_request.head.sha || github.sha }}
+          submodules: recursive
+      - name: Cache
+        uses: actions/cache@v3
+        with:
+          path: ~/.m2/repository
+          key: ${{ runner.os }}-maven-${{ github.workflow }}-${{ hashFiles('**/pom.xml') }}
+          restore-keys: |
+            ${{ runner.os }}-maven-${{ github.workflow }}
+      - name: Build with Maven
+        run: mvn clean test -fae -T 2 -B -V -DcloudBuild -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.httpconnectionManager.ttlSeconds=25
+      - name: Archive build artifacts
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: reports-${{ github.run_id }}
+          path: |
+            **/target/rat.txt
+            **/target/surefire-reports/*
+
+      - name: Checkstyle report
+        uses: tivv/checkstyle-github-action@fcf8ffb7c6a5c110bbc5dafb84aca54caf359b80
+        if: always()
+        with:
+          path: '**/checkstyle-result.xml'
+          # workflow_run is not populated for push/pull_request events;
+          # report against the commit actually being built.
+          commit: ${{ github.event.pull_request.head.sha || github.sha }}
diff --git a/checkstyle.xml b/checkstyle.xml
new file mode 100644
index 00000000..8b161228
--- /dev/null
+++ b/checkstyle.xml
@@ -0,0 +1,514 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java b/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java
index 7959c9c1..1017ac10 100644
--- a/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java
+++ b/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java
@@ -22,6 +22,7 @@
import com.google.common.collect.ImmutableList;
import com.google.common.io.CharStreams;
import com.google.common.io.Files;
+import org.iq80.leveldb.CompressionType;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBFactory;
import org.iq80.leveldb.DBIterator;
@@ -40,11 +41,16 @@
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.ArrayList;
import java.util.Date;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static com.google.common.base.Preconditions.checkArgument;
@@ -58,9 +64,12 @@
public class DbBenchmark
{
private final boolean useExisting;
+ private final Integer blockSize;
private final Integer writeBufferSize;
private final File databaseDir;
+ private final boolean compress;
private final double compressionRatio;
+ private final Integer numConcurrentThreads;
private long startTime;
enum Order
@@ -105,8 +114,11 @@ public DbBenchmark(Map<Flag, Object> flags)
num = (Integer) flags.get(Flag.num);
reads = (Integer) (flags.get(Flag.reads) == null ? flags.get(Flag.num) : flags.get(Flag.reads));
valueSize = (Integer) flags.get(Flag.value_size);
+ blockSize = (Integer) flags.get(Flag.block_size);
writeBufferSize = (Integer) flags.get(Flag.write_buffer_size);
+ compress = (Boolean) flags.get(Flag.compress);
compressionRatio = (Double) flags.get(Flag.compression_ratio);
+ numConcurrentThreads = (Integer) flags.get(Flag.num_concurrent_threads);
useExisting = (Boolean) flags.get(Flag.use_existing_db);
heapCounter = 0;
bytes = 0;
@@ -229,12 +241,12 @@ private void printHeader()
System.out.printf("Keys: %d bytes each\n", kKeySize);
System.out.printf("Values: %d bytes each (%d bytes after compression)\n",
valueSize,
- (int) (valueSize * compressionRatio + 0.5));
+ (int) (valueSize * (compress ? compressionRatio : 1) + 0.5));
System.out.printf("Entries: %d\n", num);
System.out.printf("RawSize: %.1f MB (estimated)\n",
((kKeySize + valueSize) * num) / 1048576.0);
System.out.printf("FileSize: %.1f MB (estimated)\n",
- (((kKeySize + valueSize * compressionRatio) * num)
+ (((kKeySize + valueSize * (compress ? compressionRatio : 1)) * num)
/ 1048576.0));
printWarnings();
System.out.printf("------------------------------------------------\n");
@@ -306,6 +318,8 @@ private void open()
if (writeBufferSize != null) {
options.writeBufferSize(writeBufferSize);
}
+ options.compressionType(compress ? CompressionType.SNAPPY : CompressionType.NONE);
+ options.blockSize(blockSize.intValue());
db = factory.open(databaseDir, options);
}
@@ -445,14 +459,67 @@ else if (nextReport < 500000) {
private void readSequential()
{
- for (int loops = 0; loops < 5; loops++) {
- DBIterator iterator = db.iterator();
- for (int i = 0; i < reads && iterator.hasNext(); i++) {
- Map.Entry<byte[], byte[]> entry = iterator.next();
- bytes += entry.getKey().length + entry.getValue().length;
- finishedSingleOp();
+ class Result
+ {
+ private final long opCount;
+ private final long bytes;
+
+ public Result(long opCount, long bytes)
+ {
+ this.opCount = opCount;
+ this.bytes = bytes;
+ }
+
+ public long getOpCount()
+ {
+ return opCount;
+ }
+
+ public long getBytes()
+ {
+ return bytes;
+ }
+ }
+
+ Callable<Result> run = new Callable<Result>()
+ {
+ @Override
+ public Result call() throws Exception
+ {
+ int logicalBytes = 0;
+ int opCount = 0;
+ try (DBIterator iterator = db.iterator()) {
+ for (int i = 0; i < reads && iterator.hasNext(); i++) {
+ opCount++;
+ Map.Entry<byte[], byte[]> entry = iterator.next();
+ logicalBytes += entry.getKey().length + entry.getValue().length;
+ }
+ }
+ return new Result(opCount, logicalBytes);
}
- Closeables.closeQuietly(iterator);
+ };
+
+ ExecutorService executor = Executors.newFixedThreadPool(numConcurrentThreads);
+ List<Future<Result>> futureList = new ArrayList<Future<Result>>();
+ for (int i = 0; i < numConcurrentThreads; i++) {
+ futureList.add(executor.submit(run));
+ }
+ for (Future<Result> future : futureList) {
+ try {
+ Result result = future.get();
+ done += result.getOpCount();
+ bytes += result.getBytes();
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ executor.shutdownNow();
+ try {
+ executor.awaitTermination(30, TimeUnit.SECONDS);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
}
}
@@ -706,6 +773,15 @@ public Object parseValue(String value)
return ImmutableList.copyOf(Splitter.on(",").trimResults().omitEmptyStrings().split(value));
}
},
+ // Whether compress data or not
+ compress(true)
+ {
+ @Override
+ public Object parseValue(String value)
+ {
+ return Boolean.parseBoolean(value);
+ }
+ },
// Arrange to generate values that shrink to this fraction of
// their original size after compression
@@ -760,6 +836,16 @@ public Object parseValue(String value)
}
},
+ // Size of sstable block
+ block_size(4096)
+ {
+ @Override
+ public Object parseValue(String value)
+ {
+ return Integer.parseInt(value);
+ }
+ },
+
// Size of each value
value_size(100)
{
@@ -802,6 +888,16 @@ public Object parseValue(String value)
}
},
+ // Number of concurrent threads. Used by sequential read operation only at the moment.
+ num_concurrent_threads(1)
+ {
+ @Override
+ public Object parseValue(String value)
+ {
+ return Integer.parseInt(value);
+ }
+ },
+
// Use the db with the following name.
db("/tmp/dbbench")
{
diff --git a/leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java b/leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java
index cf32c5df..a1eccb86 100644
--- a/leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java
+++ b/leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java
@@ -17,6 +17,8 @@
*/
package org.iq80.leveldb.impl;
+import org.iq80.leveldb.util.Slice;
+
import java.util.concurrent.atomic.AtomicInteger;
public class FileMetaData
@@ -48,8 +50,14 @@ public FileMetaData(long number, long fileSize, InternalKey smallest, InternalKe
{
this.number = number;
this.fileSize = fileSize;
- this.smallest = smallest;
- this.largest = largest;
+ // Copy bytes of the key from slice so the byte array backing the slice can be freed.
+ // This is necessary to avoid pinning down the byte array which could be large because of
+ // containing both key and value.
+ this.smallest = new InternalKey(new Slice(smallest.getUserKey().getBytes()),
+ smallest.getSequenceNumber(), smallest.getValueType());
+
+ this.largest = new InternalKey(new Slice(largest.getUserKey().getBytes()),
+ largest.getSequenceNumber(), largest.getValueType());
}
public long getFileSize()