'spark3-metadata' patch applied (#61)
* 'spark3-metadata' patch applied

remove unnecessary variables from MetadataMicroBatchInputPartitionReader
add test
timestamp fix
WIP Initial metadata reporting commit

* remove unused import in ArchiveBatchSliceCollection.java

* remove @NotNull from ArchiveBatchSliceCollection.java
eemhu authored Aug 7, 2024
1 parent 91fafd9 commit 706a696
Showing 14 changed files with 396 additions and 84 deletions.
@@ -267,6 +267,6 @@ public InputPartition[] planInputPartitions(Offset start, Offset end) {

@Override
public PartitionReaderFactory createReaderFactory() {
-return new TeragrepPartitionReaderFactory();
+return new TeragrepPartitionReaderFactory(config.isMetadataQuery);
}
}
@@ -65,8 +65,9 @@ public class ArchiveS3ObjectMetadata implements Serializable {
public final String host;
public final long logtimeEpoch;
public final long compressedSize;
+public final long uncompressedSize;

-public ArchiveS3ObjectMetadata(String id, String bucket, String path, String directory, String stream, String host, long logtimeEpoch, long compressedSize) {
+public ArchiveS3ObjectMetadata(String id, String bucket, String path, String directory, String stream, String host, long logtimeEpoch, long compressedSize, long uncompressedSize) {
this.id = id;
this.bucket = bucket;
this.path = path;
@@ -75,6 +76,7 @@ public ArchiveS3ObjectMetadata(String id, String bucket, String path, String dir
this.host = host;
this.logtimeEpoch = logtimeEpoch;
this.compressedSize = compressedSize;
+this.uncompressedSize = uncompressedSize;
}

@Override
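The widened constructor now carries both object sizes. A brief construction sketch with illustrative placeholder values (none of them come from this commit); the last two arguments are the compressed size and the newly added uncompressed size, in bytes:

import com.teragrep.pth_06.ArchiveS3ObjectMetadata;

public final class ArchiveS3ObjectMetadataExample {
    public static void main(String[] args) {
        // all argument values are placeholders for illustration only
        ArchiveS3ObjectMetadata meta = new ArchiveS3ObjectMetadata(
                "id-1",                  // id
                "example-bucket",        // bucket
                "2024/08/07/example.gz", // path
                "example-directory",     // directory
                "example-stream",        // stream
                "example-host",          // host
                1722988800L,             // logtimeEpoch (seconds)
                1024L,                   // compressedSize
                8192L                    // uncompressedSize (new in this commit)
        );
        // the new field is public and final, like the other metadata fields
        System.out.println(meta.uncompressedSize);
    }
}
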
5 changes: 5 additions & 0 deletions src/main/java/com/teragrep/pth_06/config/Config.java
@@ -68,6 +68,8 @@ public final class Config {
public final boolean isArchiveEnabled;
public final boolean isKafkaEnabled;

+public final boolean isMetadataQuery;

public Config(Map<String, String> opts) {
this.query = opts.get("queryXML");
if (this.query == null){
@@ -96,5 +98,8 @@ public Config(Map<String, String> opts) {
if (!isArchiveEnabled && !isKafkaEnabled) {
throw new IllegalStateException("No datasources enabled");
}

+// fetch metadata (defaults to false)
+isMetadataQuery = opts.getOrDefault("metadataQuery.enabled", "false").equalsIgnoreCase("true");
}
}
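For orientation, a hedged sketch of enabling the new mode from the Spark side. Only the metadataQuery.enabled key is introduced by this commit; the data source format name, the queryXML value and the rest of the wiring here are assumptions, not taken from the diff:

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public final class MetadataQueryExample {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().appName("metadata-query-example").getOrCreate();

        // format name and queryXML content are placeholders; metadataQuery.enabled
        // defaults to false and is parsed case-insensitively (see Config above)
        Dataset<Row> metadata = spark
                .readStream()
                .format("com.teragrep.pth_06.TeragrepDatasource") // assumed format name
                .option("queryXML", "<index value=\"example\" operation=\"EQUALS\"/>")
                .option("metadataQuery.enabled", "true")
                .load();

        metadata.printSchema();
    }
}
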
3 changes: 2 additions & 1 deletion src/main/java/com/teragrep/pth_06/planner/ArchiveQuery.java
@@ -47,6 +47,7 @@
package com.teragrep.pth_06.planner;

import org.jooq.Record10;
+import org.jooq.Record11;
import org.jooq.Result;
import org.jooq.types.ULong;

@@ -61,7 +62,7 @@
* @author Mikko Kortelainen
*/
public interface ArchiveQuery {
-Result<Record10<ULong, String, String, String, String, Date, String, String, Long, ULong>> processBetweenUnixEpochHours(long startHour, long endHour);
+Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> processBetweenUnixEpochHours(long startHour, long endHour);

void commit(long offset);

@@ -135,12 +135,13 @@ private void seekToResults() {

/**
* Get data from the SliceTable between startHour and endHour.
*
* @param startHour Exclusive start hour
* @param endHour Inclusive end hour
* @return Data between start hour and end hour.
*/
@Override
-public Result<Record10<ULong, String, String, String, String, Date, String, String, Long, ULong>> processBetweenUnixEpochHours(
+public Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> processBetweenUnixEpochHours(
long startHour, long endHour) {
LOGGER.debug("ArchiveQueryProcessor.processBetweenUnixEpochHours> [{}, {}[", startHour, endHour);

16 changes: 11 additions & 5 deletions src/main/java/com/teragrep/pth_06/planner/StreamDBClient.java
@@ -163,7 +163,7 @@ void setIncludeBeforeEpoch(long includeBeforeEpoch) {

int pullToSliceTable(Date day) {
NestedTopNQuery nestedTopNQuery = new NestedTopNQuery();
-SelectOnConditionStep<Record10<ULong, String, String, String, String, Date, String, String, Long, ULong>> select =
+@NotNull SelectOnConditionStep<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> select =
ctx.select(
JOURNALDB.LOGFILE.ID,
nestedTopNQuery.directory,
@@ -174,7 +174,8 @@ int pullToSliceTable(Date day) {
JOURNALDB.BUCKET.NAME,
JOURNALDB.LOGFILE.PATH,
nestedTopNQuery.logtime,
-JOURNALDB.LOGFILE.FILE_SIZE
+JOURNALDB.LOGFILE.FILE_SIZE,
+JOURNALDB.LOGFILE.UNCOMPRESSED_FILE_SIZE
)
.from(nestedTopNQuery.getTableStatement(journaldbCondition, day))
.join(JOURNALDB.LOGFILE).on(JOURNALDB.LOGFILE.ID.eq(nestedTopNQuery.id))
@@ -228,7 +229,7 @@ void deleteRangeFromSliceTable(long start, long end) {
.execute();
}

-Result<Record10<ULong, String, String, String, String, Date, String, String, Long, ULong>> getHourRange(long excludedStartHour, long includedEndHour) {
+@NotNull Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> getHourRange(long excludedStartHour, long includedEndHour) {
return ctx.select(
SliceTable.id,
SliceTable.directory,
@@ -239,7 +240,8 @@ Result<Record10<ULong, String, String, String, String, Date, String, String, Lon
SliceTable.bucket,
SliceTable.path,
SliceTable.logtime,
-SliceTable.filesize
+SliceTable.filesize,
+SliceTable.uncompressedFilesize
)
.from(SliceTable.SLICE_TABLE)
.where(
@@ -269,6 +271,9 @@ public static class SliceTable {
public static final Field<String> path = DSL.field(DSL.name(sliceTableName, "path"), String.class);
public static final Field<Long> logtime = DSL.field(DSL.name(sliceTableName, "logtime"), Long.class);
public static final Field<ULong> filesize = DSL.field(DSL.name(sliceTableName, "filesize"), ULong.class);
+// additional metadata
+public static final Field<ULong> uncompressedFilesize = DSL.field(DSL.name(sliceTableName, "uncompressed_filesize"), ULong.class);


private static final Index logtimeIndex = DSL.index(DSL.name("ix_logtime"));

@@ -286,7 +291,8 @@ private static void create(DSLContext ctx) {
bucket,
path,
logtime,
-filesize
+filesize,
+uncompressedFilesize
);
query.execute();

@@ -50,7 +50,7 @@
import com.teragrep.pth_06.planner.offset.DatasourceOffset;
import org.apache.spark.sql.connector.read.streaming.Offset;
import org.jooq.Record;
-import org.jooq.Record10;
+import org.jooq.Record11;
import org.jooq.Result;
import org.jooq.types.ULong;
import org.slf4j.Logger;
@@ -74,7 +74,7 @@ public ArchiveBatchSliceCollection processRange(Offset start, Offset end) {

this.clear(); // clear internal list

-Result<Record10<ULong, String, String, String, String, Date, String, String, Long, ULong>>
+Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>>
result = aq.processBetweenUnixEpochHours(((DatasourceOffset)start).getArchiveOffset().offset(),
((DatasourceOffset)end).getArchiveOffset().offset());

@@ -89,7 +89,8 @@ public ArchiveBatchSliceCollection processRange(Offset start, Offset end) {
r.get(2, String.class), // stream
r.get(3, String.class), // host
r.get(8, Long.class), // logtime
-r.get(9, Long.class) // compressedSize
+r.get(9, Long.class), // compressedSize
+r.get(10, Long.class) // uncompressedSize
)
)
);
68 changes: 68 additions & 0 deletions src/main/java/com/teragrep/pth_06/task/Metadata.java
@@ -0,0 +1,68 @@
/*
* This program handles user requests that require archive access.
* Copyright (C) 2022 Suomen Kanuuna Oy
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://github.com/teragrep/teragrep/blob/main/LICENSE>.
*
*
* Additional permission under GNU Affero General Public License version 3
* section 7
*
* If you modify this Program, or any covered work, by linking or combining it
* with other code, such other code is not for that reason alone subject to any
* of the requirements of the GNU Affero GPL version 3 as long as this Program
* is the same Program as licensed from Suomen Kanuuna Oy without any additional
* modifications.
*
* Supplemented terms under GNU Affero General Public License version 3
* section 7
*
* Origin of the software must be attributed to Suomen Kanuuna Oy. Any modified
* versions must be marked as "Modified version of" The Program.
*
* Names of the licensors and authors may not be used for publicity purposes.
*
* No rights are granted for use of trade names, trademarks, or service marks
* which are in The Program if any.
*
* Licensee must indemnify licensors and authors for any liability that these
* contractual assumptions impose on licensors and authors.
*
* To the extent this program is licensed as part of the Commercial versions of
* Teragrep, the applicable Commercial License may apply to this file if you as
* a licensee so wish it.
*/
package com.teragrep.pth_06.task;

import java.io.Serializable;

public class Metadata implements Serializable {
private static final long serialVersionUID = 1L;

private final long uncompressed;
private final long compressed;

public Metadata(long uncompressed, long compressed) {
this.uncompressed = uncompressed;
this.compressed = compressed;
}

public long getCompressed() {
return compressed;
}

public long getUncompressed() {
return uncompressed;
}
}
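
The partition reader below serializes this value object with Gson and stores the resulting JSON string as the row's raw column. A minimal sketch of what that serialization produces (the sizes are example values):

import com.google.gson.Gson;
import com.teragrep.pth_06.task.Metadata;

public final class MetadataJsonExample {
    public static void main(String[] args) {
        // example sizes in bytes: 8192 uncompressed, 1024 compressed
        Metadata metadata = new Metadata(8192L, 1024L);

        // Gson reads the private fields reflectively, so this prints
        // {"uncompressed":8192,"compressed":1024}
        String json = new Gson().toJson(metadata);
        System.out.println(json);
    }
}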
@@ -0,0 +1,156 @@
/*
* This program handles user requests that require archive access.
* Copyright (C) 2022 Suomen Kanuuna Oy
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://github.com/teragrep/teragrep/blob/main/LICENSE>.
*
*
* Additional permission under GNU Affero General Public License version 3
* section 7
*
* If you modify this Program, or any covered work, by linking or combining it
* with other code, such other code is not for that reason alone subject to any
* of the requirements of the GNU Affero GPL version 3 as long as this Program
* is the same Program as licensed from Suomen Kanuuna Oy without any additional
* modifications.
*
* Supplemented terms under GNU Affero General Public License version 3
* section 7
*
* Origin of the software must be attributed to Suomen Kanuuna Oy. Any modified
* versions must be marked as "Modified version of" The Program.
*
* Names of the licensors and authors may not be used for publicity purposes.
*
* No rights are granted for use of trade names, trademarks, or service marks
* which are in The Program if any.
*
* Licensee must indemnify licensors and authors for any liability that these
* contractual assumptions impose on licensors and authors.
*
* To the extent this program is licensed as part of the Commercial versions of
* Teragrep, the applicable Commercial License may apply to this file if you as
* a licensee so wish it.
*/
package com.teragrep.pth_06.task;

import com.amazonaws.services.s3.AmazonS3;
import com.google.gson.Gson;
import com.teragrep.pth_06.ArchiveS3ObjectMetadata;
import com.teragrep.pth_06.task.s3.Pth06S3Client;
import com.teragrep.rad_01.AuditPlugin;
import com.teragrep.rad_01.AuditPluginFactory;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter;
import org.apache.spark.sql.connector.read.PartitionReader;
import org.apache.spark.unsafe.types.UTF8String;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.LinkedList;

public class MetadataMicroBatchInputPartitionReader implements PartitionReader<InternalRow> {
final Logger LOGGER = LoggerFactory.getLogger(MetadataMicroBatchInputPartitionReader.class);

private final AuditPlugin auditPlugin;
private UnsafeRowWriter rowWriter;

private final LinkedList<ArchiveS3ObjectMetadata> taskObjectList;

private long currentOffset;

public MetadataMicroBatchInputPartitionReader(LinkedList<ArchiveS3ObjectMetadata> taskObjectList,
String TeragrepAuditQuery,
String TeragrepAuditReason,
String TeragrepAuditUser,
String TeragrepAuditPluginClassName) {
this.taskObjectList = taskObjectList;

AuditPluginFactory auditPluginFactory = new AuditPluginFactory(TeragrepAuditPluginClassName);

try {
this.auditPlugin = auditPluginFactory.getAuditPlugin();
this.auditPlugin.setQuery(TeragrepAuditQuery);
this.auditPlugin.setReason(TeragrepAuditReason);
this.auditPlugin.setUser(TeragrepAuditUser);

} catch (ClassNotFoundException | InvocationTargetException | InstantiationException | IllegalAccessException | NoSuchMethodException e) {
e.printStackTrace();
throw new RuntimeException(e);
}

this.rowWriter = new UnsafeRowWriter(11);
this.currentOffset = 0L;

}
@Override
public boolean next() throws IOException {
if (taskObjectList.isEmpty()) {
return false;
} else {
rowWriter.reset();
rowWriter.zeroOutNullBytes();
ArchiveS3ObjectMetadata taskObject = taskObjectList.removeFirst();

// Use metadata java object to easily form a json representation of metadata
final String rawColumn = new Gson().toJson(new Metadata(taskObject.uncompressedSize, taskObject.compressedSize));
// use logtimeEpoch as _time
rowWriter.write(0, rfc3339ToEpoch(Instant.ofEpochSecond(taskObject.logtimeEpoch).atZone(ZoneId.systemDefault())));
rowWriter.write(1, UTF8String.fromString(rawColumn));
rowWriter.write(2, UTF8String.fromString(taskObject.directory));
rowWriter.write(3, UTF8String.fromString(taskObject.stream));
rowWriter.write(4, UTF8String.fromString(taskObject.host));
rowWriter.write(5, UTF8String.fromString(""));
rowWriter.write(6, UTF8String.fromString(taskObject.id));
rowWriter.write(7, currentOffset++);
rowWriter.write(8, UTF8String.fromString(""));
return true;
}
}

@Override
public InternalRow get() {
/*auditPlugin.audit(
epochMicros,
rfc5424Frame.msg.toBytes(),
this.directory.getBytes(),
this.stream.getBytes(),
this.host.getBytes(),
source,
this.id.toString(),
currentOffset
);*/
return rowWriter.getRow();
}

@Override
public void close() throws IOException {
// no-op
}

static long rfc3339ToEpoch(ZonedDateTime zonedDateTime) {
final Instant instant = zonedDateTime.toInstant();

final long MICROS_PER_SECOND = 1000L * 1000L;
final long NANOS_PER_MICROS = 1000L;
final long sec = Math.multiplyExact(instant.getEpochSecond(), MICROS_PER_SECOND);

return Math.addExact(sec, instant.getNano() / NANOS_PER_MICROS);
}
}
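
rfc3339ToEpoch turns the per-object log time into microseconds since the epoch, the representation Spark uses internally for TimestampType values such as the one written at ordinal 0 above. A small standalone check of the same arithmetic, with an example epoch second value chosen purely for illustration:

import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;

public final class EpochMicrosExample {
    public static void main(String[] args) {
        long logtimeEpochSeconds = 1_700_000_000L; // example value only

        // same conversion path as the reader: epoch seconds -> ZonedDateTime -> Instant
        ZonedDateTime zonedDateTime = Instant.ofEpochSecond(logtimeEpochSeconds).atZone(ZoneId.systemDefault());
        Instant instant = zonedDateTime.toInstant();

        // seconds scaled up to microseconds, plus the nanosecond remainder scaled down
        long micros = Math.addExact(
                Math.multiplyExact(instant.getEpochSecond(), 1_000_000L),
                instant.getNano() / 1_000L
        );

        System.out.println(micros); // prints 1700000000000000
    }
}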