Skip to content

Commit

Permalink
feat(core): adds bulk rewrap to sdk and service (#1835)
Browse files Browse the repository at this point in the history
### Proposed Changes

* Creates a new Rewrap API to support Bulk Rewrap Operations

### Checklist

- [x] I have added or updated unit tests
- [x] I have added or updated integration tests (if appropriate)
- [x] I have added or updated documentation

### Testing Instructions

- Tests already check for backwards compatibility.
- Adds a bulk benchmark command to the examples for testing.
- Adds Bulk Round Trip tests.

---------

Co-authored-by: Dave Mihalcik <[email protected]>
Co-authored-by: Tyler Biscoe <[email protected]>
  • Loading branch information
3 people authored Jan 17, 2025
1 parent 456639e commit 11698ae
Show file tree
Hide file tree
Showing 18 changed files with 2,765 additions and 651 deletions.
436 changes: 433 additions & 3 deletions docs/grpc/index.html

Large diffs are not rendered by default.

45 changes: 45 additions & 0 deletions docs/openapi/kas/kas.swagger.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

9 changes: 7 additions & 2 deletions examples/cmd/benchmark.go
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,10 @@ func runBenchmark(cmd *cobra.Command, args []string) error {
}

totalTime := time.Since(startTime)
averageLatency := totalDuration / time.Duration(successCount)
var averageLatency time.Duration
if successCount > 0 {
averageLatency = totalDuration / time.Duration(successCount)
}
throughput := float64(successCount) / totalTime.Seconds()

// Print results
Expand All @@ -214,7 +217,9 @@ func runBenchmark(cmd *cobra.Command, args []string) error {
cmd.Printf("Failed Requests: %d\n", errorCount)
cmd.Printf("Concurrent Requests: %d\n", config.ConcurrentRequests)
cmd.Printf("Total Time: %s\n", totalTime)
cmd.Printf("Average Latency: %s\n", averageLatency)
if successCount > 0 {
cmd.Printf("Average Latency: %s\n", averageLatency)
}
cmd.Printf("Throughput: %.2f requests/second\n", throughput)

if errorCount > 0 {
Expand Down
169 changes: 169 additions & 0 deletions examples/cmd/benchmark_bulk.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,169 @@
package cmd

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"time"

"github.com/opentdf/platform/sdk"
"github.com/spf13/cobra"
)

// init registers the benchmark-bulk subcommand and its flags on the
// examples command tree.
func init() {
	cmd := &cobra.Command{
		Use:   "benchmark-bulk",
		Short: "OpenTDF benchmark tool",
		Long:  `A OpenTDF benchmark tool to measure Bulk Rewrap.`,
		RunE:  runBenchmarkBulk,
	}

	flags := cmd.Flags()
	flags.IntVar(&config.RequestCount, "count", 100, "Total number of requests")
	flags.Var(&config.TDFFormat, "tdf", "TDF format (tdf3 or nanotdf)")

	ExamplesCmd.AddCommand(cmd)
}

// runBenchmarkBulk writes one sample TDF (nano or standard, chosen by the
// --tdf flag) to sensitive.txt.tdf, then times a single BulkDecrypt call
// that rewraps that same ciphertext --count times, and prints a summary of
// successes, failures, and throughput.
//
// It returns an error only for setup failures (client creation, output
// creation, encryption); decrypt failures are counted and reported instead.
func runBenchmarkBulk(cmd *cobra.Command, args []string) error {
	in := strings.NewReader("Hello, World!")

	// Create new offline client.
	client, err := newSDK()
	if err != nil {
		return err
	}

	out := os.Stdout
	if outputName != "-" {
		// NOTE(review): the output name is hard-coded, and the bulk decrypt
		// below always reads "sensitive.txt.tdf" — even when outputName is
		// "-" and the ciphertext went to stdout instead. Confirm intent.
		out, err = os.Create("sensitive.txt.tdf")
		if err != nil {
			return err
		}
	}
	defer func() {
		if outputName != "-" {
			out.Close()
		}
	}()

	dataAttributes := []string{"https://example.com/attr/attr1/value/value1"}
	if config.TDFFormat == NanoTDF {
		nanoTDFConfig, err := client.NewNanoTDFConfig()
		if err != nil {
			return err
		}
		nanoTDFConfig.SetAttributes(dataAttributes)
		nanoTDFConfig.EnableECDSAPolicyBinding()
		err = nanoTDFConfig.SetKasURL(fmt.Sprintf("http://%s/kas", "localhost:8080"))
		if err != nil {
			return err
		}

		_, err = client.CreateNanoTDF(out, in, *nanoTDFConfig)
		if err != nil {
			return err
		}

		if outputName != "-" {
			err = cat(cmd, outputName)
			if err != nil {
				return err
			}
		}
	} else {
		tdf, err := client.CreateTDF(
			out, in,
			sdk.WithDataAttributes(dataAttributes...),
			sdk.WithKasInformation(
				sdk.KASInfo{
					URL:       fmt.Sprintf("http://%s", "localhost:8080"),
					PublicKey: "",
				}),
			sdk.WithAutoconfigure(false))
		if err != nil {
			return err
		}

		// Echo the manifest so the operator can inspect what was created.
		manifestJSON, err := json.MarshalIndent(tdf.Manifest(), "", " ")
		if err != nil {
			return err
		}
		cmd.Println(string(manifestJSON))
	}

	var bulkErrors []error    // per-TDF errors reported by BulkDecrypt
	var requestFailure error  // whole-request failure (nothing decrypted)

	// operation performs one BulkDecrypt over --count readers of the same
	// sample ciphertext, recording either per-TDF errors or a single
	// request-level failure.
	operation := func() {
		// Read the ciphertext once; each BulkTDF gets its own reader over it.
		cipher, err := os.ReadFile("sensitive.txt.tdf")
		if err != nil {
			requestFailure = fmt.Errorf("file open error: %v", err)
			return
		}

		format := sdk.Nano
		if config.TDFFormat == "tdf3" {
			format = sdk.Standard
		}
		bulkTdfs := make([]*sdk.BulkTDF, 0, config.RequestCount)
		for i := 0; i < config.RequestCount; i++ {
			bulkTdfs = append(bulkTdfs, &sdk.BulkTDF{Reader: bytes.NewReader(cipher), Writer: io.Discard})
		}
		err = client.BulkDecrypt(context.Background(), sdk.WithTDFs(bulkTdfs...), sdk.WithTDFType(format))
		if err != nil {
			if errList, ok := sdk.FromBulkErrors(err); ok {
				// Partial failure: some individual TDFs failed to rewrap.
				bulkErrors = errList
			} else {
				requestFailure = err
			}
		}
	}

	// Start the benchmark.
	startTime := time.Now()
	operation()
	totalTime := time.Since(startTime)

	// Count errors and collect error messages. A request-level failure
	// counts every TDF as failed.
	errorCount := 0
	successCount := 0
	if requestFailure != nil {
		errorCount = config.RequestCount
		bulkErrors = append(bulkErrors, requestFailure)
	} else {
		errorCount = len(bulkErrors)
		successCount = config.RequestCount - errorCount
	}
	throughput := float64(successCount) / totalTime.Seconds()

	// Aggregate identical error messages to keep the summary readable.
	errorMsgs := make(map[string]int)
	for _, err := range bulkErrors {
		errorMsgs[err.Error()]++
	}

	// Print results.
	cmd.Printf("\nBenchmark Results:\n")
	cmd.Printf("Total Decrypts: %d\n", config.RequestCount)
	cmd.Printf("Successful Decrypts: %d\n", successCount)
	cmd.Printf("Failed Decrypts: %d\n", errorCount)
	cmd.Printf("Total Time: %s\n", totalTime)
	cmd.Printf("Throughput: %.2f requests/second\n", throughput)

	if errorCount > 0 {
		cmd.Printf("\nError Summary:\n")
		for errMsg, count := range errorMsgs {
			cmd.Printf("%s: %d occurrences\n", errMsg, count)
		}
	}

	return nil
}
Loading

0 comments on commit 11698ae

Please sign in to comment.