diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index af727bb5..4bf16af1 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -91,3 +91,161 @@ jobs: else echo "SQL Server container 'sqlserver' was not found." fi + + benchmarks: + runs-on: ubuntu-latest + timeout-minutes: 75 + permissions: + contents: read + pull-requests: write + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + fetch-depth: 0 + - name: Setup go + uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6 + with: + go-version: '1.25.7' + - name: Install sqlcmd + run: | + curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo tee /etc/apt/trusted.gpg.d/microsoft.asc + curl -sSL https://packages.microsoft.com/config/ubuntu/$(lsb_release -rs)/prod.list | sudo tee /etc/apt/sources.list.d/mssql-release.list + sudo apt-get update + sudo ACCEPT_EULA=Y apt-get install -y mssql-tools18 + echo "/opt/mssql-tools18/bin" >> $GITHUB_PATH + - name: Start SQL Server container + shell: bash + run: | + export SQLCMDPASSWORD=$(date +%s|sha256sum|base64|head -c 32) + echo "SQLCMDPASSWORD=$SQLCMDPASSWORD" >> $GITHUB_ENV + docker run -m 2GB -e ACCEPT_EULA=1 -d --name sqlserver \ + -p 1433:1433 -e SA_PASSWORD=$SQLCMDPASSWORD \ + mcr.microsoft.com/mssql/server:2025-latest + # Wait for SQL Server to be ready + for i in {1..30}; do + if sqlcmd -S localhost -U sa -P "$SQLCMDPASSWORD" -C -Q "SELECT 1" > /dev/null 2>&1; then + echo "SQL Server is ready (attempt $i)" + break + fi + echo "Waiting for SQL Server... 
(attempt $i/30)" + sleep 2 + done + - name: Warmup run (stabilize CPU/caches) + shell: bash + run: | + export SQLUSER=sa + export SQLPASSWORD=$SQLCMDPASSWORD + export DATABASE=master + export HOST=localhost + export SQLSERVER_DSN="sqlserver://${SQLUSER}:${SQLPASSWORD}@localhost:1433?database=${DATABASE}&trustServerCertificate=true" + BENCH_PATTERN='Benchmark(BulkMakeParam|ConvertAssign|Decode|Encode|ManglePassword|Parse|Read|RoundTrip|Send|Str2ucs2|TdsBuffer|Ucs22str|Write)' + # Throwaway run to warm CPU caches, prime the Go runtime, and settle + # the OS scheduler. Results are discarded — ensures both measurement + # runs start from the same steady-state conditions. + go test -run='^$' -bench="$BENCH_PATTERN" \ + -benchtime=100ms -count=1 -timeout=10m . ./msdsn > /dev/null 2>&1 + - name: Run baseline benchmarks (main) + shell: bash + run: | + export SQLUSER=sa + export SQLPASSWORD=$SQLCMDPASSWORD + export DATABASE=master + export HOST=localhost + export SQLSERVER_DSN="sqlserver://${SQLUSER}:${SQLPASSWORD}@localhost:1433?database=${DATABASE}&trustServerCertificate=true" + BENCH_PATTERN='Benchmark(BulkMakeParam|ConvertAssign|Decode|Encode|ManglePassword|Parse|Read|RoundTrip|Send|Str2ucs2|TdsBuffer|Ucs22str|Write)' + git worktree add ../main-bench origin/main + cp -v *_benchmark_test.go ../main-bench/ 2>/dev/null || true + cp -v msdsn/*_benchmark_test.go ../main-bench/msdsn/ 2>/dev/null || true + cd ../main-bench + go test -run='^$' -bench="$BENCH_PATTERN" \ + -benchtime=1s -count=10 -benchmem -timeout=25m . 
./msdsn 2>&1 | \ + tee "$GITHUB_WORKSPACE/bench_old_full.log" + grep -E '^(Benchmark|goos:|goarch:|pkg:|cpu:)' "$GITHUB_WORKSPACE/bench_old_full.log" > "$GITHUB_WORKSPACE/bench_old.txt" + cd "$GITHUB_WORKSPACE" + git worktree remove ../main-bench --force + - name: Run PR benchmarks + shell: bash + run: | + export SQLUSER=sa + export SQLPASSWORD=$SQLCMDPASSWORD + export DATABASE=master + export HOST=localhost + export SQLSERVER_DSN="sqlserver://${SQLUSER}:${SQLPASSWORD}@localhost:1433?database=${DATABASE}&trustServerCertificate=true" + BENCH_PATTERN='Benchmark(BulkMakeParam|ConvertAssign|Decode|Encode|ManglePassword|Parse|Read|RoundTrip|Send|Str2ucs2|TdsBuffer|Ucs22str|Write)' + go test -run='^$' -bench="$BENCH_PATTERN" \ + -benchtime=1s -count=10 -benchmem -timeout=25m . ./msdsn 2>&1 | \ + tee bench_new_full.log + grep -E '^(Benchmark|goos:|goarch:|pkg:|cpu:)' bench_new_full.log > bench_new.txt + - name: Compare benchmarks + shell: bash + run: | + go install golang.org/x/perf/cmd/benchstat@latest + echo "## Benchmark Comparison (main vs PR)" >> "$GITHUB_STEP_SUMMARY" + echo '```' >> "$GITHUB_STEP_SUMMARY" + benchstat -alpha=0.01 bench_old.txt bench_new.txt | tee -a "$GITHUB_STEP_SUMMARY" + echo '```' >> "$GITHUB_STEP_SUMMARY" + benchstat -alpha=0.01 bench_old.txt bench_new.txt > bench_diff.txt + - name: Check for regressions + shell: bash + run: | + if [ ! -f bench_diff.txt ]; then + echo "No comparison available, skipping regression check." + exit 0 + fi + # Report statistically significant improvements (real %, not ~) + if grep -v '~' bench_diff.txt | grep -E '^\S+\s+.+\s+-[0-9]+\.[0-9]+%'; then + echo "" + echo "::notice::Performance improvements detected (see above)" + fi + # Fail on statistically significant regressions exceeding 15% + # Sequential CI runs produce systematic drift up to ~12%, so we require + # both statistical significance (no ~ marker) AND >15% magnitude. 
+ # Exclude TdsBuffer_Write_Large: ~120ns operation with multi-flush path + # shows 30-46% swings between sequential CI runs due to cache sensitivity. + REGRESSED=$(grep -v '~' bench_diff.txt | grep -v 'TdsBuffer_Write_Large' | grep -E '^\S+\s+.+\s+\+[0-9]+\.[0-9]+%' | awk -F'+' '{split($2,a,"%"); if (a[1]+0 >= 15) print}') + if [ -n "$REGRESSED" ]; then + echo "$REGRESSED" + echo "::error::Statistically significant regression detected (>15%, p<0.01)" + exit 1 + fi + echo "No significant regressions detected." + - name: Post benchmark results to PR + # Fork PRs get read-only tokens; comment will be skipped gracefully. + continue-on-error: true + if: always() && github.event_name == 'pull_request' + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + if [ ! -f bench_diff.txt ]; then + echo "No benchmark comparison to post." + exit 0 + fi + BODY="## Benchmark Results (main vs PR) + +
+ Click to expand benchstat output + + \`\`\` + $(cat bench_diff.txt) + \`\`\` + +
+ + *Generated by CI — commit $(git rev-parse --short HEAD)*" + + # Remove leading whitespace from heredoc-style indentation + BODY=$(echo "$BODY" | sed 's/^ //') + + # Find and update existing benchmark comment, or create a new one + COMMENT_ID=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \ + --jq '.[] | select(.body | startswith("## Benchmark Results")) | .id' | head -1) + + if [ -n "$COMMENT_ID" ]; then + gh api "repos/${{ github.repository }}/issues/comments/$COMMENT_ID" \ + -X PATCH -f body="$BODY" + echo "Updated existing benchmark comment." + else + gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY" + echo "Posted new benchmark comment." + fi diff --git a/buf_benchmark_test.go b/buf_benchmark_test.go new file mode 100644 index 00000000..831ac96e --- /dev/null +++ b/buf_benchmark_test.go @@ -0,0 +1,190 @@ +package mssql + +import ( + "io" + "testing" +) + +// Benchmarks for TDS buffer operations — the core I/O layer for all packet framing. + +// discardTransport implements io.ReadWriteCloser, discarding writes and providing zeros on read. 
+type discardTransport struct{} + +func (discardTransport) Read(p []byte) (int, error) { return len(p), nil } +func (discardTransport) Write(p []byte) (int, error) { return len(p), nil } +func (discardTransport) Close() error { return nil } + +func BenchmarkTdsBuffer_Write_Small(b *testing.B) { + buf := newTdsBuffer(4096, discardTransport{}) + payload := make([]byte, 64) + for i := range payload { + payload[i] = byte(i) + } + + b.SetBytes(int64(len(payload))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.BeginPacket(packSQLBatch, false) + buf.Write(payload) + buf.FinishPacket() + } +} + +func BenchmarkTdsBuffer_Write_Medium(b *testing.B) { + buf := newTdsBuffer(4096, discardTransport{}) + payload := make([]byte, 1024) + for i := range payload { + payload[i] = byte(i % 256) + } + + b.SetBytes(int64(len(payload))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.BeginPacket(packSQLBatch, false) + buf.Write(payload) + buf.FinishPacket() + } +} + +func BenchmarkTdsBuffer_Write_Large(b *testing.B) { + // Payload larger than packet size forces multiple flushes + buf := newTdsBuffer(4096, discardTransport{}) + payload := make([]byte, 8192) + for i := range payload { + payload[i] = byte(i % 256) + } + + b.SetBytes(int64(len(payload))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.BeginPacket(packSQLBatch, false) + buf.Write(payload) + buf.FinishPacket() + } +} + +func BenchmarkTdsBuffer_WriteByte(b *testing.B) { + buf := newTdsBuffer(4096, discardTransport{}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.BeginPacket(packSQLBatch, false) + for j := 0; j < 100; j++ { + buf.WriteByte(byte(j)) + } + buf.FinishPacket() + } +} + +func BenchmarkTdsBuffer_Read_Small(b *testing.B) { + // Simulate reading a small packet from transport + packetSize := uint16(512) + buf := newTdsBuffer(packetSize, nil) + + // Pre-fill read buffer with a valid packet + data := make([]byte, 64) + for i := range data { + data[i] = byte(i) + } + totalSize := 8 + len(data) // header 
+ payload + copy(buf.rbuf[8:], data) + buf.rpos = 8 + buf.rsize = totalSize + buf.final = true + + dest := make([]byte, 64) + b.SetBytes(int64(len(dest))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.rpos = 8 + io.ReadFull(buf, dest) + } +} + +func BenchmarkTdsBuffer_ReadByte(b *testing.B) { + buf := newTdsBuffer(4096, nil) + // Fill buffer with data + for i := 0; i < 1000; i++ { + buf.rbuf[i] = byte(i % 256) + } + buf.rpos = 0 + buf.rsize = 1000 + buf.final = true + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.rpos = 0 + for j := 0; j < 100; j++ { + buf.ReadByte() + } + } +} + +func BenchmarkTdsBuffer_Uint16(b *testing.B) { + buf := newTdsBuffer(4096, nil) + // Fill with uint16 values + for i := 0; i < 200; i += 2 { + buf.rbuf[i] = byte(i) + buf.rbuf[i+1] = byte(i >> 8) + } + buf.rpos = 0 + buf.rsize = 200 + buf.final = true + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.rpos = 0 + for j := 0; j < 50; j++ { + buf.uint16() + } + } +} + +func BenchmarkTdsBuffer_Uint32(b *testing.B) { + buf := newTdsBuffer(4096, nil) + for i := 0; i < 400; i += 4 { + buf.rbuf[i] = byte(i) + buf.rbuf[i+1] = byte(i >> 8) + buf.rbuf[i+2] = byte(i >> 16) + buf.rbuf[i+3] = byte(i >> 24) + } + buf.rpos = 0 + buf.rsize = 400 + buf.final = true + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.rpos = 0 + for j := 0; j < 50; j++ { + buf.uint32() + } + } +} + +func BenchmarkTdsBuffer_Uint64(b *testing.B) { + buf := newTdsBuffer(4096, nil) + for i := 0; i < 400; i++ { + buf.rbuf[i] = byte(i % 256) + } + buf.rpos = 0 + buf.rsize = 400 + buf.final = true + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.rpos = 0 + for j := 0; j < 50; j++ { + buf.uint64() + } + } +} + +func BenchmarkTdsBuffer_BeginFinishPacket(b *testing.B) { + buf := newTdsBuffer(4096, discardTransport{}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.BeginPacket(packSQLBatch, false) + buf.FinishPacket() + } +} diff --git a/bulkcopy_benchmark_test.go b/bulkcopy_benchmark_test.go new file mode 
100644 index 00000000..eec28e5f --- /dev/null +++ b/bulkcopy_benchmark_test.go @@ -0,0 +1,147 @@ +package mssql + +import ( + "testing" + "time" +) + +// Benchmarks for bulk copy parameter encoding — the hot path for every row in bulk insert. + +func benchBulkCol(typeId byte, size int) columnStruct { + return columnStruct{ + ti: typeInfo{ + TypeId: typeId, + Size: size, + }, + } +} + +func BenchmarkBulkMakeParam_Int64(b *testing.B) { + bulk := &Bulk{cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}} + col := benchBulkCol(typeInt8, 8) + + for i := 0; i < b.N; i++ { + _, err := bulk.makeParam(int64(1234567890), col) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBulkMakeParam_Int32(b *testing.B) { + bulk := &Bulk{cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}} + col := benchBulkCol(typeInt4, 4) + + for i := 0; i < b.N; i++ { + _, err := bulk.makeParam(int64(12345), col) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBulkMakeParam_Float64(b *testing.B) { + bulk := &Bulk{cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}} + col := benchBulkCol(typeFlt8, 8) + + for i := 0; i < b.N; i++ { + _, err := bulk.makeParam(float64(3.14159), col) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBulkMakeParam_String_NVarChar(b *testing.B) { + bulk := &Bulk{cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}} + col := benchBulkCol(typeNVarChar, 200) + + val := "hello world benchmark test string" + for i := 0; i < b.N; i++ { + _, err := bulk.makeParam(val, col) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBulkMakeParam_String_VarChar(b *testing.B) { + bulk := &Bulk{cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}} + col := benchBulkCol(typeBigVarChar, 200) + + val := "hello world benchmark test string" + for i := 0; i < b.N; i++ { + _, err := bulk.makeParam(val, col) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBulkMakeParam_DateTime(b *testing.B) { + bulk := 
&Bulk{cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}} + col := benchBulkCol(typeDateTime, 8) + + val := time.Date(2024, 6, 15, 14, 30, 45, 0, time.UTC) + for i := 0; i < b.N; i++ { + _, err := bulk.makeParam(val, col) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBulkMakeParam_DateTime2(b *testing.B) { + bulk := &Bulk{cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}} + col := columnStruct{ + ti: typeInfo{ + TypeId: typeDateTime2N, + Size: 8, + Scale: 7, + }, + } + + val := time.Date(2024, 6, 15, 14, 30, 45, 123456789, time.UTC) + for i := 0; i < b.N; i++ { + _, err := bulk.makeParam(val, col) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBulkMakeParam_Bool(b *testing.B) { + bulk := &Bulk{cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}} + col := benchBulkCol(typeBitN, 1) + + for i := 0; i < b.N; i++ { + _, err := bulk.makeParam(true, col) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBulkMakeParam_MultiColumn(b *testing.B) { + bulk := &Bulk{ + cn: &Conn{sess: &tdsSession{encoding: benchmarkEncoding()}}, + bulkColumns: []columnStruct{ + benchBulkCol(typeIntN, 8), + benchBulkCol(typeNVarChar, 100), + benchBulkCol(typeFltN, 8), + benchBulkCol(typeBitN, 1), + }, + } + + values := []interface{}{int64(42), "test string", float64(3.14), true} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for colIdx, val := range values { + _, err := bulk.makeParam(val, bulk.bulkColumns[colIdx]) + if err != nil { + b.Fatal(err) + } + } + } +} diff --git a/convert_benchmark_test.go b/convert_benchmark_test.go new file mode 100644 index 00000000..9a67ec44 --- /dev/null +++ b/convert_benchmark_test.go @@ -0,0 +1,138 @@ +package mssql + +import ( + "database/sql" + "testing" + "time" +) + +// Benchmarks for convertAssign — the hot path called per-column per-row during Scan. 
+ +func BenchmarkConvertAssign_StringToString(b *testing.B) { + src := "hello world" + for i := 0; i < b.N; i++ { + var dest string + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_StringToBytes(b *testing.B) { + src := "hello world test data for benchmark" + for i := 0; i < b.N; i++ { + var dest []byte + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_BytesToString(b *testing.B) { + src := []byte("hello world test data for benchmark") + for i := 0; i < b.N; i++ { + var dest string + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_BytesToBytes(b *testing.B) { + src := []byte("hello world test data for benchmark") + for i := 0; i < b.N; i++ { + var dest []byte + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_Int64ToInt64(b *testing.B) { + src := int64(1234567890) + for i := 0; i < b.N; i++ { + var dest int64 + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_Int64ToString(b *testing.B) { + src := int64(1234567890) + for i := 0; i < b.N; i++ { + var dest string + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_Float64ToFloat64(b *testing.B) { + src := float64(3.14159265358979) + for i := 0; i < b.N; i++ { + var dest float64 + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_TimeToTime(b *testing.B) { + src := time.Now() + for i := 0; i < b.N; i++ { + var dest time.Time + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_TimeToString(b *testing.B) { + src := time.Now() + for i := 0; i < b.N; i++ { + var dest string + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func 
BenchmarkConvertAssign_NilToBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + var dest []byte + if err := convertAssign(&dest, nil); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_Int64ToInterface(b *testing.B) { + src := int64(42) + for i := 0; i < b.N; i++ { + var dest interface{} + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_StringToRawBytes(b *testing.B) { + src := "hello world test data for scan benchmark" + for i := 0; i < b.N; i++ { + var dest sql.RawBytes + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkConvertAssign_BoolToBool(b *testing.B) { + src := true + for i := 0; i < b.N; i++ { + var dest bool + if err := convertAssign(&dest, src); err != nil { + b.Fatal(err) + } + } +} diff --git a/integration_benchmark_test.go b/integration_benchmark_test.go new file mode 100644 index 00000000..ebe761ad --- /dev/null +++ b/integration_benchmark_test.go @@ -0,0 +1,394 @@ +package mssql + +import ( + "context" + "database/sql" + "fmt" + "strings" + "testing" + "time" + + "github.com/golang-sql/sqlexp" +) + +// Integration benchmarks exercise full end-to-end paths through the driver. +// They require a SQL Server connection (SQLSERVER_DSN or HOST/DATABASE env vars). +// Without a connection, they are skipped gracefully. + +func benchmarkDB(b *testing.B) *sql.DB { + b.Helper() + connector, err := NewConnector(makeConnStr(b).String()) + if err != nil { + b.Fatal("Open connection failed:", err.Error()) + } + db := sql.OpenDB(connector) + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + db.SetConnMaxLifetime(0) + db.SetConnMaxIdleTime(0) + b.Cleanup(func() { db.Close() }) + // Warm up the connection + if err := db.Ping(); err != nil { + b.Fatal("Ping failed:", err) + } + return db +} + +// benchmarkConn returns a pinned single connection for benchmarks that use +// session-scoped state like temp tables. 
+func benchmarkConn(b *testing.B) (*sql.Conn, context.Context) { + b.Helper() + connector, err := NewConnector(makeConnStr(b).String()) + if err != nil { + b.Fatal("Open connection failed:", err.Error()) + } + db := sql.OpenDB(connector) + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + db.SetConnMaxLifetime(0) + db.SetConnMaxIdleTime(0) + ctx := context.Background() + conn, err := db.Conn(ctx) + if err != nil { + b.Fatal("Conn failed:", err) + } + b.Cleanup(func() { + conn.Close() + db.Close() + }) + return conn, ctx +} + +func BenchmarkRoundTrip_ConnectDisconnect(b *testing.B) { + // Measures the full connection establishment cost: TCP + TLS + TDS login + auth + connStr := makeConnStr(b).String() + b.ResetTimer() + for i := 0; i < b.N; i++ { + connector, err := NewConnector(connStr) + if err != nil { + b.Fatal(err) + } + db := sql.OpenDB(connector) + db.SetMaxOpenConns(1) + if err := db.Ping(); err != nil { + b.Fatal(err) + } + db.Close() + } +} + +func BenchmarkRoundTrip_Select1(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var n int + if err := db.QueryRowContext(ctx, "SELECT 1").Scan(&n); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRoundTrip_ParamQuery(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var s string + err := db.QueryRowContext(ctx, "SELECT @p1 + @p2", + sql.Named("p1", "hello"), + sql.Named("p2", "world"), + ).Scan(&s) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRoundTrip_MultiRow(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + // Returns ~100 rows from system catalog + query := "SELECT TOP 100 object_id, name, type FROM sys.all_objects" + b.ResetTimer() + for i := 0; i < b.N; i++ { + rows, err := db.QueryContext(ctx, query) + if err != nil { + b.Fatal(err) + } + count := 0 + for rows.Next() { + var id int + var name, typ string + if err := rows.Scan(&id, 
&name, &typ); err != nil { + b.Fatal(err) + } + count++ + } + rows.Close() + if count == 0 { + b.Fatal("expected rows") + } + } +} + +func BenchmarkRoundTrip_LargeResultSet(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + // Cross join to get ~1000 rows + query := `SELECT TOP 1000 a.object_id, a.name + FROM sys.all_objects a CROSS JOIN sys.all_objects b` + b.ResetTimer() + for i := 0; i < b.N; i++ { + rows, err := db.QueryContext(ctx, query) + if err != nil { + b.Fatal(err) + } + count := 0 + for rows.Next() { + var id int + var name string + if err := rows.Scan(&id, &name); err != nil { + b.Fatal(err) + } + count++ + } + rows.Close() + if count != 1000 { + b.Fatalf("expected 1000 rows, got %d", count) + } + } +} + +func BenchmarkRoundTrip_ExecInsert(b *testing.B) { + conn, ctx := benchmarkConn(b) + _, err := conn.ExecContext(ctx, `CREATE TABLE #bench_insert ( + id INT IDENTITY PRIMARY KEY, + val NVARCHAR(100), + num INT, + ts DATETIME2 + )`) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := conn.ExecContext(ctx, + "INSERT INTO #bench_insert (val, num, ts) VALUES (@p1, @p2, @p3)", + fmt.Sprintf("row-%d", i), i, time.Now(), + ) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRoundTrip_StoredProc(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + // Use built-in sp_executesql as a stored procedure benchmark + b.ResetTimer() + for i := 0; i < b.N; i++ { + var n int + err := db.QueryRowContext(ctx, + "EXEC sp_executesql N'SELECT @val', N'@val INT', @val = @p1", + sql.Named("p1", 42), + ).Scan(&n) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRoundTrip_Transaction(b *testing.B) { + conn, ctx := benchmarkConn(b) + _, err := conn.ExecContext(ctx, `CREATE TABLE #bench_tx (id INT, val NVARCHAR(50))`) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + tx, err := conn.BeginTx(ctx, nil) + if err != nil { + b.Fatal(err) + } + _, 
err = tx.ExecContext(ctx, "INSERT INTO #bench_tx VALUES (@p1, @p2)", i, "txn-test") + if err != nil { + tx.Rollback() + b.Fatal(err) + } + if err := tx.Commit(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRoundTrip_BulkInsert(b *testing.B) { + conn, ctx := benchmarkConn(b) + + for _, rowCount := range []int{100, 1000} { + b.Run(fmt.Sprintf("Rows_%d", rowCount), func(b *testing.B) { + for i := 0; i < b.N; i++ { + // Re-create table each iteration + _, err := conn.ExecContext(ctx, ` + IF OBJECT_ID('tempdb..#bench_bulk') IS NOT NULL DROP TABLE #bench_bulk; + CREATE TABLE #bench_bulk ( + id INT, + name NVARCHAR(100), + amount FLOAT, + created DATETIME2 + )`) + if err != nil { + b.Fatal(err) + } + + stmt, err := conn.PrepareContext(ctx, CopyIn("#bench_bulk", BulkOptions{}, "id", "name", "amount", "created")) + if err != nil { + b.Fatal(err) + } + now := time.Now() + for r := 0; r < rowCount; r++ { + _, err = stmt.Exec(r, fmt.Sprintf("name-%d", r), float64(r)*1.5, now) + if err != nil { + b.Fatal(err) + } + } + _, err = stmt.Exec() + if err != nil { + b.Fatal(err) + } + stmt.Close() + } + }) + } +} + +func BenchmarkRoundTrip_ConcurrentQueries(b *testing.B) { + connector, err := NewConnector(makeConnStr(b).String()) + if err != nil { + b.Fatal("Open connection failed:", err.Error()) + } + db := sql.OpenDB(connector) + db.SetMaxOpenConns(10) + db.SetMaxIdleConns(10) + b.Cleanup(func() { db.Close() }) + if err := db.Ping(); err != nil { + b.Fatal("Ping failed:", err) + } + ctx := context.Background() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var n int + if err := db.QueryRowContext(ctx, "SELECT 1").Scan(&n); err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkRoundTrip_MixedTypes(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + query := `SELECT + CAST(12345 AS INT), + CAST(9876543210 AS BIGINT), + CAST(3.14159 AS FLOAT), + CAST('hello world' AS NVARCHAR(100)), + CAST(1 AS BIT), + 
CAST('2024-01-15T10:30:00' AS DATETIME2), + CAST(NULL AS NVARCHAR(50))` + b.ResetTimer() + for i := 0; i < b.N; i++ { + var ( + intVal int + bigintVal int64 + floatVal float64 + strVal string + boolVal bool + timeVal time.Time + nullVal sql.NullString + ) + err := db.QueryRowContext(ctx, query).Scan( + &intVal, &bigintVal, &floatVal, &strVal, &boolVal, &timeVal, &nullVal, + ) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRoundTrip_LargePayload(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + // 8KB string to test larger NVarChar handling + largeStr := strings.Repeat("abcdefgh", 1024) + b.ResetTimer() + for i := 0; i < b.N; i++ { + var result string + err := db.QueryRowContext(ctx, "SELECT @p1", sql.Named("p1", largeStr)).Scan(&result) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRoundTrip_PreparedStmt(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + stmt, err := db.PrepareContext(ctx, "SELECT @p1 + @p2") + if err != nil { + b.Fatal(err) + } + b.Cleanup(func() { stmt.Close() }) + b.ResetTimer() + for i := 0; i < b.N; i++ { + var n int + if err := stmt.QueryRowContext(ctx, 1, 2).Scan(&n); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkRoundTrip_MessageQuery exercises the sqlexp message-based query loop, +// which is a distinct code path from the standard Rows iteration. The query +// produces a mix of result sets, print messages, and errors so all message +// types are dispatched. +func BenchmarkRoundTrip_MessageQuery(b *testing.B) { + db := benchmarkDB(b) + ctx := context.Background() + const query = `select top 5 name from sys.system_columns +select getdate() +PRINT N'This is a message' +select 199 +RAISERROR (N'Testing!' 
, 11, 1) +declare @d int = 300 +select @d +` + b.ResetTimer() + for i := 0; i < b.N; i++ { + retmsg := &sqlexp.ReturnMessage{} + rows, err := db.QueryContext(ctx, query, retmsg) + if err != nil { + b.Fatal(err) + } + active := true + for active { + switch m := retmsg.Message(ctx).(type) { + case sqlexp.MsgNext: + for rows.Next() { + var d interface{} + if err := rows.Scan(&d); err != nil { + b.Fatal(err) + } + } + case sqlexp.MsgNextResultSet: + active = rows.NextResultSet() + case sqlexp.MsgError: + _ = m.Error + case sqlexp.MsgNotice, sqlexp.MsgRowsAffected: + } + } + rows.Close() + } +} diff --git a/msdsn/conn_str_benchmark_test.go b/msdsn/conn_str_benchmark_test.go new file mode 100644 index 00000000..eba6616e --- /dev/null +++ b/msdsn/conn_str_benchmark_test.go @@ -0,0 +1,33 @@ +package msdsn + +import "testing" + +func benchmarkParse(b *testing.B, dsn string) { + b.Helper() + for i := 0; i < b.N; i++ { + _, err := Parse(dsn) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkParse_URL(b *testing.B) { + benchmarkParse(b, "sqlserver://user:password@localhost:1433?database=mydb&encrypt=true&TrustServerCertificate=true&connection+timeout=30") +} + +func BenchmarkParse_URL_Azure(b *testing.B) { + benchmarkParse(b, "sqlserver://user:password@myserver.database.windows.net:1433?database=mydb&encrypt=true&TrustServerCertificate=false&connection+timeout=30&fedauth=ActiveDirectoryDefault") +} + +func BenchmarkParse_ADO(b *testing.B) { + benchmarkParse(b, "server=localhost;user id=sa;password=secret;database=mydb;encrypt=true;TrustServerCertificate=true;connection timeout=30") +} + +func BenchmarkParse_URL_Minimal(b *testing.B) { + benchmarkParse(b, "sqlserver://sa:pwd@localhost") +} + +func BenchmarkParse_URL_ManyParams(b *testing.B) { + benchmarkParse(b, 
"sqlserver://user:password@localhost:1433?database=mydb&encrypt=true&TrustServerCertificate=true&connection+timeout=30&dial+timeout=10&keepAlive=30&failoverpartner=mirror&failoverport=1434&packet+size=16384&log=63&app+name=myapp") +} diff --git a/mssql_go110_perf_test.go b/mssql_go110_perf_test.go index b9ebd29d..cef644da 100644 --- a/mssql_go110_perf_test.go +++ b/mssql_go110_perf_test.go @@ -36,12 +36,12 @@ func BenchmarkSelectWithTypeMismatch(b *testing.B) { if err != nil { b.Fatal("Query failed:", err.Error()) } - defer rows.Close() for rows.Next() { } if rows.Err() != nil { b.Fatal("Rows error:", rows.Err()) } + rows.Close() } }) b.Run("NoIntPromotion", func(b *testing.B) { @@ -50,12 +50,12 @@ func BenchmarkSelectWithTypeMismatch(b *testing.B) { if err != nil { b.Fatal("Query failed:", err.Error()) } - defer rows.Close() for rows.Next() { } if rows.Err() != nil { b.Fatal("Rows error:", rows.Err()) } + rows.Close() } }) diff --git a/rpc_benchmark_test.go b/rpc_benchmark_test.go new file mode 100644 index 00000000..06850be7 --- /dev/null +++ b/rpc_benchmark_test.go @@ -0,0 +1,137 @@ +package mssql + +import ( + "bytes" + "encoding/binary" + "io" + "testing" + + "github.com/microsoft/go-mssqldb/msdsn" +) + +// Benchmarks for RPC parameter encoding — the hot path for every parameterized query. 
+ +func BenchmarkWriteTypeInfo_Int8(b *testing.B) { + ti := typeInfo{TypeId: typeIntN, Size: 8} + buf := new(bytes.Buffer) + enc := msdsn.EncodeParameters{} + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := writeTypeInfo(buf, &ti, false, enc); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkWriteTypeInfo_NVarChar(b *testing.B) { + ti := typeInfo{TypeId: typeNVarChar, Size: 100} + buf := new(bytes.Buffer) + enc := msdsn.EncodeParameters{} + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := writeTypeInfo(buf, &ti, false, enc); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkWriteTypeInfo_NVarCharMax(b *testing.B) { + ti := typeInfo{TypeId: typeNVarChar, Size: 0} + buf := new(bytes.Buffer) + enc := msdsn.EncodeParameters{} + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := writeTypeInfo(buf, &ti, false, enc); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkWriteByteLenType(b *testing.B) { + ti := typeInfo{TypeId: typeIntN, Size: 8} + data := make([]byte, 8) + binary.LittleEndian.PutUint64(data, 1234567890) + buf := new(bytes.Buffer) + enc := msdsn.EncodeParameters{} + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := writeByteLenType(buf, ti, data, enc); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkWriteShortLenType(b *testing.B) { + ti := typeInfo{TypeId: typeNVarChar, Size: 100} + data := make([]byte, 100) + for i := range data { + data[i] = byte(i) + } + buf := new(bytes.Buffer) + enc := msdsn.EncodeParameters{} + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := writeShortLenType(buf, ti, data, enc); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkWritePLPType_Short(b *testing.B) { + ti := typeInfo{TypeId: typeNVarChar, Size: 0} + // 50-char string in UTF-16LE = 100 bytes + data := make([]byte, 100) + for i := 0; i < 50; i++ { + data[i*2] = byte('A' + (i % 26)) + data[i*2+1] = 0 + } + buf := new(bytes.Buffer) + enc := msdsn.EncodeParameters{} + + for i := 0; i < b.N; i++ { + buf.Reset() + if 
err := writePLPType(buf, ti, data, enc); err != nil { + b.Fatal(err) + } + } +} + +type discardRWC struct{} + +func (discardRWC) Read([]byte) (int, error) { return 0, io.EOF } +func (discardRWC) Write(p []byte) (int, error) { return len(p), nil } +func (discardRWC) Close() error { return nil } + +func BenchmarkSendRpc_SingleIntParam(b *testing.B) { + paramData := make([]byte, 8) + binary.LittleEndian.PutUint64(paramData, 42) + + params := []param{ + { + Name: "@p1", + Flags: 0, + ti: typeInfo{ + TypeId: typeIntN, + Size: 8, + Writer: writeByteLenType, + }, + buffer: paramData, + }, + } + headers := []headerStruct{ + {hdrtype: dataStmHdrTransDescr, data: transDescrHdr{0, 1}.pack()}, + } + enc := msdsn.EncodeParameters{} + + for i := 0; i < b.N; i++ { + tdsBuf := newTdsBuffer(defaultPacketSize, discardRWC{}) + if err := sendRpc(tdsBuf, headers, sp_ExecuteSql, 0, params, false, enc); err != nil { + b.Fatal(err) + } + } +} diff --git a/tds_benchmark_test.go b/tds_benchmark_test.go new file mode 100644 index 00000000..13303a55 --- /dev/null +++ b/tds_benchmark_test.go @@ -0,0 +1,118 @@ +package mssql + +import ( + "testing" +) + +// Benchmarks for TDS string encoding/decoding and login packet construction. 
+
+func BenchmarkStr2ucs2_Short(b *testing.B) {
+	s := "master"
+	b.SetBytes(int64(len(s)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sideeffect = str2ucs2(s)
+	}
+}
+
+func BenchmarkStr2ucs2_Medium(b *testing.B) {
+	s := "server.database.windows.net"
+	b.SetBytes(int64(len(s)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sideeffect = str2ucs2(s)
+	}
+}
+
+func BenchmarkStr2ucs2_Long(b *testing.B) {
+	s := "This is a longer string that might appear in connection strings or query text for benchmark purposes"
+	b.SetBytes(int64(len(s)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sideeffect = str2ucs2(s)
+	}
+}
+
+func BenchmarkUcs22str_ASCII(b *testing.B) {
+	// Pure ASCII — exercises the fast path
+	input := str2ucs2("SELECT * FROM dbo.Users WHERE id = 1")
+	b.SetBytes(int64(len(input)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sideeffect, _ = ucs22str(input)
+	}
+}
+
+func BenchmarkUcs22str_Unicode(b *testing.B) {
+	// Contains non-ASCII characters — exercises the slow path
+	input := str2ucs2("Ñoño données über Straße")
+	b.SetBytes(int64(len(input)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sideeffect, _ = ucs22str(input)
+	}
+}
+
+func BenchmarkUcs22str_LongASCII(b *testing.B) {
+	input := str2ucs2("SELECT col1, col2, col3, col4, col5 FROM schema.very_long_table_name WHERE condition1 = 1 AND condition2 = 2 ORDER BY col1 DESC")
+	b.SetBytes(int64(len(input)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sideeffect, _ = ucs22str(input)
+	}
+}
+
+func BenchmarkManglePassword_Short(b *testing.B) {
+	pw := "P@ssw0rd"
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sideeffect = manglePassword(pw)
+	}
+}
+
+func BenchmarkManglePassword_Long(b *testing.B) {
+	pw := "ThisIsAVeryLongAndComplexP@ssw0rd!2024#Secure"
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		sideeffect = manglePassword(pw)
+	}
+}
+
+func BenchmarkSendLogin(b *testing.B) {
+	buf := newTdsBuffer(4096, discardTransport{})
+	l := &login{
+		TDSVersion:    verTDS74,
+		PacketSize:    4096,
+		ClientProgVer: 0x07000000,
+		ClientPID:     1234,
+		HostName:      "WORKSTATION",
+		UserName:      "testuser",
+		Password:      "P@ssw0rd!",
+		AppName:       "go-mssqldb-benchmark",
+		ServerName:    "localhost",
+		CtlIntName:    "go-mssqldb",
+		Language:      "",
+		Database:      "master",
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// Error deliberately ignored: the benchmark measures encode cost only.
+		sendLogin(buf, l)
+	}
+}
+
+func BenchmarkWritePrelogin(b *testing.B) {
+	buf := newTdsBuffer(4096, discardTransport{})
+	fields := map[uint8][]byte{
+		preloginVERSION:    {0x10, 0x00, 0x00, 0x00, 0x00, 0x00},
+		preloginENCRYPTION: {encryptOn},
+		preloginINSTOPT:    {0x00},
+		preloginTHREADID:   {0x00, 0x00, 0x00, 0x00},
+		preloginMARS:       {0x00},
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// Error deliberately ignored: the benchmark measures encode cost only.
+		writePrelogin(packPrelogin, buf, fields)
+	}
+}
diff --git a/token_benchmark_test.go b/token_benchmark_test.go
new file mode 100644
index 00000000..ae9d8d70
--- /dev/null
+++ b/token_benchmark_test.go
@@ -0,0 +1,234 @@
+package mssql
+
+import (
+	"encoding/binary"
+	"testing"
+)
+
+// Benchmarks for token parsing functions — these decode every response token from the wire.
+
+// makeTdsBufferFromBytes returns a tdsBuffer whose read side is pre-filled
+// with data and marked final, so parse benchmarks can replay the same
+// token payload by resetting rpos — no transport involved.
+func makeTdsBufferFromBytes(data []byte) *tdsBuffer {
+	buf := newTdsBuffer(uint16(len(data)+8), nil)
+	copy(buf.rbuf[0:], data)
+	buf.rpos = 0
+	buf.rsize = len(data)
+	buf.final = true
+	return buf
+}
+
+func BenchmarkParseDone(b *testing.B) {
+	// doneStruct: Status(2) + CurCmd(2) + RowCount(8) = 12 bytes
+	data := make([]byte, 12)
+	binary.LittleEndian.PutUint16(data[0:], doneCount) // Status: has row count
+	binary.LittleEndian.PutUint16(data[2:], cmdSelect) // CurCmd
+	binary.LittleEndian.PutUint64(data[4:], 42)        // RowCount
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseDone(buf)
+	}
+}
+
+func BenchmarkParseDoneInProc(b *testing.B) {
+	data := make([]byte, 12)
+	binary.LittleEndian.PutUint16(data[0:], doneMore|doneCount)
+	binary.LittleEndian.PutUint16(data[2:], cmdSelect)
+	binary.LittleEndian.PutUint64(data[4:], 100)
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseDoneInProc(buf)
+	}
+}
+
+func BenchmarkParseReturnStatus(b *testing.B) {
+	data := make([]byte, 4)
+	binary.LittleEndian.PutUint32(data[0:], 0) // return status 0
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseReturnStatus(buf)
+	}
+}
+
+func BenchmarkParseOrder(b *testing.B) {
+	// 4 columns in ORDER BY
+	numCols := 4
+	data := make([]byte, 2+numCols*2)
+	binary.LittleEndian.PutUint16(data[0:], uint16(numCols*2))
+	for i := 0; i < numCols; i++ {
+		binary.LittleEndian.PutUint16(data[2+i*2:], uint16(i+1))
+	}
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseOrder(buf)
+	}
+}
+
+func BenchmarkParseError72(b *testing.B) {
+	// Build a realistic error token payload:
+	// Length(2) + Number(4) + State(1) + Class(1) + Message(UsVarChar) + ServerName(BVarChar) + ProcName(BVarChar) + LineNo(4)
+	msg := str2ucs2("Login failed for user 'sa'.")
+	server := str2ucs2("MYSERVER")
+	proc := str2ucs2("")
+
+	// UsVarChar: uint16 length (in chars) + UTF-16 data
+	// BVarChar: byte length (in chars) + UTF-16 data
+	msgChars := len(msg) / 2
+	serverChars := len(server) / 2
+	procChars := len(proc) / 2
+
+	payloadSize := 4 + 1 + 1 + 2 + len(msg) + 1 + len(server) + 1 + len(proc) + 4
+	data := make([]byte, 2+payloadSize)
+	binary.LittleEndian.PutUint16(data[0:], uint16(payloadSize))
+	off := 2
+	binary.LittleEndian.PutUint32(data[off:], 18456) // Error number
+	off += 4
+	data[off] = 1 // State
+	off++
+	data[off] = 14 // Class (severity)
+	off++
+	// Message (UsVarChar)
+	binary.LittleEndian.PutUint16(data[off:], uint16(msgChars))
+	off += 2
+	copy(data[off:], msg)
+	off += len(msg)
+	// ServerName (BVarChar)
+	data[off] = byte(serverChars)
+	off++
+	copy(data[off:], server)
+	off += len(server)
+	// ProcName (BVarChar)
+	data[off] = byte(procChars)
+	off++
+	copy(data[off:], proc)
+	off += len(proc)
+	// LineNo
+	binary.LittleEndian.PutUint32(data[off:], 1)
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseError72(buf)
+	}
+}
+
+func BenchmarkParseInfo(b *testing.B) {
+	// Same structure as parseError72 but with informational message
+	msg := str2ucs2("Changed database context to 'master'.")
+	server := str2ucs2("MYSERVER")
+	proc := str2ucs2("")
+
+	msgChars := len(msg) / 2
+	serverChars := len(server) / 2
+	procChars := len(proc) / 2
+
+	payloadSize := 4 + 1 + 1 + 2 + len(msg) + 1 + len(server) + 1 + len(proc) + 4
+	data := make([]byte, 2+payloadSize)
+	binary.LittleEndian.PutUint16(data[0:], uint16(payloadSize))
+	off := 2
+	binary.LittleEndian.PutUint32(data[off:], 5701) // Info number
+	off += 4
+	data[off] = 2 // State
+	off++
+	data[off] = 0 // Class
+	off++
+	binary.LittleEndian.PutUint16(data[off:], uint16(msgChars))
+	off += 2
+	copy(data[off:], msg)
+	off += len(msg)
+	data[off] = byte(serverChars)
+	off++
+	copy(data[off:], server)
+	off += len(server)
+	data[off] = byte(procChars)
+	off++
+	copy(data[off:], proc)
+	off += len(proc)
+	binary.LittleEndian.PutUint32(data[off:], 0)
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseInfo(buf)
+	}
+}
+
+func BenchmarkParseLoginAck(b *testing.B) {
+	// loginAckStruct: size(2) + Interface(1) + TDSVersion(4) + ProgNameLen(1) + ProgName(UTF-16) + ProgVer(4)
+	progName := str2ucs2("Microsoft SQL Server")
+	progNameChars := len(progName) / 2
+	payloadSize := 1 + 4 + 1 + len(progName) + 4
+	data := make([]byte, 2+payloadSize)
+	binary.LittleEndian.PutUint16(data[0:], uint16(payloadSize))
+	off := 2
+	data[off] = 1 // Interface: SQL_DFLT
+	off++
+	binary.BigEndian.PutUint32(data[off:], 0x74000004) // TDS 7.4
+	off += 4
+	data[off] = byte(progNameChars)
+	off++
+	copy(data[off:], progName)
+	off += len(progName)
+	binary.BigEndian.PutUint32(data[off:], 0x10000000) // ProgVer 16.0.0.0
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseLoginAck(buf)
+	}
+}
+
+func BenchmarkParseFeatureExtAck_Empty(b *testing.B) {
+	// Just a terminator byte
+	data := []byte{featExtTERMINATOR}
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseFeatureExtAck(buf)
+	}
+}
+
+func BenchmarkParseTabName(b *testing.B) {
+	// Simulate a TABNAME token with table name data
+	tableName := str2ucs2("dbo.Users")
+	data := make([]byte, 2+len(tableName))
+	binary.LittleEndian.PutUint16(data[0:], uint16(len(tableName)))
+	copy(data[2:], tableName)
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseTabName(buf)
+	}
+}
+
+func BenchmarkParseColInfo(b *testing.B) {
+	// Simulate COLINFO with some column info bytes
+	colInfoData := make([]byte, 20) // arbitrary column info
+	data := make([]byte, 2+len(colInfoData))
+	binary.LittleEndian.PutUint16(data[0:], uint16(len(colInfoData)))
+	copy(data[2:], colInfoData)
+
+	buf := makeTdsBufferFromBytes(data)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		buf.rpos = 0
+		parseColInfo(buf)
+	}
+}
diff --git a/types_benchmark_test.go b/types_benchmark_test.go
new file mode 100644
index 00000000..775781bd
--- /dev/null
+++ b/types_benchmark_test.go
@@ -0,0 +1,298 @@
+package mssql
+
+import (
+	"encoding/binary"
+	"math"
+	"testing"
+	"time"
+
+	"github.com/microsoft/go-mssqldb/internal/cp"
+	"github.com/microsoft/go-mssqldb/msdsn"
+)
+
+// Benchmarks for type reading functions — the hot path for decoding every column value from the wire.
+
+// sideeffect is a package-level sink that keeps benchmarked results
+// observable so the compiler cannot dead-code-eliminate the calls.
+var sideeffect interface{}
+
+func benchmarkEncoding() msdsn.EncodeParameters {
+	return msdsn.EncodeParameters{Timezone: time.UTC}
+}
+
+func BenchmarkReadFixedType_Int64(b *testing.B) {
+	ti := typeInfo{TypeId: typeInt8, Size: 8, Buffer: make([]byte, 8)}
+	binary.LittleEndian.PutUint64(ti.Buffer, 1234567890)
+	ti.Reader = readFixedType
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		// Simulate reading from buffer by resetting the pre-filled buffer
+		copy(buf.rbuf[:8], ti.Buffer)
+		buf.rpos = 0
+		buf.rsize = 8
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadFixedType_Int32(b *testing.B) {
+	ti := typeInfo{TypeId: typeInt4, Size: 4, Buffer: make([]byte, 4)}
+	binary.LittleEndian.PutUint32(ti.Buffer, 42)
+	ti.Reader = readFixedType
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:4], ti.Buffer)
+		buf.rpos = 0
+		buf.rsize = 4
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadFixedType_Float64(b *testing.B) {
+	ti := typeInfo{TypeId: typeFlt8, Size: 8, Buffer: make([]byte, 8)}
+	binary.LittleEndian.PutUint64(ti.Buffer, math.Float64bits(3.14159))
+	ti.Reader = readFixedType
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:8], ti.Buffer)
+		buf.rpos = 0
+		buf.rsize = 8
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadFixedType_DateTime(b *testing.B) {
+	ti := typeInfo{TypeId: typeDateTime, Size: 8, Buffer: make([]byte, 8)}
+	// Encode a valid datetime
+	binary.LittleEndian.PutUint32(ti.Buffer[0:4], 43000) // days since 1900
+	binary.LittleEndian.PutUint32(ti.Buffer[4:8], 9000000)
+	ti.Reader = readFixedType
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:8], ti.Buffer)
+		buf.rpos = 0
+		buf.rsize = 8
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadByteLenType_IntN_8(b *testing.B) {
+	// Simulate reading an 8-byte INTNTYPE (int64)
+	data := make([]byte, 9) // 1 byte length + 8 bytes data
+	data[0] = 8
+	binary.LittleEndian.PutUint64(data[1:], 9876543210)
+
+	ti := typeInfo{TypeId: typeIntN, Size: 8, Buffer: make([]byte, 8)}
+	ti.Reader = readByteLenTypeWithEncoding
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:9], data)
+		buf.rpos = 0
+		buf.rsize = 9
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadByteLenType_IntN_4(b *testing.B) {
+	data := make([]byte, 5) // 1 byte length + 4 bytes data
+	data[0] = 4
+	binary.LittleEndian.PutUint32(data[1:], 12345)
+
+	ti := typeInfo{TypeId: typeIntN, Size: 4, Buffer: make([]byte, 4)}
+	ti.Reader = readByteLenTypeWithEncoding
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:5], data)
+		buf.rpos = 0
+		buf.rsize = 5
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadByteLenType_FloatN_8(b *testing.B) {
+	data := make([]byte, 9)
+	data[0] = 8
+	binary.LittleEndian.PutUint64(data[1:], math.Float64bits(2.71828))
+
+	ti := typeInfo{TypeId: typeFltN, Size: 8, Buffer: make([]byte, 8)}
+	ti.Reader = readByteLenTypeWithEncoding
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:9], data)
+		buf.rpos = 0
+		buf.rsize = 9
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadByteLenType_BitN(b *testing.B) {
+	data := []byte{1, 1} // length=1, value=true
+
+	ti := typeInfo{TypeId: typeBitN, Size: 1, Buffer: make([]byte, 1)}
+	ti.Reader = readByteLenTypeWithEncoding
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:2], data)
+		buf.rpos = 0
+		buf.rsize = 2
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadByteLenType_Null(b *testing.B) {
+	data := []byte{0} // length=0 means NULL
+
+	ti := typeInfo{TypeId: typeIntN, Size: 8, Buffer: make([]byte, 8)}
+	ti.Reader = readByteLenTypeWithEncoding
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:1], data)
+		buf.rpos = 0
+		buf.rsize = 1
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadShortLenType_NVarChar_Short(b *testing.B) {
+	// "hello" in UTF-16LE = 10 bytes
+	str := []byte{0x68, 0x00, 0x65, 0x00, 0x6c, 0x00, 0x6c, 0x00, 0x6f, 0x00}
+	data := make([]byte, 2+len(str))
+	binary.LittleEndian.PutUint16(data, uint16(len(str)))
+	copy(data[2:], str)
+
+	ti := typeInfo{TypeId: typeNVarChar, Size: 100, Buffer: make([]byte, 100)}
+	ti.Reader = readShortLenType
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:len(data)], data)
+		buf.rpos = 0
+		buf.rsize = len(data)
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadShortLenType_NVarChar_Medium(b *testing.B) {
+	// 50-char ASCII string in UTF-16LE = 100 bytes
+	str := make([]byte, 100)
+	for i := 0; i < 50; i++ {
+		str[i*2] = byte('A' + (i % 26))
+		str[i*2+1] = 0
+	}
+	data := make([]byte, 2+len(str))
+	binary.LittleEndian.PutUint16(data, uint16(len(str)))
+	copy(data[2:], str)
+
+	ti := typeInfo{TypeId: typeNVarChar, Size: 200, Buffer: make([]byte, 200)}
+	ti.Reader = readShortLenType
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:len(data)], data)
+		buf.rpos = 0
+		buf.rsize = len(data)
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadShortLenType_VarBinary(b *testing.B) {
+	// 64 bytes of binary data
+	str := make([]byte, 64)
+	for i := range str {
+		str[i] = byte(i)
+	}
+	data := make([]byte, 2+len(str))
+	binary.LittleEndian.PutUint16(data, uint16(len(str)))
+	copy(data[2:], str)
+
+	ti := typeInfo{TypeId: typeBigVarBin, Size: 100, Buffer: make([]byte, 100)}
+	ti.Reader = readShortLenType
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:len(data)], data)
+		buf.rpos = 0
+		buf.rsize = len(data)
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkReadByteLenType_VarChar(b *testing.B) {
+	// "hello world" as ASCII with default collation
+	str := []byte("hello world")
+	data := make([]byte, 1+len(str))
+	data[0] = byte(len(str))
+	copy(data[1:], str)
+
+	ti := typeInfo{TypeId: typeVarChar, Size: 50, Buffer: make([]byte, 50), Collation: cp.Collation{LcidAndFlags: 0x0409}}
+	ti.Reader = readByteLenTypeWithEncoding
+
+	buf := newTdsBuffer(512, nil)
+	enc := benchmarkEncoding()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf.rbuf[:len(data)], data)
+		buf.rpos = 0
+		buf.rsize = len(data)
+		sideeffect = ti.Reader(&ti, buf, nil, enc)
+	}
+}
+
+func BenchmarkDecodeDateTime(b *testing.B) {
+	buf := make([]byte, 8)
+	binary.LittleEndian.PutUint32(buf[0:4], 43000)
+	binary.LittleEndian.PutUint32(buf[4:8], 9000000)
+	loc := time.UTC
+
+	for i := 0; i < b.N; i++ {
+		sideeffect = decodeDateTime(buf, loc)
+	}
+}
+
+func BenchmarkDecodeDateTim4(b *testing.B) {
+	buf := make([]byte, 4)
+	binary.LittleEndian.PutUint16(buf[0:2], 20000)
+	binary.LittleEndian.PutUint16(buf[2:4], 720)
+	loc := time.UTC
+
+	for i := 0; i < b.N; i++ {
+		sideeffect = decodeDateTim4(buf, loc)
+	}
+}
+
+func BenchmarkEncodeDateTime(b *testing.B) {
+	t := time.Date(2024, 6, 15, 14, 30, 45, 123456789, time.UTC)
+	for i := 0; i < b.N; i++ {
+		sideeffect = encodeDateTime(t)
+	}
+}