diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ee407ea44..8243449ba 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -98,9 +98,10 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -117,11 +118,12 @@ jobs: # This is the key test step - only H2 tests are enabled # All other database tests are disabled by default - name: Test (ojp-jdbc-driver) with H2 only - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableH2Tests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableH2Tests=true -DenableH2PrefetchCacheTests=true - name: Show ojp-server.log if: always() # ensures it runs even if previous steps fail - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" # =========================================================================== # JOB 2: PostgreSQL Integration Tests @@ -205,7 +207,7 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT # Start second OJP server WITH SQL enhancer enabled in OPTIMIZE mode @@ -235,7 +237,7 @@ jobs: # Run PostgreSQL-specific tests with -DenablePostgresTests flag - name: Test (ojp-jdbc-driver) with PostgreSQL enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true 
-DenablePostgresTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenablePostgresTests=true -DenablePostgresPrefetchCacheTests=true # =================================================================== # SQL Enhancer Integration Test @@ -341,9 +343,10 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -359,11 +362,12 @@ jobs: # Run MySQL-specific tests with -DenableMySQLTests flag - name: Test (ojp-jdbc-driver) with MySQL enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableMySQLTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableMySQLTests=true -DenableMySQLPrefetchCacheTests=true - name: Show ojp-server.log if: always() - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" # =========================================================================== # =========================================================================== @@ -437,9 +441,10 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -455,11 +460,12 @@ jobs: # Run MariaDB-specific tests with -DenableMariaDBTests flag - name: Test (ojp-jdbc-driver) with MariaDB enabled - run: mvn test -pl ojp-jdbc-driver 
-Dgpg.skip=true -DenableMariaDBTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableMariaDBTests=true -DenableMariaDBPrefetchCacheTests=true - name: Show ojp-server.log if: always() - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" # =========================================================================== # JOB 5: CockroachDB Integration Tests @@ -524,9 +530,10 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -542,11 +549,12 @@ jobs: # Run CockroachDB-specific tests with -DenableCockroachDBTests flag - name: Test (ojp-jdbc-driver) with CockroachDB enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableCockroachDBTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableCockroachDBTests=true -DenableCockroachDBPrefetchCacheTests=true - name: Show ojp-server.log if: always() - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" # =========================================================================== # JOB 6: DB2 Integration Tests @@ -692,9 +700,10 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait 
for ojp-server to start run: sleep 10 @@ -710,11 +719,12 @@ jobs: # Run DB2-specific tests with -DenableDb2Tests flag - name: Test (ojp-jdbc-driver) with DB2 enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableDb2Tests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableDb2Tests=true -DenableDb2PrefetchCacheTests=true - name: Show ojp-server.log if: always() - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" # =========================================================================== # JOB 7: Multinode Integration Tests @@ -1688,9 +1698,10 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -1706,11 +1717,12 @@ jobs: # Run Oracle-specific tests with -DenableOracleTests flag - name: Test (ojp-jdbc-driver) with Oracle enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableOracleTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableOracleTests=true -DenableOraclePrefetchCacheTests=true - name: Show ojp-server.log if: always() # ensures it runs even if previous steps fail - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" # =========================================================================== # JOB 10: SQL Server Integration Tests @@ -1791,9 +1803,10 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e 
JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -1814,11 +1827,13 @@ jobs: run: | mvn test -pl ojp-jdbc-driver -Dgpg.skip=true \ -DenableSqlServerTests=true \ + -DenableSqlServerPrefetchCacheTests=true \ -Dtest="SQLServer*" - name: Show ojp-server.log if: always() # ensures it runs even if previous steps fail - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" # =========================================================================== # JOB 11: Notify Integration Repository diff --git a/documents/README.md b/documents/README.md index d09d01014..67391cfba 100644 --- a/documents/README.md +++ b/documents/README.md @@ -56,6 +56,13 @@ Located in [connection-pool/](connection-pool/): Located in [analysis/](analysis/): - [Transaction Isolation Handling](analysis/TRANSACTION_ISOLATION_HANDLING.md) - Complete technical documentation on transaction isolation reset behavior +## Features + +Located in [features/](features/): +- [Next-Page Prefetch Cache](features/NEXT_PAGE_PREFETCH_CACHE.md) - Transparent background pre-fetching of the next query page to eliminate round-trip latency in paginated result sets +- [SQL Enhancer Engine Quickstart](features/SQL_ENHANCER_ENGINE_QUICKSTART.md) - SQL optimisation using Apache Calcite (experimental) +- [SQL Enhancer Configuration Examples](features/SQL_ENHANCER_CONFIGURATION_EXAMPLES.md) - Configuration examples for the SQL enhancer + ## Database Setup Guides Located in [environment-setup/](environment-setup/): @@ -158,6 +165,7 @@ documents/ ├── contributor-badges/ # Recognition program ├── designs/ # Design documents ├── environment-setup/ # Database setup guides +├── features/ # Feature guides 
and documentation ├── fixed-issues/ # Issue fix documentation ├── guides/ # Developer guides ├── images/ # Diagrams and images diff --git a/documents/configuration/ojp-server-configuration.md b/documents/configuration/ojp-server-configuration.md index 48cd1dd1d..bde1b77cc 100644 --- a/documents/configuration/ojp-server-configuration.md +++ b/documents/configuration/ojp-server-configuration.md @@ -160,6 +160,79 @@ For full integration examples including Docker Compose setups, see the **[Teleme | `ojp.server.slowQuerySegregation.slowSlotTimeout` | `OJP_SERVER_SLOWQUERYSEGREGATION_SLOWSLOTTIMEOUT` | long | 120000 | Timeout for acquiring slow operation slots (ms) | 0.2.0-beta | | `ojp.server.slowQuerySegregation.fastSlotTimeout` | `OJP_SERVER_SLOWQUERYSEGREGATION_FASTSLOTTIMEOUT` | long | 60000 | Timeout for acquiring fast operation slots (ms) | 0.2.0-beta | +### Next-Page Prefetch Cache Settings + +The prefetch cache transparently pre-executes the **next page query** in the background while the current page is being sent to the client. When the client requests the next page, the rows are served from memory instead of hitting the database again, significantly reducing perceived latency for paginated result sets. + +The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET … FETCH`, `FETCH FIRST … ROWS ONLY`, MySQL `LIMIT m, n`, and standalone `LIMIT n`). + +> **Two-tier configuration model:** +> The cache uses a two-tier configuration model. The **server administrator** enables the feature globally and tunes its resource limits (TTL, max entries, timeouts). Each **client application** then controls, per datasource, whether that datasource uses the cache — without requiring a server restart. See the client-side settings below. 
+
+| Property | Environment Variable | Type | Default | Description | Since |
+|---|---|---|---|---|---|
+| `ojp.server.nextPageCache.enabled` | `OJP_SERVER_NEXTPAGECACHE_ENABLED` | boolean | false | Enable/disable the next-page prefetch cache globally | 0.4.1 |
+| `ojp.server.nextPageCache.ttlSeconds` | `OJP_SERVER_NEXTPAGECACHE_TTLSECONDS` | long | 60 | Maximum time (seconds) a cached page is kept before being discarded | 0.4.1 |
+| `ojp.server.nextPageCache.maxEntries` | `OJP_SERVER_NEXTPAGECACHE_MAXENTRIES` | int | 100 | Maximum number of cache entries across all datasources | 0.4.1 |
+| `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `OJP_SERVER_NEXTPAGECACHE_PREFETCHWAITTIMEOUTMS` | long | 5000 | Maximum time (ms) to wait for a prefetch to complete before falling back to a live query | 0.4.1 |
+| `ojp.server.nextPageCache.cleanupIntervalSeconds` | `OJP_SERVER_NEXTPAGECACHE_CLEANUPINTERVALSECONDS` | long | 60 | Interval (seconds) at which the background cleanup thread evicts expired entries | 0.4.1 |
+| `ojp.server.nextPageCache.datasource.<name>.prefetchWaitTimeoutMs` | *(no env-var equivalent)* | long | *(global default)* | Per-datasource override for `prefetchWaitTimeoutMs`; `<name>` matches `ojp.datasource.name` on the client | 0.4.1 |
+
+> **Per-datasource `enabled` is a client-side setting.**
+> Each datasource in the client application can independently opt in or out of the prefetch cache
+> by setting `ojp.nextPageCache.enabled` in its `ojp.properties`. The value is sent to the server
+> at connection time; when absent, the server's global flag applies as the fallback.
+> ```properties +> # ojp.properties — client application +> +> # Default datasource: explicitly enable the cache +> ojp.nextPageCache.enabled=true +> +> # Disable the prefetch cache for the "random-access" datasource +> random-access.ojp.nextPageCache.enabled=false +> ``` + +#### Next-Page Prefetch Cache Configuration Examples + +**Enable the cache with default settings:** +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -jar ojp-server.jar +``` + +**Enable with custom TTL and wait timeout:** +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -Dojp.server.nextPageCache.ttlSeconds=30 \ + -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=2000 \ + -jar ojp-server.jar +``` + +**Per-datasource wait timeout override (server-side):** +```bash +# Give the "analytics" datasource more time to prefetch large pages +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=2000 \ + -D"ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000" \ + -jar ojp-server.jar +``` + +**Via environment variables:** +```bash +export OJP_SERVER_NEXTPAGECACHE_ENABLED=true +export OJP_SERVER_NEXTPAGECACHE_TTLSECONDS=60 +export OJP_SERVER_NEXTPAGECACHE_PREFETCHWAITTIMEOUTMS=5000 +export OJP_SERVER_NEXTPAGECACHE_CLEANUPINTERVALSECONDS=60 +java -Duser.timezone=UTC -jar ojp-server.jar +``` + +> **ℹ️ Cache isolation**: Entries are keyed by `datasourceId + normalizedSQL`, so two datasources executing the same query never share cached data. + +> **ℹ️ Background cleanup**: A single shared virtual thread (`ojp-prefetch-cache-cleanup`) runs the eviction scan at the configured interval. No additional threads are created regardless of how many datasources are active. 
+ ### SQL Enhancer and Schema Loader Settings > **⚠️ EXPERIMENTAL FEATURE - NOT RECOMMENDED FOR PRODUCTION** diff --git a/documents/ebook/part2-chapter6-server-configuration.md b/documents/ebook/part2-chapter6-server-configuration.md index d4d8be48d..8b63ce977 100644 --- a/documents/ebook/part2-chapter6-server-configuration.md +++ b/documents/ebook/part2-chapter6-server-configuration.md @@ -402,6 +402,129 @@ graph LR E --> C ``` +## 6.8 Next-Page Prefetch Cache + +For applications that page through query results — common in reporting, data exports, and list views — OJP can dramatically reduce latency by pre-executing the **next page query in the background** while the current page is being delivered to the client. When the client then requests the next page, the rows are served directly from memory instead of making a round-trip to the database. + +### How It Works + +OJP automatically detects SQL pagination clauses in the queries your application already writes. There are no client-side changes needed — the feature is fully transparent. 
Supported pagination patterns include: + +| SQL Pattern | Example | +|---|---| +| `LIMIT n OFFSET m` | `SELECT * FROM orders LIMIT 100 OFFSET 200` | +| `OFFSET m ROWS FETCH NEXT n ROWS ONLY` | SQL Server, Oracle | +| `FETCH FIRST n ROWS ONLY` | DB2, Oracle | +| `LIMIT m, n` | MySQL shorthand | +| Standalone `LIMIT n` | First-page query without OFFSET | + +```mermaid +flowchart TD + A([Client requests page N]) --> B{Cache hit?} + B -- Yes --> C[Serve rows from memory] + B -- No --> D[Execute page N SQL against DB] + D --> E[Stream rows to client] + E --> F{SQL is paginated?} + F -- No --> Z([Done]) + F -- Yes --> G[Rewrite SQL for page N+1] + G --> H[Start background virtual thread] + H --> I[(Execute page N+1 query against DB)] + I --> J[Materialise all rows in memory] + J --> K[Store in cache keyed by datasource + SQL] + C --> L[Remove entry from cache] + L --> F +``` + +The cache key combines the datasource identifier and the normalised SQL text, so two datasources running identical queries never see each other's cached data. + +### Configuration + +The prefetch cache uses a **two-tier configuration model**: + +- **Server administrator** enables the global cache infrastructure and tunes resource limits (TTL, max entries, timeouts) in `ojp-server.properties` or as JVM system properties. +- **Client application** controls, per datasource, whether that datasource uses the cache by setting `ojp.nextPageCache.enabled` in its `ojp.properties` — without requiring a server restart. 
+
+**Step 1 — Server administrator: enable the infrastructure**
+
+```bash
+java -Duser.timezone=UTC \
+  -Dojp.server.nextPageCache.enabled=true \
+  -jar ojp-server.jar
+```
+
+**Step 2 — Client application: opt in per datasource** (`ojp.properties`)
+
+```properties
+# Default datasource — explicitly opt in
+ojp.nextPageCache.enabled=true
+
+# "random-access" datasource — opt out even though server has the cache enabled
+random-access.ojp.nextPageCache.enabled=false
+```
+
+When a datasource does not set `ojp.nextPageCache.enabled`, the server's global `ojp.server.nextPageCache.enabled` value is used as the fallback.
+
+**Server-side settings (`ojp-server.properties` / JVM system properties):**
+
+| Property | Default | Description |
+|---|---|---|
+| `ojp.server.nextPageCache.enabled` | `false` | Enable/disable the feature globally |
+| `ojp.server.nextPageCache.ttlSeconds` | `60` | Maximum age (seconds) of a cached page before eviction |
+| `ojp.server.nextPageCache.maxEntries` | `100` | Maximum number of in-memory cache entries |
+| `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum time (ms) to wait for a prefetch to complete; falls back to a live query on timeout |
+| `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction sweeps |
+| `ojp.server.nextPageCache.datasource.<name>.prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for the wait timeout (`<name>` matches `ojp.datasource.name` on the client) |
+
+**Client-side settings (`ojp.properties` in the client application):**
+
+| Property | Default | Description |
+|---|---|---|
+| `ojp.nextPageCache.enabled` | *(server global)* | Per-datasource opt-in/out; set to `false` to disable the cache for this datasource even when the server has it globally enabled |
+
+### Per-Datasource Cache Control
+
+While each client datasource controls its `enabled` flag (shown in the "Configuration" section above), the server administrator can also tune the
prefetch wait timeout on a per-datasource basis — useful when different databases have significantly different response times: + +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=2000 \ + -D"ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000" \ + -jar ojp-server.jar +``` + +### Background Cleanup + +A single virtual thread named `ojp-prefetch-cache-cleanup` runs the eviction sweep on a fixed interval, removing entries that are either expired (older than `ttlSeconds`) or abandoned (prefetch still in-flight past the TTL). Only one cleanup thread ever exists per JVM, regardless of how many datasources are active. + +```mermaid +flowchart TD + BOOT([JVM starts]) --> EX[Create shared CLEANUP_EXECUTOR\none virtual thread for all instances] + INST([Cache instance created]) --> REG[Schedule evictExpiredOrCompleted\nevery cleanupIntervalSeconds] + REG --> TASK[ScheduledFuture stored per instance] + + subgraph TICK [Every cleanupIntervalSeconds] + T1[Iterate all entries] --> T2{Entry done or failed?} + T2 -- Yes + expired --> T3[Remove entry] + T2 -- No, still in-flight --> T4{Older than ttlSeconds?} + T4 -- Yes --> T5[Cancel prefetch future\nRemove entry] + T4 -- No --> T6[Keep entry] + end + EX --> TICK +``` + +### When to Enable It + +The prefetch cache delivers the most benefit when: + +- Your application pages through results **sequentially** (page 1, 2, 3, …) rather than jumping to arbitrary offsets. +- The database round-trip latency is noticeable (> 50 ms) for each page query. +- Pagination page sizes are consistent across requests for the same query. + +It has minimal impact (and adds slight overhead) when queries jump to random offsets, when all rows fit on a single page, or when the database is so fast that the prefetch rarely completes before the client requests the next page. + +**[IMAGE PROMPT: Create a timeline diagram showing two scenarios side-by-side. 
Left side: "Without Prefetch Cache" showing sequential client requests each waiting for a DB round-trip. Right side: "With Prefetch Cache" showing the next page being pre-fetched while the current page is delivered, with the second request served instantly from memory. Use a horizontal timeline axis labeled "Time" with colored blocks for DB calls and client waits. Style: Performance comparison diagram with green (fast) vs gray (waiting) blocks.]** + ## 6.9 Configuration Validation and Troubleshooting When things don't work as expected, configuration issues are often the culprit. OJP provides clear error messages when configuration values are invalid or inconsistent. The server validates configuration at startup and fails fast if critical settings are problematic. @@ -434,8 +557,8 @@ The server logs its active configuration at INFO level during startup. Review th OJP server configuration gives you precise control over server behavior, security, performance, and observability. The hierarchical configuration system with JVM properties and environment variables provides flexibility for different deployment scenarios. Default settings work well for most use cases, but understanding the available options lets you optimize for your specific workload. -Key configuration areas include core server settings for network and threading, security controls through IP whitelisting, logging levels for operational visibility, OpenTelemetry integration for observability, circuit breakers for resilience, and slow query segregation for performance under mixed workloads. Each area offers sensible defaults that you can refine based on monitoring data. 
+Key configuration areas include core server settings for network and threading, security controls through IP whitelisting, logging levels for operational visibility, OpenTelemetry integration for observability, circuit breakers for resilience, slow query segregation for performance under mixed workloads, and the next-page prefetch cache for transparently accelerating paginated queries. Each area offers sensible defaults that you can refine based on monitoring data. Start simple, monitor closely, and adjust based on observed behavior. Good configuration emerges from understanding your workload and using OJP's flexibility to match it, not from cargo-culting settings from other environments. -**[IMAGE PROMPT: Create a summary mind map with "OJP Server Configuration" at the center. Six main branches radiating outward: "Core Settings" (server icon), "Security" (lock icon), "Logging" (document icon), "Telemetry" (graph icon), "Circuit Breaker" (shield icon), and "Slow Query Segregation" (speedometer icon). Each branch has 2-3 sub-branches with key points. Use colors to group related concepts and make it visually hierarchical. Style: Modern mind map with icons and color coding.]** +**[IMAGE PROMPT: Create a summary mind map with "OJP Server Configuration" at the center. Seven main branches radiating outward: "Core Settings" (server icon), "Security" (lock icon), "Logging" (document icon), "Telemetry" (graph icon), "Circuit Breaker" (shield icon), "Slow Query Segregation" (speedometer icon), and "Prefetch Cache" (cache/memory icon). Each branch has 2-3 sub-branches with key points. Use colors to group related concepts and make it visually hierarchical. 
Style: Modern mind map with icons and color coding.]** diff --git a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md new file mode 100644 index 000000000..dab55eaf5 --- /dev/null +++ b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md @@ -0,0 +1,183 @@ +# Next-Page Prefetch Cache + +The **Next-Page Prefetch Cache** transparently pre-executes the next page query in the background while the current page is being streamed to the client. When the client requests the next page, OJP serves it from memory instead of making a round-trip to the database, eliminating the latency of sequential pagination. + +## How It Works + +### Request Flow + +```mermaid +flowchart TD + A([Client sends paginated query]) --> B{Cache enabled?} + B -- No --> LIVE[Execute query live against DB] + B -- Yes --> C{Entry in cache for this page?} + + C -- No: cache MISS --> LIVE + C -- Yes: entry exists --> D{Prefetch still in flight?} + + D -- Completed --> E{Entry expired?\n> ttlSeconds} + D -- In flight --> F[Wait up to prefetchWaitTimeoutMs] + F -- Completed in time --> E + F -- Timed out --> LIVE + + E -- Expired --> LIVE + E -- Fresh --> SERVE[Serve rows from memory\ncache HIT] + + LIVE --> RESP([Send rows to client]) + SERVE --> RESP + + LIVE --> G{Is the query paginated?} + G -- No --> DONE([Done]) + G -- Yes --> H[Rewrite SQL for next page] + H --> I[Start virtual thread ojp-next-page-prefetch] + I --> J[(DB: execute next-page SQL)] + J --> K[Materialise all rows in memory] + K --> L[Store CachedPage in cache map] + L --> DONE + + SERVE --> M[Remove entry from cache\nsingle-use semantics] + M --> G +``` + +### Background Cleanup + +```mermaid +flowchart TD + BOOT([JVM starts]) --> EX[Create single shared CLEANUP_EXECUTOR\nstatic final virtual thread] + INST([New cache instance created]) --> REG[Register periodic task on shared executor] + REG --> TASK[ScheduledFuture stored in AtomicReference per instance] + EX --> TICK + + subgraph TICK [Every 
cleanupIntervalSeconds] + direction TB + T1[Iterate all cache entries] --> T2{Entry completed or failed?} + T2 -- Yes + expired --> T3[Remove entry] + T2 -- No: still in-flight --> T4{Created > ttlSeconds ago?} + T4 -- Yes --> T5[Cancel future\nRemove entry] + T4 -- No --> T6[Keep entry] + end +``` + +Only **one** cleanup thread exists per JVM (`ojp-prefetch-cache-cleanup`), shared across all cache instances. It runs as a virtual thread. + +## Pagination Pattern Detection + +OJP automatically detects the following SQL pagination patterns: + +| Pattern | Example | +|---|---| +| `LIMIT n OFFSET m` | `SELECT * FROM t LIMIT 100 OFFSET 200` | +| `OFFSET m ROWS FETCH NEXT n ROWS ONLY` | SQL Server, Oracle | +| `OFFSET m ROWS FETCH FIRST n ROWS ONLY` | DB2, Oracle | +| `FETCH FIRST n ROWS ONLY` (no offset) | First page | +| `FETCH NEXT n ROWS ONLY` (no offset) | First page | +| `LIMIT m, n` | MySQL shorthand | +| `LIMIT n` (no offset) | First page | + +## Cache Isolation + +Each cache entry is keyed by **datasource identifier + normalised SQL**. Two datasources running the same query never share cached data, preventing data leakage between tenants or connections. 
+
+## Configuration Reference
+
+### Server-Side Settings (`ojp-server.properties` / JVM system properties)
+
+| Property | Default | Description |
+|---|---|---|
+| `ojp.server.nextPageCache.enabled` | `false` | Enable the feature globally (opt-in) |
+| `ojp.server.nextPageCache.ttlSeconds` | `60` | Maximum age of a cached page before eviction |
+| `ojp.server.nextPageCache.maxEntries` | `100` | Maximum cache entries across all datasources |
+| `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum wait (ms) for an in-flight prefetch before falling back to a live query |
+| `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction scans |
+| `ojp.server.nextPageCache.datasource.<name>.prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for `prefetchWaitTimeoutMs`; `<name>` matches `ojp.datasource.name` sent by the client |
+
+### Client-Side Settings (`ojp.properties` in the client application)
+
+| Property | Default | Description |
+|---|---|---|
+| `ojp.nextPageCache.enabled` | *(server global)* | Per-datasource opt-in/out; when `false` the cache is disabled for this datasource even if the server has it globally enabled |
+
+The `enabled` flag is set in the client's `ojp.properties` file and is sent to the server at
+connection time. When absent, the server's global `ojp.server.nextPageCache.enabled` value applies.
+ +### Per-Datasource Configuration + +Each datasource in the client application can independently opt in or out of the prefetch cache: + +```properties +# ojp.properties — client application + +# Default datasource: explicitly enable the cache +ojp.nextPageCache.enabled=true + +# "olap" datasource: disable the prefetch cache for random-access workloads +olap.ojp.nextPageCache.enabled=false +``` + +The server-side `prefetchWaitTimeoutMs` can also be overridden per datasource (server configuration): + +```properties +# ojp-server.properties or JVM system properties +ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000 +ojp.server.nextPageCache.datasource.oltp.prefetchWaitTimeoutMs=1000 +``` + +## Quick Start + +**Step 1 — Server administrator: enable the cache infrastructure** +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -jar ojp-server.jar +``` + +**Step 2 — Client application: opt in per datasource** (`ojp.properties`) +```properties +# Default datasource: enable the prefetch cache +ojp.nextPageCache.enabled=true + +# "olap" datasource: disable cache for random-access workloads +olap.ojp.nextPageCache.enabled=false +``` + +**Tuned server settings for a reporting workload:** +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -Dojp.server.nextPageCache.ttlSeconds=120 \ + -Dojp.server.nextPageCache.maxEntries=200 \ + -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=8000 \ + -jar ojp-server.jar +``` + +**Via environment variables:** +```bash +export OJP_SERVER_NEXTPAGECACHE_ENABLED=true +export OJP_SERVER_NEXTPAGECACHE_TTLSECONDS=60 +export OJP_SERVER_NEXTPAGECACHE_PREFETCHWAITTIMEOUTMS=5000 +export OJP_SERVER_NEXTPAGECACHE_CLEANUPINTERVALSECONDS=60 +java -Duser.timezone=UTC -jar ojp-server.jar +``` + +## Interaction with gRPC Row Streaming + +OJP already streams query results to the client in blocks of 100 rows per gRPC message (the intrinsic transport-layer pagination). 
The prefetch cache operates at a higher level and is completely independent: + +| Layer | What it does | +|---|---| +| **gRPC row streaming** | Slices any single query result into 100-row gRPC messages for efficient transport | +| **Prefetch cache (this feature)** | Pre-executes the *next SQL page query* in the background; the returned rows are then delivered via the same 100-row gRPC streaming | + +The two mechanisms complement each other — the cache eliminates database round-trips, while the gRPC streaming ensures large results are transferred efficiently. + +## When to Use It + +**Best fit:** +- Applications that page through results sequentially (page 1 → 2 → 3 …). +- Database round-trip latency is noticeable (> 50 ms per page). +- Page sizes are consistent across subsequent requests for the same query. + +**Minimal benefit:** +- Queries that jump to arbitrary offsets (random access pagination). +- All rows fit on a single page. +- The database responds faster than the client can consume pages. 
diff --git a/ojp-grpc-commons/src/main/java/org/openjproxy/constants/CommonConstants.java b/ojp-grpc-commons/src/main/java/org/openjproxy/constants/CommonConstants.java index 1cf795f21..ef59a4e54 100644 --- a/ojp-grpc-commons/src/main/java/org/openjproxy/constants/CommonConstants.java +++ b/ojp-grpc-commons/src/main/java/org/openjproxy/constants/CommonConstants.java @@ -26,6 +26,7 @@ public class CommonConstants { // Configuration property keys public static final String DATASOURCE_NAME_PROPERTY = "ojp.datasource.name"; + public static final String NEXT_PAGE_CACHE_ENABLED_PROPERTY = "ojp.nextPageCache.enabled"; public static final String MAXIMUM_POOL_SIZE_PROPERTY = "ojp.connection.pool.maximumPoolSize"; public static final String MINIMUM_IDLE_PROPERTY = "ojp.connection.pool.minimumIdle"; public static final String IDLE_TIMEOUT_PROPERTY = "ojp.connection.pool.idleTimeout"; diff --git a/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/DatasourcePropertiesLoader.java b/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/DatasourcePropertiesLoader.java index 0bed6378e..e659ba04b 100644 --- a/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/DatasourcePropertiesLoader.java +++ b/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/DatasourcePropertiesLoader.java @@ -24,6 +24,7 @@ public class DatasourcePropertiesLoader { private static final String DEFAULT_DATASOURCE_NAME = "default"; private static final String OJP_POOL_PREFIX = "ojp.connection.pool."; private static final String OJP_XA_PREFIX = "ojp.xa."; + private static final String OJP_NEXT_PAGE_CACHE_PREFIX = "ojp.nextPageCache."; /** * Load ojp.properties and extract configuration for the datasource identified by @@ -105,11 +106,14 @@ private static void applyEnvProperties(Properties result, String prefixDot, bool } private static boolean hasPrefixedOjpKey(String key, String prefixDot) { - return key.startsWith(prefixDot + OJP_POOL_PREFIX) || key.startsWith(prefixDot + OJP_XA_PREFIX); + return 
key.startsWith(prefixDot + OJP_POOL_PREFIX) + || key.startsWith(prefixDot + OJP_XA_PREFIX) + || key.startsWith(prefixDot + OJP_NEXT_PAGE_CACHE_PREFIX); } private static boolean isUnprefixedOjpKey(String key) { - return key.startsWith(OJP_POOL_PREFIX) || key.startsWith(OJP_XA_PREFIX); + return key.startsWith(OJP_POOL_PREFIX) || key.startsWith(OJP_XA_PREFIX) + || key.startsWith(OJP_NEXT_PAGE_CACHE_PREFIX); } private static void copyUnprefixedOjpProperties(Properties target, Properties source) { diff --git a/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/ResultSet.java b/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/ResultSet.java index f573e2b96..8836dd784 100644 --- a/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/ResultSet.java +++ b/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/ResultSet.java @@ -132,6 +132,12 @@ public void close() throws SQLException { this.blockIdx = null; this.itResults = null; this.currentDataBlock = null; + // When the result set was served directly from the prefetch cache there is no + // server-side ResultSet object registered, so skip the remote close call. + String uuid = this.getResultSetUUID(); + if (uuid == null || uuid.isBlank()) { + return; + } //If the parent statement is closed the result set is closed already, attempting to close it again would produce an error. 
if (this.statement == null || !this.statement.isClosed()) { super.close(); diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..00442d192 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java @@ -0,0 +1,265 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with a CockroachDB backend. + * + *

CockroachDB is PostgreSQL-wire-compatible, so it uses the same {@code LIMIT n OFFSET m} + * pagination syntax and {@code BYTEA} binary type as the PostgreSQL test. + * + *

The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

+ * <ol>
+ *   <li>Creates a dedicated table with multiple column types, including a {@code BYTEA} column.</li>
+ *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
+ *   <li>Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance
+ *       that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). The client also sets
+ *       {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the
+ *       per-datasource opt-in sent to the server on connect.</li>
+ *   <li>Asserts every column value, including a byte-exact comparison of the
+ *       {@code BYTEA} column.</li>
+ *   <li>Drops the table on completion.</li>
+ * </ol>
+ * + *

This test is disabled by default and is activated by passing + * {@code -DenableCockroachDBPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on default port 1059 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. + */ +class CockroachDBPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(CockroachDBPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableCockroachDBPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for CockroachDB. 
+ */ + @ParameterizedTest + @CsvFileSource(resources = "/cockroachdb_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "CockroachDB prefetch-cache tests are disabled " + + "(pass -DenableCockroachDBPrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "ojp_pfx_crdb_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

Schema: + *

+     *   id         INT      PRIMARY KEY      – 1-based row identifier
+     *   name       VARCHAR  NOT NULL         – "record_{id}"
+     *   val_int    INT      NOT NULL         – id × 10
+     *   val_bigint BIGINT   NOT NULL         – id × 1,000,000
+     *   val_bool   BOOLEAN  NOT NULL         – true when id is even
+     *   val_text   TEXT     NOT NULL         – "text_value_for_row_{id}"
+     *   val_bytea  BYTEA    NOT NULL         – four deterministic bytes derived from id
+     * 
+ */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT PRIMARY KEY," + + " name VARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool BOOLEAN NOT NULL," + + " val_text TEXT NOT NULL," + + " val_bytea BYTEA NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_bytea)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setBoolean(5, i % 2 == 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBytea(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_bytea" + + " FROM " + tableName + + " ORDER BY id" + + " LIMIT " + PAGE_SIZE + " OFFSET " + offset; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + 
id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + assertBytea(expectedBytea(id), rs.getObject("val_bytea"), + "val_bytea for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBytea(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } + + /** + * Asserts that {@code actual} (which may be a {@code byte[]} or the hex-escape + * {@code String} {@code "\\xHH…"}) equals {@code expected} byte-for-byte. 
+ */ + private static void assertBytea(byte[] expected, Object actual, String columnLabel) { + assertNotNull(actual, columnLabel + " must not be null"); + + byte[] actualBytes; + if (actual instanceof byte[]) { + actualBytes = (byte[]) actual; + } else if (actual instanceof String) { + String s = (String) actual; + if (s.startsWith("\\x") || s.startsWith("\\X")) { + actualBytes = hexStringToBytes(s.substring(2)); + } else { + actualBytes = s.getBytes(java.nio.charset.StandardCharsets.UTF_8); + } + } else { + actualBytes = fail(columnLabel + " has unexpected type " + actual.getClass().getName()); + } + + assertArrayEquals(expected, actualBytes, columnLabel + " bytes do not match"); + } + + private static byte[] hexStringToBytes(String hex) { + if (hex.isEmpty()) { + return new byte[0]; + } + int len = hex.length(); + byte[] data = new byte[len / 2]; + for (int i = 0; i < len; i += 2) { + data[i / 2] = (byte) ((Character.digit(hex.charAt(i), 16) << 4) + + Character.digit(hex.charAt(i + 1), 16)); + } + return data; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java new file mode 100644 index 000000000..6772df9b9 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java @@ -0,0 +1,267 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with an IBM DB2 backend. + * + *

DB2 uses the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination syntax. + * + *

The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

+ * <ol>
+ *   <li>Creates a dedicated table under the {@code DB2INST1} schema with multiple column types,
+ *       including a {@code BLOB} column.</li>
+ *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
+ *   <li>Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an
+ *       OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059).
+ *       The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties},
+ *       which is the per-datasource opt-in sent to the server on connect.</li>
+ *   <li>Asserts every column value, including a byte-exact comparison of the
+ *       {@code BLOB} column.</li>
+ *   <li>Drops the table on completion.</li>
+ * </ol>
+ * + *

This test is disabled by default and is activated by passing + * {@code -DenableDb2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on default port 1059 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. + */ +class Db2PaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(Db2PaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableDb2PrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for DB2. 
+ */ + @ParameterizedTest + @CsvFileSource(resources = "/db2_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "DB2 prefetch-cache tests are disabled " + + "(pass -DenableDb2PrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "DB2INST1.ojp_pfx_db2_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + // DB2 requires explicit schema to avoid "object not found" errors + try (Statement schemaStmt = conn.createStatement()) { + schemaStmt.execute("SET SCHEMA DB2INST1"); + } + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

Schema: + *

+     *   id         INTEGER      NOT NULL PRIMARY KEY  – 1-based row identifier
+     *   name       VARCHAR(100) NOT NULL              – "record_{id}"
+     *   val_int    INTEGER      NOT NULL              – id × 10
+     *   val_bigint BIGINT       NOT NULL              – id × 1,000,000
+     *   val_bool   SMALLINT     NOT NULL              – 1 when id is even, else 0
+     *   val_text   VARCHAR(255) NOT NULL              – "text_value_for_row_{id}"
+     *   val_blob   BLOB(1K)     NOT NULL              – four deterministic bytes derived from id
+     * 
+ * + *

Note: DB2 does not have a native BOOLEAN SQL type in older versions; {@code SMALLINT} (0/1) + * is used as a portable substitute. TEXT is replaced with VARCHAR(255). + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + // Drop if exists (DB2 uses different DROP syntax) + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + } catch (SQLException e) { + // Table does not exist – ignore + } + try (Statement stmt = conn.createStatement()) { + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INTEGER NOT NULL," + + " name VARCHAR(100) NOT NULL," + + " val_int INTEGER NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool SMALLINT NOT NULL," + + " val_text VARCHAR(255) NOT NULL," + + " val_blob BLOB(1K) NOT NULL," + + " PRIMARY KEY (id)" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_blob)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBlob(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + // DB2 uses OFFSET m ROWS FETCH NEXT n ROWS ONLY + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_blob" + + " FROM " + tableName + + " ORDER BY id" + + " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY"; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + byte[] actualBlob = toBlobBytes(rs, "val_blob", id); + assertNotNull(actualBlob, "val_blob for id=" + id + " must not be null"); + assertArrayEquals(expectedBlob(id), actualBlob, + "val_blob bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + /** + * Reads a BLOB/binary column as a {@code byte[]}. + * + *

The prefetch cache materialises BLOBs as {@code byte[]} ({@link java.sql.Types#BINARY} / + * {@code VARBINARY}) when serving from cache, whereas a live DB query may return a + * {@link java.sql.Blob} object. Both representations are handled here. + */ + private static byte[] toBlobBytes(ResultSet rs, String column, int id) throws SQLException { + Object obj = rs.getObject(column); + if (obj == null) { + return null; + } + if (obj instanceof byte[]) { + return (byte[]) obj; + } + if (obj instanceof java.sql.Blob) { + java.sql.Blob blob = (java.sql.Blob) obj; + return blob.getBytes(1, (int) blob.length()); + } + // Fallback: use getBytes + return rs.getBytes(column); + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBlob(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java new file mode 100644 index 000000000..5ad694b4b --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java @@ -0,0 +1,230 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with an H2 backend. + * + *

H2 supports the {@code LIMIT n OFFSET m} pagination syntax. + * + *

The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

+ * <ol>
+ *   <li>Creates a dedicated table with multiple column types, including a {@code VARBINARY} column.</li>
+ *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
+ *   <li>Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance
+ *       that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). The client also sets
+ *       {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the
+ *       per-datasource opt-in sent to the server on connect.</li>
+ *   <li>Asserts every column value, including a byte-exact comparison of the
+ *       {@code VARBINARY} column.</li>
+ *   <li>Drops the table on completion.</li>
+ * </ol>
+ * + *

This test is disabled by default and is activated by passing + * {@code -DenableH2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on default port 1059 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. + */ +class H2PaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(H2PaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableH2PrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for H2. 
+ */ + @ParameterizedTest + @CsvFileSource(resources = "/h2_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "H2 prefetch-cache tests are disabled " + + "(pass -DenableH2PrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "OJP_PFX_H2_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

Schema: + *

+     *   id         INT           PRIMARY KEY  – 1-based row identifier
+     *   name       VARCHAR(100)  NOT NULL     – "record_{id}"
+     *   val_int    INT           NOT NULL     – id × 10
+     *   val_bigint BIGINT        NOT NULL     – id × 1,000,000
+     *   val_bool   BOOLEAN       NOT NULL     – true when id is even
+     *   val_text   VARCHAR(255)  NOT NULL     – "text_value_for_row_{id}"
+     *   val_binary VARBINARY(32) NOT NULL     – four deterministic bytes derived from id
+     * 
+ */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT PRIMARY KEY," + + " name VARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool BOOLEAN NOT NULL," + + " val_text VARCHAR(255) NOT NULL," + + " val_binary VARBINARY(32) NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setBoolean(5, i % 2 == 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBinary(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" + + " FROM " + tableName + + " ORDER BY id" + + " LIMIT " + PAGE_SIZE + " OFFSET " + offset; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name 
mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + Object binObj = rs.getObject("val_binary"); + assertNotNull(binObj, "val_binary for id=" + id + " must not be null"); + byte[] actualBytes = binObj instanceof byte[] ? (byte[]) binObj + : rs.getBytes("val_binary"); + assertArrayEquals(expectedBinary(id), actualBytes, + "val_binary bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBinary(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..745cd39b6 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java @@ -0,0 
+1,262 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with a MySQL backend. + * + *

The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

    + *
  1. Creates a dedicated table with multiple column types, including a {@code VARBINARY} LOB column.
  2. + *
  3. Inserts the requested number of rows with fully deterministic, per-row values.
  4. + *
  5. Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance + * that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). The client also sets + * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the + * per-datasource opt-in sent to the server on connect.
  6. + *
  7. Asserts every column value, including a byte-exact comparison of the + * {@code VARBINARY} column.
  8. + *
  9. Drops the table on completion.
  10. + *
+ * + *

This test is disabled by default and is activated by passing + * {@code -DenableMySQLPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on default port 1059 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. + */ +class MySQLMariaDBPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(MySQLMariaDBPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isMySQLTestEnabled; + private static boolean isMariaDBTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isMySQLTestEnabled = Boolean.parseBoolean( + System.getProperty("enableMySQLPrefetchCacheTests", "false")); + isMariaDBTestEnabled = Boolean.parseBoolean( + System.getProperty("enableMariaDBPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized tests – one run per row in each CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for MySQL. + */ + @ParameterizedTest + @CsvFileSource(resources = "/mysql_prefetch_cache_connections_with_record_counts.csv") + void testMySQLPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isMySQLTestEnabled, + "MySQL prefetch-cache tests are disabled " + + "(pass -DenableMySQLPrefetchCacheTests=true to enable)"); + + runPaginationTest(recordCount, driverClass, url, user, pwd, "ojp_pfx_mysql_"); + } + + /** + * Core pagination test for MariaDB. 
+ */ + @ParameterizedTest + @CsvFileSource(resources = "/mariadb_prefetch_cache_connections_with_record_counts.csv") + void testMariaDBPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isMariaDBTestEnabled, + "MariaDB prefetch-cache tests are disabled " + + "(pass -DenableMariaDBPrefetchCacheTests=true to enable)"); + + runPaginationTest(recordCount, driverClass, url, user, pwd, "ojp_pfx_maria_"); + } + + // ------------------------------------------------------------------------- + // Shared implementation + // ------------------------------------------------------------------------- + + private void runPaginationTest(int recordCount, String driverClass, + String url, String user, String pwd, + String tablePrefix) + throws SQLException, ClassNotFoundException { + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = tablePrefix + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

Schema: + *

+     *   id         INT          PRIMARY KEY   – 1-based row identifier
+     *   name       VARCHAR(100) NOT NULL      – "record_{id}"
+     *   val_int    INT          NOT NULL      – id × 10
+     *   val_bigint BIGINT       NOT NULL      – id × 1,000,000
+     *   val_bool   TINYINT(1)   NOT NULL      – true (1) when id is even, else false (0)
+     *   val_text   TEXT         NOT NULL      – "text_value_for_row_{id}"
+     *   val_binary VARBINARY(32) NOT NULL     – four deterministic bytes derived from id
+     * 
+ */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT PRIMARY KEY," + + " name VARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool TINYINT(1) NOT NULL," + + " val_text TEXT NOT NULL," + + " val_binary VARBINARY(32) NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBinary(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" + + " FROM " + tableName + + " ORDER BY id" + + " LIMIT " + PAGE_SIZE + " OFFSET " + offset; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + Object binaryObj = rs.getObject("val_binary"); + assertNotNull(binaryObj, "val_binary for id=" + id + " must not be null"); + byte[] actualBytes = binaryObj instanceof byte[] ? 
(byte[]) binaryObj + : rs.getBytes("val_binary"); + assertArrayEquals(expectedBinary(id), actualBytes, + "val_binary bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + /** + * Returns four deterministic bytes for a given {@code rowId}. + */ + private static byte[] expectedBinary(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java new file mode 100644 index 000000000..3a989cad9 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java @@ -0,0 +1,264 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import 
static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with an Oracle backend. + * + *

Oracle 12c+ supports the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination + * syntax, which is recognised by the OJP {@code PaginationDetector}. + * + *

The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

    + *
  1. Creates a dedicated table with multiple column types, including a {@code BLOB} column.
  2. + *
  3. Inserts the requested number of rows with fully deterministic, per-row values.
  4. + *
  5. Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an + * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). + * The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, + * which is the per-datasource opt-in sent to the server on connect.
  6. + *
  7. Asserts every column value, including a byte-exact comparison of the + * {@code BLOB} column.
  8. + *
  9. Drops the table on completion.
  10. + *
+ * + *

This test is disabled by default and is activated by passing + * {@code -DenableOraclePrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on default port 1059 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. + * + *

Oracle type notes: + *

    + *
  • No native BOOLEAN SQL type (until Oracle 23c) → {@code NUMBER(1)} (0/1) is used.
  • + *
  • No BIGINT → {@code NUMBER(19,0)}.
  • + *
  • No TEXT → {@code VARCHAR2(255)}.
  • + *
  • Binary data → {@code BLOB}.
  • + *
+ */ +class OraclePaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(OraclePaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableOraclePrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for Oracle. + */ + @ParameterizedTest + @CsvFileSource(resources = "/oracle_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "Oracle prefetch-cache tests are disabled " + + "(pass -DenableOraclePrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "ojp_pfx_ora_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // 
------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

Schema: + *

+     *   id         NUMBER(10)   PRIMARY KEY   – 1-based row identifier
+     *   name       VARCHAR2(100) NOT NULL     – "record_{id}"
+     *   val_int    NUMBER(10)   NOT NULL      – id × 10
+     *   val_bigint NUMBER(19,0) NOT NULL      – id × 1,000,000
+     *   val_bool   NUMBER(1)    NOT NULL      – 1 when id is even, else 0
+     *   val_text   VARCHAR2(255) NOT NULL     – "text_value_for_row_{id}"
+     *   val_blob   BLOB         NOT NULL      – four deterministic bytes derived from id
+     * 
+ */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + } catch (SQLException e) { + // Table does not exist – ignore + } + try (Statement stmt = conn.createStatement()) { + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id NUMBER(10) NOT NULL," + + " name VARCHAR2(100) NOT NULL," + + " val_int NUMBER(10) NOT NULL," + + " val_bigint NUMBER(19,0) NOT NULL," + + " val_bool NUMBER(1) NOT NULL," + + " val_text VARCHAR2(255) NOT NULL," + + " val_blob BLOB NOT NULL," + + " CONSTRAINT pk_" + tableName + " PRIMARY KEY (id)" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_blob)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBlob(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + // Oracle 12c+ OFFSET/FETCH syntax + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_blob" + + " FROM " + tableName + + " ORDER BY id" + + " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY"; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + byte[] actualBlob = toBlobBytes(rs, "val_blob", id); + assertNotNull(actualBlob, "val_blob for id=" + id + " must not be null"); + assertArrayEquals(expectedBlob(id), actualBlob, + "val_blob bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + /** + * Reads a BLOB column as a {@code byte[]}. + * + *

The prefetch cache materialises BLOBs as {@code byte[]} when serving from cache, + * whereas a live DB query returns a {@link java.sql.Blob} object. Both are handled here. + */ + private static byte[] toBlobBytes(ResultSet rs, String column, int id) throws SQLException { + Object obj = rs.getObject(column); + if (obj == null) { + return null; + } + if (obj instanceof byte[]) { + return (byte[]) obj; + } + if (obj instanceof java.sql.Blob) { + java.sql.Blob blob = (java.sql.Blob) obj; + return blob.getBytes(1, (int) blob.length()); + } + return rs.getBytes(column); + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBlob(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..5cbec47d1 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java @@ -0,0 +1,317 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; 
+import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with a PostgreSQL backend. + * + *

The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

    + *
  1. Creates a dedicated table with multiple column types, including a {@code BYTEA} LOB column.
  2. + *
  3. Inserts the requested number of rows with fully deterministic, per-row values.
  4. + *
  5. Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance + * that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). The client also sets + * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the + * per-datasource opt-in sent to the server on connect.
  6. + *
  7. Asserts every column value, including a byte-exact comparison of the + * {@code BYTEA} column.
  8. + *
  9. Drops the table on completion.
  10. + *
+ * + *

This test is disabled by default and is activated by passing + * {@code -DenablePostgresPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on default port 1059 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. + */ +class PostgresPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(PostgresPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enablePostgresPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test. + * + *

The CSV provides five combinations of record count × connection details so that the + * same test method covers: a partial last page (99), exactly one full page (100), + * one full page plus one row (101), a non-round number (567), and a 10-page set (1000). + * + * @param recordCount total rows to insert and paginate over + * @param driverClass fully-qualified OJP driver class (loaded as a side-effect) + * @param url JDBC URL pointing at the prefetch-cache OJP server (default port 1059) + * @param user database user + * @param pwd database password + */ + @ParameterizedTest + @CsvFileSource(resources = "/postgres_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "Postgres prefetch-cache tests are disabled " + + "(pass -DenablePostgresPrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + // Table name is unique per record-count so parallel executions don't collide + String tableName = "ojp_pfx_pg_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + // ------------------------------------------------------------------ + // 1. Setup: fresh table + batch insert + // ------------------------------------------------------------------ + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + // ------------------------------------------------------------------ + // 2. 
Paginate and assert every value on every row + // ------------------------------------------------------------------ + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + // ------------------------------------------------------------------ + // 3. Cleanup + // ------------------------------------------------------------------ + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

Schema: + *

+     *   id         INT      PRIMARY KEY      – 1-based row identifier
+     *   name       VARCHAR  NOT NULL         – "record_{id}"
+     *   val_int    INT      NOT NULL         – id × 10
+     *   val_bigint BIGINT   NOT NULL         – id × 1,000,000
+     *   val_bool   BOOLEAN  NOT NULL         – true when id is even
+     *   val_text   TEXT     NOT NULL         – "text_value_for_row_{id}"
+     *   val_bytea  BYTEA    NOT NULL         – four deterministic bytes derived from id
+     * 
+ */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT PRIMARY KEY," + + " name VARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool BOOLEAN NOT NULL," + + " val_text TEXT NOT NULL," + + " val_bytea BYTEA NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + /** + * Inserts {@code recordCount} rows using a {@link PreparedStatement} batch for efficiency. + */ + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_bytea)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setBoolean(5, i % 2 == 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBytea(i)); + ps.addBatch(); + + // Flush in chunks to avoid oversized batches + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + /** + * Queries one page ({@code LIMIT PAGE_SIZE OFFSET offset}), asserts every column value + * for every row on the page, and returns the number of rows actually returned. 
+ */ + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_bytea" + + " FROM " + tableName + + " ORDER BY id" + + " LIMIT " + PAGE_SIZE + " OFFSET " + offset; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + // BYTEA: the prefetch cache materialises BINARY/VARBINARY as byte[]. + // PostgreSQL JDBC may also represent BYTEA as its hex escape string + // (e.g. "\\x01020304") when retrieved via getObject(); both forms are + // accepted here and compared byte-for-byte. + assertBytea(expectedBytea(id), rs.getObject("val_bytea"), + "val_bytea for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + /** Drops the test table, ignoring errors (e.g., table does not exist). 
*/ + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + /** + * Returns four deterministic bytes for a given {@code rowId}: + *
    + *
  • byte 0: low 8 bits of rowId
  • + *
  • byte 1: high 8 bits of rowId (bits 8-15)
  • + *
  • byte 2: low 8 bits of (rowId × 3)
  • + *
  • byte 3: low 8 bits of (rowId × 7)
  • + *
+ * All four bytes are different for any rowId in [1, 1000], ensuring that the test + * cannot pass by coincidence on a partial or shuffled result set. + */ + private static byte[] expectedBytea(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } + + /** + * Asserts that {@code actual} (which may be a {@code byte[]} or the PostgreSQL hex-escape + * {@code String} {@code "\\xHH…"}) equals {@code expected} byte-for-byte. + * + * @param expected the expected byte array + * @param actual value returned by {@link ResultSet#getObject(String)} + * @param columnLabel column name used in failure messages + */ + private static void assertBytea(byte[] expected, Object actual, String columnLabel) { + assertNotNull(actual, columnLabel + " must not be null"); + + byte[] actualBytes; + if (actual instanceof byte[]) { + actualBytes = (byte[]) actual; + } else if (actual instanceof String) { + // PostgreSQL JDBC hex-escape format: \x followed by lowercase hex pairs + String s = (String) actual; + if (s.startsWith("\\x") || s.startsWith("\\X")) { + actualBytes = hexStringToBytes(s.substring(2)); + } else { + actualBytes = s.getBytes(java.nio.charset.StandardCharsets.UTF_8); + } + } else { + actualBytes = fail(columnLabel + " has unexpected type " + actual.getClass().getName()); + } + + assertArrayEquals(expected, actualBytes, columnLabel + " bytes do not match"); + } + + /** + * Converts a lower-case hex string (e.g. {@code "0102030a"}) to a {@code byte[]}. 
+ * + * @param hex hex string with an even number of characters and no prefix + * @return decoded byte array + */ + private static byte[] hexStringToBytes(String hex) { + if (hex.isEmpty()) { + return new byte[0]; + } + int len = hex.length(); + byte[] data = new byte[len / 2]; + for (int i = 0; i < len; i += 2) { + data[i / 2] = (byte) ((Character.digit(hex.charAt(i), 16) << 4) + + Character.digit(hex.charAt(i + 1), 16)); + } + return data; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..42c399b0e --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java @@ -0,0 +1,275 @@ +package openjproxy.jdbc; + +import openjproxy.jdbc.testutil.SQLServerPrefetchCacheConnectionProvider; +import openjproxy.jdbc.testutil.SQLServerTestContainer; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.condition.EnabledIf; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ArgumentsSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with a SQL Server backend. + * + *

SQL Server uses the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination syntax. + * The SQL Server container is managed by TestContainers; the test connects via an OJP prefetch-cache + * server on default port 1059. + * + *

The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

    + *
  1. Creates a dedicated table with multiple column types, including a {@code VARBINARY} column.
  2. + *
  3. Inserts the requested number of rows with fully deterministic, per-row values.
  4. + *
  5. Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an + * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). + * The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, + * which is the per-datasource opt-in sent to the server on connect.
  6. + *
  7. Asserts every column value, including a byte-exact comparison of the + * {@code VARBINARY} column.
  8. + *
  9. Drops the table on completion.
  10. + *
+ * + *

This test is disabled by default and is activated by passing + * {@code -DenableSqlServerPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on default port 1059 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. + * + *

SQL Server type notes: + *

    + *
  • No native BOOLEAN → {@code BIT} (0/1) is used.
  • + *
  • Binary data → {@code VARBINARY(32)}.
  • + *
  • Large text → {@code NVARCHAR(255)}.
  • + *
+ */ +@EnabledIf("openjproxy.jdbc.testutil.SQLServerTestContainer#isEnabled") +class SQLServerPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(SQLServerPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableSqlServerPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test + // ------------------------------------------------------------------------- + + /** + * Core pagination test for SQL Server. + * + *

The record count is provided via {@code @ValueSource} and the connection details via + * {@link SQLServerPrefetchCacheConnectionProvider}. JUnit 5 does not support mixing + * two argument sources in a single {@code @ParameterizedTest}, so the test obtains the + * connection from the shared TestContainer directly and iterates over record counts. + */ + @ParameterizedTest + @ValueSource(ints = {99, 100, 101, 567, 1000}) + void testPaginationWithPrefetchCache(int recordCount) throws SQLException { + + assumeTrue(isTestEnabled, + "SQL Server prefetch-cache tests are disabled " + + "(pass -DenableSqlServerPrefetchCacheTests=true to enable)"); + + // Build connection via the prefetch-cache connection provider + String[] connArgs = getConnectionArgs(); + String url = connArgs[0]; + String user = connArgs[1]; + String pwd = connArgs[2]; + + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "ojp_pfx_mssql_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** Obtains [url, user, password] from the TestContainer via the prefetch-cache provider. 
*/ + private static String[] getConnectionArgs() { + SQLServerTestContainer.getInstance(); + String containerJdbcUrl = SQLServerTestContainer.getJdbcUrl(); + String username = SQLServerTestContainer.getUsername(); + String password = SQLServerTestContainer.getPassword(); + + String prefetchCachePort = System.getProperty("ojp.prefetch.cache.port", "1059"); + String ojpProxyHost = System.getProperty("ojp.proxy.host", "localhost"); + + // strip "jdbc:" prefix and wrap with OJP proxy + String urlWithoutPrefix = containerJdbcUrl.startsWith("jdbc:") + ? containerJdbcUrl.substring("jdbc:".length()) + : containerJdbcUrl; + if (!urlWithoutPrefix.toLowerCase().contains("databasename=")) { + urlWithoutPrefix = urlWithoutPrefix + ";databaseName=defaultdb"; + } + String ojpUrl = "jdbc:ojp[" + ojpProxyHost + ":" + prefetchCachePort + "]_" + urlWithoutPrefix; + + return new String[]{ojpUrl, username, password}; + } + + /** + * Drops (if exists) and re-creates the test table. + * + *

Schema: + *

+     *   id         INT          PRIMARY KEY   – 1-based row identifier
+     *   name       NVARCHAR(100) NOT NULL     – "record_{id}"
+     *   val_int    INT          NOT NULL      – id × 10
+     *   val_bigint BIGINT       NOT NULL      – id × 1,000,000
+     *   val_bool   BIT          NOT NULL      – true (1) when id is even, else false (0)
+     *   val_text   NVARCHAR(255) NOT NULL     – "text_value_for_row_{id}"
+     *   val_binary VARBINARY(32) NOT NULL     – four deterministic bytes derived from id
+     * 
+ */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("IF OBJECT_ID('" + tableName + "', 'U') IS NOT NULL DROP TABLE " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT NOT NULL PRIMARY KEY," + + " name NVARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool BIT NOT NULL," + + " val_text NVARCHAR(255) NOT NULL," + + " val_binary VARBINARY(32) NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBinary(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + // SQL Server: ORDER BY is required when using OFFSET/FETCH + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" + + " FROM " + tableName + + " ORDER BY id" + + " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY"; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + Object binObj = rs.getObject("val_binary"); + assertNotNull(binObj, "val_binary for id=" + id + " must not be null"); + byte[] actualBytes = binObj instanceof byte[] ? 
(byte[]) binObj + : rs.getBytes("val_binary"); + assertArrayEquals(expectedBinary(id), actualBytes, + "val_binary bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("IF OBJECT_ID('" + tableName + "', 'U') IS NOT NULL DROP TABLE " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBinary(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java new file mode 100644 index 000000000..18ad828b4 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java @@ -0,0 +1,74 @@ +package openjproxy.jdbc.testutil; + +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.ArgumentsProvider; + +import java.util.stream.Stream; + +/** + * Custom {@link ArgumentsProvider} for SQL Server prefetch-cache integration tests. + * + *

Provides connection details pointing to the OJP server (default port 1059) with the + * next-page prefetch cache enabled via the client-side property {@code ojp.nextPageCache.enabled}. + * The actual SQL Server instance is still supplied by {@link SQLServerTestContainer}. + */ +public class SQLServerPrefetchCacheConnectionProvider implements ArgumentsProvider { + + private static final String JDBC_PREFIX = "jdbc:"; + + /** OJP server host:port used for prefetch-cache tests (defaults to standard port 1059). */ + private static final String PREFETCH_CACHE_PORT = + System.getProperty("ojp.prefetch.cache.port", "1059"); + private static final String OJP_PROXY_HOST = + System.getProperty("ojp.proxy.host", "localhost"); + private static final String PREFETCH_CACHE_ADDRESS = OJP_PROXY_HOST + ":" + PREFETCH_CACHE_PORT; + + @Override + public Stream provideArguments(ExtensionContext context) { + if (!SQLServerTestContainer.isEnabled()) { + return Stream.empty(); + } + + ConnectionProps result = getConnectionProps(); + return Stream.of( + Arguments.of(result.driverClass, result.ojpUrl, result.username, result.password) + ); + } + + @NotNull + private static ConnectionProps getConnectionProps() { + SQLServerTestContainer.getInstance(); + + String containerJdbcUrl = SQLServerTestContainer.getJdbcUrl(); + String username = SQLServerTestContainer.getUsername(); + String password = SQLServerTestContainer.getPassword(); + + String driverClass = "org.openjproxy.jdbc.Driver"; + String urlWithoutPrefix = containerJdbcUrl.startsWith(JDBC_PREFIX) + ? 
containerJdbcUrl.substring(JDBC_PREFIX.length()) + : containerJdbcUrl; + + if (!urlWithoutPrefix.toLowerCase().contains("databasename=")) { + urlWithoutPrefix = urlWithoutPrefix + ";databaseName=defaultdb"; + } + + String ojpUrl = JDBC_PREFIX + "ojp[" + PREFETCH_CACHE_ADDRESS + "]_" + urlWithoutPrefix; + return new ConnectionProps(username, password, driverClass, ojpUrl); + } + + private static class ConnectionProps { + private final String username; + private final String password; + private final String driverClass; + private final String ojpUrl; + + ConnectionProps(String username, String password, String driverClass, String ojpUrl) { + this.username = username; + this.password = password; + this.driverClass = driverClass; + this.ojpUrl = ojpUrl; + } + } +} diff --git a/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..75e108955 --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, diff --git a/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..d21d2b7cd --- 
/dev/null +++ b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass diff --git a/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..c653232c4 --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, diff --git a/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..f7a2f41f1 --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword 
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword diff --git a/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..bddb835d2 --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword diff --git a/ojp-jdbc-driver/src/test/resources/ojp.properties b/ojp-jdbc-driver/src/test/resources/ojp.properties index 0330b4734..bacc62b5b 100644 --- a/ojp-jdbc-driver/src/test/resources/ojp.properties +++ b/ojp-jdbc-driver/src/test/resources/ojp.properties @@ -5,6 +5,10 @@ ojp.connection.pool.idleTimeout=2000 ojp.connection.pool.maxLifetime=1200000 ojp.connection.pool.connectionTimeout=20000 +# Enable the next-page prefetch cache for this client (client-side per-datasource setting). +# The server must also have ojp.server.nextPageCache.enabled=true for this to take effect. 
+ojp.nextPageCache.enabled=true + # Multinode-specific configuration for non-XA connections multinode.ojp.connection.pool.maximumPoolSize=22 multinode.ojp.connection.pool.minimumIdle=20 diff --git a/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..757148ccb --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword diff --git a/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..a06451d49 --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword 
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java index c2893d67f..01fdaf18d 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java @@ -68,6 +68,13 @@ public class ServerConfiguration { private static final String TELEMETRY_GRPC_METRICS_ENABLED_KEY = "ojp.telemetry.grpc.metrics.enabled"; private static final String TELEMETRY_POOL_METRICS_ENABLED_KEY = "ojp.telemetry.pool.metrics.enabled"; + // Next-page prefetch cache configuration keys + private static final String NEXT_PAGE_CACHE_ENABLED_KEY = "ojp.server.nextPageCache.enabled"; + private static final String NEXT_PAGE_CACHE_TTL_SECONDS_KEY = "ojp.server.nextPageCache.ttlSeconds"; + private static final String NEXT_PAGE_CACHE_MAX_ENTRIES_KEY = "ojp.server.nextPageCache.maxEntries"; + private static final String NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS_KEY = "ojp.server.nextPageCache.prefetchWaitTimeoutMs"; + private static final String NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS_KEY = "ojp.server.nextPageCache.cleanupIntervalSeconds"; + // TLS configuration keys private static final String TLS_ENABLED_KEY = "ojp.server.tls.enabled"; private static final String TLS_KEYSTORE_PATH_KEY = "ojp.server.tls.keystore.path"; @@ -135,6 +142,13 @@ public class ServerConfiguration { public static final boolean DEFAULT_TELEMETRY_GRPC_METRICS_ENABLED = true; // Enabled by default when OpenTelemetry is enabled public static final boolean DEFAULT_TELEMETRY_POOL_METRICS_ENABLED = true; // Enabled by default when OpenTelemetry is enabled + // Next-page prefetch cache default values + public static final boolean DEFAULT_NEXT_PAGE_CACHE_ENABLED = false; // Disabled by default, opt-in + 
public static final long DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS = 60; // 1 minute + public static final int DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES = 100; + public static final long DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS = 5000; // 5 seconds + public static final long DEFAULT_NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS = 60; // 1 minute + // TLS default values public static final boolean DEFAULT_TLS_ENABLED = false; // Disabled by default for backwards compatibility public static final boolean DEFAULT_TLS_CLIENT_AUTH_REQUIRED = false; // mTLS disabled by default @@ -211,6 +225,13 @@ public class ServerConfiguration { private final boolean tlsClientAuthRequired; + // Next-page prefetch cache configuration + private final boolean nextPageCacheEnabled; + private final long nextPageCacheTtlSeconds; + private final int nextPageCacheMaxEntries; + private final long nextPageCachePrefetchWaitTimeoutMs; + private final long nextPageCacheCleanupIntervalSeconds; + public ServerConfiguration() { this.serverPort = getIntProperty(SERVER_PORT_KEY, DEFAULT_SERVER_PORT); this.prometheusPort = getIntProperty(PROMETHEUS_PORT_KEY, DEFAULT_PROMETHEUS_PORT); @@ -274,6 +295,13 @@ public ServerConfiguration() { this.telemetryGrpcMetricsEnabled = getBooleanProperty(TELEMETRY_GRPC_METRICS_ENABLED_KEY, DEFAULT_TELEMETRY_GRPC_METRICS_ENABLED); this.telemetryPoolMetricsEnabled = getBooleanProperty(TELEMETRY_POOL_METRICS_ENABLED_KEY, DEFAULT_TELEMETRY_POOL_METRICS_ENABLED); + // Next-page prefetch cache configuration + this.nextPageCacheEnabled = getBooleanProperty(NEXT_PAGE_CACHE_ENABLED_KEY, DEFAULT_NEXT_PAGE_CACHE_ENABLED); + this.nextPageCacheTtlSeconds = getLongProperty(NEXT_PAGE_CACHE_TTL_SECONDS_KEY, DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS); + this.nextPageCacheMaxEntries = getIntProperty(NEXT_PAGE_CACHE_MAX_ENTRIES_KEY, DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES); + this.nextPageCachePrefetchWaitTimeoutMs = getLongProperty(NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS_KEY, 
DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS); + this.nextPageCacheCleanupIntervalSeconds = getLongProperty(NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS_KEY, DEFAULT_NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS); + logConfigurationSummary(); } @@ -416,6 +444,14 @@ private void logConfigurationSummary() { logger.info(" Tracing Service Name: {}", tracingServiceName); logger.info(" Tracing Sample Rate: {}", tracingSampleRate); } + logger.info("Next-Page Prefetch Cache Configuration:"); + logger.info(" Next-Page Cache Enabled: {}", nextPageCacheEnabled); + if (nextPageCacheEnabled) { + logger.info(" Next-Page Cache TTL: {} seconds", nextPageCacheTtlSeconds); + logger.info(" Next-Page Cache Max Entries: {}", nextPageCacheMaxEntries); + logger.info(" Next-Page Cache Prefetch Wait Timeout: {} ms", nextPageCachePrefetchWaitTimeoutMs); + logger.info(" Next-Page Cache Cleanup Interval: {} seconds", nextPageCacheCleanupIntervalSeconds); + } } /** @@ -641,5 +677,54 @@ public boolean isTelemetryGrpcMetricsEnabled() { public boolean isTelemetryPoolMetricsEnabled() { return telemetryPoolMetricsEnabled; } - + + public boolean isNextPageCacheEnabled() { + return nextPageCacheEnabled; + } + + public long getNextPageCacheTtlSeconds() { + return nextPageCacheTtlSeconds; + } + + public int getNextPageCacheMaxEntries() { + return nextPageCacheMaxEntries; + } + + public long getNextPageCachePrefetchWaitTimeoutMs() { + return nextPageCachePrefetchWaitTimeoutMs; + } + + /** + * Returns the prefetch-wait timeout for a specific datasource. + * + *

If a per-datasource override is configured via + * {@code ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs}, + * that value is returned. Otherwise the global + * {@code ojp.server.nextPageCache.prefetchWaitTimeoutMs} is used as the fallback.

+ * + * @param datasourceName the {@code ojp.datasource.name} value from the client connection + * properties; {@code null} or {@code "default"} always returns + * the global default + * @return the effective prefetch-wait timeout in milliseconds for the given datasource + */ + public long getNextPageCachePrefetchWaitTimeoutMs(String datasourceName) { + if (datasourceName != null && !datasourceName.isEmpty() && !"default".equals(datasourceName)) { + String perDatasourceKey = "ojp.server.nextPageCache.datasource." + datasourceName + + ".prefetchWaitTimeoutMs"; + String raw = getStringProperty(perDatasourceKey, null); + if (raw != null) { + try { + return Long.parseLong(raw); + } catch (NumberFormatException e) { + logger.warn("Invalid value for '{}': '{}', falling back to global default", perDatasourceKey, raw); + } + } + } + return nextPageCachePrefetchWaitTimeoutMs; + } + + public long getNextPageCacheCleanupIntervalSeconds() { + return nextPageCacheCleanupIntervalSeconds; + } + } \ No newline at end of file diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java index 1aacb6a1b..0b5d6295d 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java @@ -21,9 +21,12 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; +import org.openjproxy.constants.CommonConstants; import org.openjproxy.grpc.ProtoConverter; +import org.openjproxy.grpc.dto.OpQueryResult; import org.openjproxy.grpc.dto.Parameter; import org.openjproxy.grpc.server.action.resource.CallResourceAction; +import org.openjproxy.grpc.server.action.session.ResultSetHelper; import org.openjproxy.grpc.server.action.session.TerminateSessionAction; import 
org.openjproxy.grpc.server.action.transaction.CommitTransactionAction; import org.openjproxy.grpc.server.action.transaction.RollbackTransactionAction; @@ -35,6 +38,11 @@ import org.openjproxy.grpc.server.action.xa.XaRecoverAction; import org.openjproxy.grpc.server.action.xa.XaRollbackAction; import org.openjproxy.grpc.server.action.xa.XaStartAction; +import org.openjproxy.grpc.server.paging.CachedPage; +import org.openjproxy.grpc.server.paging.NextPagePrefetchCache; +import org.openjproxy.grpc.server.paging.PageInfo; +import org.openjproxy.grpc.server.paging.PaginationDetector; +import org.openjproxy.grpc.server.resultset.ResultSetWrapper; import org.openjproxy.grpc.server.statement.StatementFactory; import org.openjproxy.xa.pool.XATransactionRegistry; import org.openjproxy.xa.pool.spi.XAConnectionPoolProvider; @@ -47,6 +55,7 @@ import java.sql.SQLDataException; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; @@ -72,6 +81,10 @@ public class StatementServiceImpl extends StatementServiceGrpc.StatementServiceI // SQL Enhancer Engine for query optimization private final org.openjproxy.grpc.server.sql.SqlEnhancerEngine sqlEnhancerEngine; + // Next-page prefetch cache for paginated queries (disabled by default) + private final NextPagePrefetchCache nextPagePrefetchCache; + private final ServerConfiguration serverConfiguration; + // Multinode XA coordinator for distributing transaction limits private static final MultinodeXaCoordinator xaCoordinator = new MultinodeXaCoordinator(); @@ -87,9 +100,17 @@ public StatementServiceImpl(SessionManager sessionManager, CircuitBreakerRegistr ServerConfiguration serverConfiguration) { this.sessionManager = sessionManager; this.circuitBreakerRegistry = circuitBreakerRegistry; + this.serverConfiguration = serverConfiguration; // Server configuration for creating segregation managers this.sqlEnhancerEngine = new 
org.openjproxy.grpc.server.sql.SqlEnhancerEngine( serverConfiguration.isSqlEnhancerEnabled()); + // Next-page prefetch cache (disabled by default) + this.nextPagePrefetchCache = new NextPagePrefetchCache( + serverConfiguration.isNextPageCacheEnabled(), + serverConfiguration.getNextPageCacheMaxEntries(), + serverConfiguration.getNextPageCacheTtlSeconds(), + serverConfiguration.getNextPageCachePrefetchWaitTimeoutMs(), + serverConfiguration.getNextPageCacheCleanupIntervalSeconds()); initializeXAPoolProvider(); // Create SQL statement metrics from the registered OpenTelemetry instance (if available) @@ -194,6 +215,31 @@ private void initializeXAPoolProvider() { @Override public void connect(ConnectionDetails connectionDetails, StreamObserver responseObserver) { + // Register per-datasource prefetch wait timeout and enabled flag so that + // getIfReady() and prefetchAsync() use the correct settings for this datasource + // rather than the global defaults. + if (nextPagePrefetchCache.isEnabled()) { + String connHash = org.openjproxy.grpc.server.utils.ConnectionHashGenerator + .hashConnectionDetails(connectionDetails); + String datasourceName = org.openjproxy.grpc.server.utils.ConnectionHashGenerator + .extractDataSourceName(connectionDetails); + long perDatasourceTimeout = serverConfiguration + .getNextPageCachePrefetchWaitTimeoutMs(datasourceName); + nextPagePrefetchCache.registerDatasourcePrefetchWaitTimeout(connHash, perDatasourceTimeout); + // The per-datasource cache-enabled flag is a CLIENT-side connection property + // (ojp.nextPageCache.enabled in the client's ojp.properties). The server only + // owns the global on/off switch; individual datasources opt out via their own config. + // When the property is absent the per-connection map has no entry, so + // NextPagePrefetchCache.isEnabledForDatasource() falls through to the global + // server-side flag — which is the correct default. 
+ java.util.Map clientProps = + ProtoConverter.propertiesFromProto(connectionDetails.getPropertiesList()); + Object clientEnabledRaw = clientProps.get(CommonConstants.NEXT_PAGE_CACHE_ENABLED_PROPERTY); + if (clientEnabledRaw != null) { + boolean clientCacheEnabled = Boolean.parseBoolean(clientEnabledRaw.toString()); + nextPagePrefetchCache.registerDatasourceCacheEnabled(connHash, clientCacheEnabled); + } + } org.openjproxy.grpc.server.action.connection.ConnectAction.getInstance() .execute(actionContext, connectionDetails, responseObserver); } @@ -287,16 +333,96 @@ private void executeQueryInternal(StatementRequest request, StreamObserver params = ProtoConverter.fromProtoList(request.getParametersList()); + + // ---- Next-page prefetch cache ---- + if (nextPagePrefetchCache.isEnabled()) { + String connHash = dto.getSession().getConnHash(); + if (nextPagePrefetchCache.isEnabledForDatasource(connHash)) { + Optional cached = nextPagePrefetchCache.getIfReady(connHash, sql); + if (cached.isPresent()) { + CachedPage page = cached.get(); + // Start prefetch for the page after this one before returning the cached result + startNextPagePrefetch(sql, params, connHash); + streamCachedPage(page, dto.getSession(), responseObserver); + return; + } + } + } + // ---- End next-page prefetch cache check ---- + + String resultSetUUID; if (CollectionUtils.isNotEmpty(params)) { PreparedStatement ps = StatementFactory.createPreparedStatement(sessionManager, dto, sql, params, request); - String resultSetUUID = this.sessionManager.registerResultSet(dto.getSession(), ps.executeQuery()); - handleResultSet(actionContext, dto.getSession(), resultSetUUID, responseObserver); + resultSetUUID = this.sessionManager.registerResultSet(dto.getSession(), ps.executeQuery()); } else { Statement stmt = StatementFactory.createStatement(sessionManager, dto.getConnection(), request); - String resultSetUUID = this.sessionManager.registerResultSet(dto.getSession(), - stmt.executeQuery(sql)); - 
handleResultSet(actionContext, dto.getSession(), resultSetUUID, responseObserver); + resultSetUUID = this.sessionManager.registerResultSet(dto.getSession(), stmt.executeQuery(sql)); + } + // Start prefetch for the next page while the current page is being streamed + startNextPagePrefetch(sql, params, dto.getSession().getConnHash()); + handleResultSet(actionContext, dto.getSession(), resultSetUUID, responseObserver); + } + + /** + * Starts an asynchronous prefetch of the next page for the given SQL, if the feature + * is enabled and the SQL contains a recognised pagination clause. + * + * @param sql the current paginated SQL + * @param params the query parameters (used as-is for the next-page query) + * @param connHash the connection hash used to look up the DataSource + */ + private void startNextPagePrefetch(String sql, List params, String connHash) { + if (!nextPagePrefetchCache.isEnabled()) { + return; + } + Optional pageInfo = PaginationDetector.detect(sql); + if (pageInfo.isEmpty()) { + return; + } + String nextPageSql = PaginationDetector.buildNextPageSql(sql, pageInfo.get()); + if (nextPageSql == null) { + return; + } + DataSource dataSource = datasourceMap.get(connHash); + if (dataSource == null) { + log.debug("No DataSource found for prefetch, connHash={}", connHash); + return; + } + nextPagePrefetchCache.prefetchAsync(dataSource, connHash, nextPageSql, params); + } + + /** + * Streams the rows held in a {@link CachedPage} directly to the gRPC response observer, + * using the same chunking strategy as {@link ResultSetHelper#handleResultSet}. 
+ * + * @param page the cached page to stream + * @param session the current session info (embedded in each response message) + * @param responseObserver the gRPC observer to stream results into + */ + private static void streamCachedPage(CachedPage page, SessionInfo session, + StreamObserver responseObserver) { + OpQueryResult.OpQueryResultBuilder queryResultBuilder = OpQueryResult.builder(); + queryResultBuilder.labels(page.getColumnLabels()); + + List batch = new ArrayList<>(); + int totalRows = page.getRows().size(); + + for (Object[] rowValues : page.getRows()) { + batch.add(rowValues); + if (batch.size() == CommonConstants.ROWS_PER_RESULT_SET_DATA_BLOCK) { + responseObserver.onNext(ResultSetWrapper.wrapResults(session, batch, + queryResultBuilder, null, "")); + queryResultBuilder = OpQueryResult.builder(); + batch = new ArrayList<>(); + } + } + + // Send remaining rows, or an empty batch when there are no rows at all + if (!batch.isEmpty() || totalRows == 0) { + responseObserver.onNext(ResultSetWrapper.wrapResults(session, batch, + queryResultBuilder, null, "")); } + responseObserver.onCompleted(); } @Override diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/CachedPage.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/CachedPage.java new file mode 100644 index 000000000..f672972b6 --- /dev/null +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/CachedPage.java @@ -0,0 +1,58 @@ +package org.openjproxy.grpc.server.paging; + +import java.util.Collections; +import java.util.List; + +/** + * Holds a single cached page of query results. + * + *

Instances are immutable once created. The {@link #isExpired(long)} method + * can be used to check whether the entry has exceeded its time-to-live.

+ */ +public class CachedPage { + + private final List columnLabels; + private final List rows; + private final long createdAtMs; + + /** + * @param columnLabels ordered list of column names from the result set metadata + * @param rows result rows; each element is an array of column values + */ + public CachedPage(List columnLabels, List rows) { + this.columnLabels = Collections.unmodifiableList(columnLabels); + this.rows = Collections.unmodifiableList(rows); + this.createdAtMs = System.currentTimeMillis(); + } + + /** + * Returns the ordered list of column labels for this result set. + */ + public List getColumnLabels() { + return columnLabels; + } + + /** + * Returns the cached rows. Each element is an array of column values + * in the same order as {@link #getColumnLabels()}. + */ + public List getRows() { + return rows; + } + + /** + * Returns the epoch milliseconds at which this entry was created. + */ + public long getCreatedAtMs() { + return createdAtMs; + } + + /** + * Returns {@code true} if the entry is older than {@code ttlMs} milliseconds. 
+ * + * @param ttlMs time-to-live in milliseconds + */ + public boolean isExpired(long ttlMs) { + return System.currentTimeMillis() - createdAtMs > ttlMs; + } +} diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java new file mode 100644 index 000000000..018851c97 --- /dev/null +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -0,0 +1,595 @@ +package org.openjproxy.grpc.server.paging; + +import lombok.extern.slf4j.Slf4j; +import org.openjproxy.grpc.dto.Parameter; +import org.openjproxy.grpc.dto.ParameterType; + +import javax.sql.DataSource; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Pattern; + +/** + * Cache for pre-fetched next pages of paginated SELECT queries. + * + *

Behaviour

+ *
    + *
  1. When a paginated query is executed, the server fires a virtual thread that + * executes the next page SQL against the database and stores the result + * in this cache, keyed by the datasource identifier and (trimmed) next-page SQL + * string.
  2. + *
  3. When the client subsequently requests the next page, the server first checks + * this cache. If a matching entry is found the result is served from memory, + * and another prefetch is started for the page after that.
  4. + *
  5. If the client requests a page that is still being fetched, the server waits + * up to {@code prefetchWaitTimeoutMs} for the operation to complete before + * falling back to a regular database query.
  6. + *
+ * + *

Datasource isolation

+ * Each cache entry is scoped to a specific datasource by including the + * {@code datasourceId} in the cache key. Two datasources executing the same SQL + * query will never share a prefetched page. + * + *

Materialised LOB data

+ * All column types are cached: + *
    + *
  • BLOB / LONGVARBINARY / VARBINARY / BINARY → materialized as {@code byte[]}
  • + *
  • CLOB / NCLOB / LONGVARCHAR / LONGNVARCHAR → materialized as {@code String}
  • + *
  • All other types → stored using {@code ResultSet.getObject()}
  • + *
+ * Queries that use LOB session references as input parameters (i.e., parameters + * of type BLOB or CLOB that reference a session-scoped LOB object) are still skipped + * because those references cannot be transferred to a separate prefetch connection. + * + *

Background cleanup

+ * All cache instances share a single application-wide daemon thread + * ({@link #CLEANUP_EXECUTOR}) that is created once for the lifetime of the JVM. + * When enabled, each instance registers its own periodic eviction task on that shared + * executor; {@link #shutdown()} cancels the task for that instance without affecting + * the shared thread or any other instance's tasks. Entries expire after + * {@code ttlSeconds} regardless of whether they were ever consumed. + * + *

Thread safety

+ * All public methods are thread-safe. The internal cache uses a + * {@link ConcurrentHashMap} and prefetch threads are Java 21 virtual threads. + */ +@Slf4j +public class NextPagePrefetchCache implements AutoCloseable { + + /** + * Application-wide single-threaded executor shared by ALL enabled cache instances. + * Using a {@code static final} field guarantees exactly ONE background cleanup thread + * per JVM regardless of how many {@code NextPagePrefetchCache} instances are created. + * The executor runs on a virtual thread; virtual threads are always daemon threads, + * so they never prevent JVM shutdown. + */ + private static final ScheduledExecutorService CLEANUP_EXECUTOR = + Executors.newSingleThreadScheduledExecutor(r -> + Thread.ofVirtual().name("ojp-prefetch-cache-cleanup").unstarted(r)); + + /** Pre-compiled pattern for stripping newlines and tabs in log abbreviations. */ + private static final Pattern NEWLINE_PATTERN = Pattern.compile("[\\r\\n\\t]+"); + + private final boolean enabled; + private final int maxEntries; + private final long ttlMs; + private final long prefetchWaitTimeoutMs; + + /** + * Per-datasource prefetch-wait timeout overrides. + * Key: datasource connection hash (see {@code ConnectionHashGenerator}). + * Value: timeout in milliseconds. + * When an entry is present it takes precedence over {@link #prefetchWaitTimeoutMs}. + */ + private final ConcurrentHashMap datasourcePrefetchWaitTimeoutMs + = new ConcurrentHashMap<>(); + + /** + * Per-datasource cache-enabled overrides. + * Key: datasource connection hash (see {@code ConnectionHashGenerator}). + * Value: {@code true} to enable, {@code false} to disable the cache for this datasource. + * When an entry is present it takes precedence over the global {@link #enabled} flag. + */ + private final ConcurrentHashMap datasourceCacheEnabled + = new ConcurrentHashMap<>(); + + /** + * Maps {@code "\u0001"} to the asynchronous result of the prefetch. 
+ * Including the datasource ID in the key ensures that two different datasources executing + * the same SQL do not share cache entries. + */ + private final ConcurrentHashMap> cache + = new ConcurrentHashMap<>(); + + /** + * Handle to this instance's eviction task on {@link #CLEANUP_EXECUTOR}. + * {@code null} reference when the cleanup job is disabled ({@code cleanupIntervalSeconds == 0}). + * Cancelled atomically by {@link #shutdown()} to avoid concurrent double-cancel races. + */ + private final AtomicReference> cleanupTask = new AtomicReference<>(); + + /** + * Creates a new cache instance. + * + * @param enabled whether the feature is enabled + * @param maxEntries maximum number of entries to keep (oldest removed first) + * @param ttlSeconds time-to-live for each entry in seconds + * @param prefetchWaitTimeoutMs max time (ms) to wait for an in-progress prefetch + * before falling back to a live DB query + * @param cleanupIntervalSeconds interval (seconds) between background eviction sweeps; + * {@code 0} disables the background job for this instance + */ + public NextPagePrefetchCache(boolean enabled, int maxEntries, + long ttlSeconds, long prefetchWaitTimeoutMs, + long cleanupIntervalSeconds) { + this.enabled = enabled; + this.maxEntries = maxEntries; + this.ttlMs = ttlSeconds * 1000L; + this.prefetchWaitTimeoutMs = prefetchWaitTimeoutMs; + + if (enabled && cleanupIntervalSeconds > 0) { + // Register this instance's eviction task on the single shared executor. + // The executor has exactly one thread, so all tasks run sequentially on + // that same thread — never more than one cleanup thread in the JVM. + cleanupTask.set(CLEANUP_EXECUTOR.scheduleAtFixedRate( + this::evictExpiredOrCompleted, + cleanupIntervalSeconds, cleanupIntervalSeconds, TimeUnit.SECONDS)); + log.debug("Prefetch cache cleanup registered every {}s on shared executor", + cleanupIntervalSeconds); + } + } + + /** + * Returns {@code true} when this cache is enabled. 
+ */ + public boolean isEnabled() { + return enabled; + } + + /** + * Returns the current number of entries in the cache (in-progress + completed). + * Primarily intended for monitoring and testing. + */ + public int cacheSize() { + return cache.size(); + } + + /** + * Registers a per-datasource prefetch-wait timeout that overrides the global default + * for the specified datasource. + * + *

Calling this method multiple times for the same {@code datasourceId} simply + * replaces the previously registered value. The registration is thread-safe.

+ * + * @param datasourceId the unique identifier of the datasource (connection hash) + * @param timeoutMs the maximum time in milliseconds to wait for an in-progress + * prefetch before falling back to a live DB query + */ + public void registerDatasourcePrefetchWaitTimeout(String datasourceId, long timeoutMs) { + if (datasourceId != null) { + datasourcePrefetchWaitTimeoutMs.put(datasourceId, timeoutMs); + log.debug("Registered per-datasource prefetchWaitTimeoutMs={} for datasourceId={}", + timeoutMs, datasourceId); + } + } + + /** + * Registers whether the prefetch cache is enabled for a specific datasource, + * overriding the global {@link #enabled} flag for that datasource. + * + *

Calling this method multiple times for the same {@code datasourceId} simply + * replaces the previously registered value. The registration is thread-safe.

+ * + * @param datasourceId the unique identifier of the datasource (connection hash) + * @param cacheEnabled {@code true} to enable caching, {@code false} to disable it + * for this specific datasource + */ + public void registerDatasourceCacheEnabled(String datasourceId, boolean cacheEnabled) { + if (datasourceId != null) { + datasourceCacheEnabled.put(datasourceId, cacheEnabled); + log.debug("Registered per-datasource cacheEnabled={} for datasourceId={}", + cacheEnabled, datasourceId); + } + } + + /** + * Returns whether the cache is enabled for the given datasource. + * If a per-datasource override has been registered via + * {@link #registerDatasourceCacheEnabled}, that value takes precedence + * over the global {@link #enabled} flag. + * + * @param datasourceId the connection hash for the datasource; may be {@code null} + * @return {@code true} if caching should be used for this datasource + */ + public boolean isEnabledForDatasource(String datasourceId) { + if (datasourceId != null) { + Boolean override = datasourceCacheEnabled.get(datasourceId); + if (override != null) { + return override; + } + } + return enabled; + } + + /** + * Cancels this instance's periodic cleanup task on the shared executor. + * The shared executor itself is left running so that other cache instances + * (if any) are not affected. Safe to call multiple times; uses an atomic + * swap to prevent concurrent double-cancel races. + */ + public void shutdown() { + ScheduledFuture task = cleanupTask.getAndSet(null); + if (task != null) { + task.cancel(false); + log.debug("Prefetch cache cleanup task cancelled"); + } + } + + /** Implements {@link AutoCloseable} by delegating to {@link #shutdown()}. 
*/ + @Override + public void close() { + shutdown(); + } + + // ----------------------------------------------------------------- + // Cache read + // ----------------------------------------------------------------- + + /** + * Retrieves the cached page for the given datasource + SQL pair, waiting up to + * {@code prefetchWaitTimeoutMs} when the prefetch is still in progress. + * + *

Returns an empty Optional when:

+ *
    + *
  • no entry exists for {@code datasourceId} + {@code sql}
  • + *
  • the entry is expired
  • + *
  • the prefetch failed or timed out
  • + *
+ * + *

The entry is removed from the cache after a successful retrieval (single-use + * semantics) so that concurrent requests for the same page can each independently + * obtain the result and start the next prefetch.

+ * + * @param datasourceId the unique identifier of the datasource (e.g. connection hash); + * used to isolate entries from different datasources that may + * share the same SQL text + * @param sql the exact paginated SQL sent by the client + * @return an Optional containing the cached page, or empty if unavailable + */ + public Optional getIfReady(String datasourceId, String sql) { + String key = normalizeKey(datasourceId, sql); + CompletableFuture future = cache.get(key); + if (future == null) { + return Optional.empty(); + } + + long effectiveTimeoutMs = datasourcePrefetchWaitTimeoutMs.getOrDefault( + datasourceId, prefetchWaitTimeoutMs); + + try { + CachedPage page = future.get(effectiveTimeoutMs, TimeUnit.MILLISECONDS); + // Remove after use (single-use semantics; if another thread also grabs + // the same entry concurrently, it gets a copy of the same data). + cache.remove(key, future); + + if (page == null) { + log.debug("Prefetch for '{}' returned no-cache result", abbreviate(sql)); + return Optional.empty(); + } + if (page.isExpired(ttlMs)) { + log.debug("Cached page for '{}' has expired", abbreviate(sql)); + return Optional.empty(); + } + log.debug("Cache HIT for '{}' ({} rows)", abbreviate(sql), page.getRows().size()); + return Optional.of(page); + + } catch (java.util.concurrent.TimeoutException e) { + log.debug("Prefetch for '{}' did not complete within {}ms – falling back to live query", + abbreviate(sql), effectiveTimeoutMs); + return Optional.empty(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.warn("Interrupted while waiting for prefetch of '{}'", abbreviate(sql)); + return Optional.empty(); + } catch (Exception e) { + log.warn("Prefetch for '{}' failed: {}", abbreviate(sql), e.getMessage()); + cache.remove(key, future); + return Optional.empty(); + } + } + + // ----------------------------------------------------------------- + // Cache write / prefetch trigger + // 
----------------------------------------------------------------- + + /** + * Starts an asynchronous prefetch of {@code nextPageSql} on a virtual thread. + * + *

The method returns immediately. If an entry for {@code datasourceId} + + * {@code nextPageSql} already exists (either in-progress or completed), no new + * prefetch is started. Entries are evicted lazily when the cache exceeds + * {@code maxEntries}.

+ * + *

BLOB/CLOB parameters are not supported; if any parameter has type + * {@code BLOB} or {@code CLOB} the prefetch is silently skipped.

+ * + * @param dataSource the DataSource from which to obtain a dedicated prefetch connection + * @param datasourceId the unique identifier of the datasource (e.g. connection hash); + * used to scope the cache entry so two datasources do not share pages + * @param nextPageSql the SQL for the next page (produced by {@link PaginationDetector#buildNextPageSql}) + * @param params the query parameters (may be null or empty for non-prepared queries) + */ + public void prefetchAsync(DataSource dataSource, String datasourceId, + String nextPageSql, List params) { + if (!isEnabledForDatasource(datasourceId) || dataSource == null || nextPageSql == null) { + return; + } + + // Skip if any parameter is a LOB reference (session-scoped, can't be used in prefetch) + if (params != null && params.stream().anyMatch(NextPagePrefetchCache::isLobParameter)) { + log.debug("Skipping prefetch – query contains LOB parameters"); + return; + } + + String key = normalizeKey(datasourceId, nextPageSql); + + // Don't prefetch if already in-progress or completed + if (cache.containsKey(key)) { + log.debug("Prefetch already in progress/completed for '{}'", abbreviate(nextPageSql)); + return; + } + + // Evict stale entries before inserting to respect max-size + if (cache.size() >= maxEntries) { + evictExpiredOrCompleted(); + } + + CompletableFuture future = new CompletableFuture<>(); + // putIfAbsent avoids a race where two callers try to start the same prefetch + if (cache.putIfAbsent(key, future) != null) { + // Another thread won the race + return; + } + + log.debug("Starting prefetch for '{}'", abbreviate(nextPageSql)); + + List paramsCopy = params == null ? 
List.of() : List.copyOf(params); + // Include a safe SQL snippet in the thread name for easier thread-dump analysis + String threadName = "ojp-next-page-prefetch[" + abbreviate(nextPageSql, 40) + "]"; + + Thread.ofVirtual().name(threadName).start(() -> { + try (Connection conn = dataSource.getConnection()) { + CachedPage page = executeAndReadAllRows(conn, nextPageSql, paramsCopy); + future.complete(page); // null signals "skip cache" + log.debug("Prefetch completed for '{}' ({} rows cached)", + abbreviate(nextPageSql), + page != null ? page.getRows().size() : 0); + } catch (Exception e) { + log.warn("Prefetch failed for '{}': {}", abbreviate(nextPageSql), e.getMessage()); + future.completeExceptionally(e); + cache.remove(key, future); + } + }); + } + + // ----------------------------------------------------------------- + // Internal helpers + // ----------------------------------------------------------------- + + /** + * Executes {@code sql} using the given connection and materialises all result + * rows into a {@link CachedPage}. + */ + private static CachedPage executeAndReadAllRows(Connection conn, String sql, + List params) throws SQLException { + if (params.isEmpty()) { + try (Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery(sql)) { + return readAllRows(rs); + } + } else { + try (PreparedStatement ps = conn.prepareStatement(sql)) { + setNonLobParameters(ps, params); + try (ResultSet rs = ps.executeQuery()) { + return readAllRows(rs); + } + } + } + } + + /** + * Materialises all rows from {@code rs}, eagerly reading all column values + * (including LOB types) into in-memory representations. 
+ */ + private static CachedPage readAllRows(ResultSet rs) throws SQLException { + ResultSetMetaData meta = rs.getMetaData(); + int colCount = meta.getColumnCount(); + + // Collect column labels + List labels = new ArrayList<>(colCount); + for (int i = 1; i <= colCount; i++) { + labels.add(meta.getColumnName(i)); + } + + // Read all rows eagerly; materialise binary and character LOBs + List rows = new ArrayList<>(); + while (rs.next()) { + Object[] row = new Object[colCount]; + for (int i = 0; i < colCount; i++) { + row[i] = readColumnValue(rs, i + 1, meta.getColumnType(i + 1)); + } + rows.add(row); + } + + return new CachedPage(labels, rows); + } + + /** + * Reads a single column value, eagerly materialising LOB data so that + * it remains valid after the connection is closed: + *
+ * <ul>
+ *   <li>BLOB / LONGVARBINARY → {@code byte[]}</li>
+ *   <li>VARBINARY / BINARY → {@code byte[]}</li>
+ *   <li>CLOB / NCLOB / LONGVARCHAR / LONGNVARCHAR → {@code String}</li>
+ *   <li>All other types → returned as-is via {@code ResultSet.getObject()}</li>
+ * </ul>
+ */ + private static Object readColumnValue(ResultSet rs, int col, int sqlType) throws SQLException { + switch (sqlType) { + case Types.BLOB: + case Types.LONGVARBINARY: { + java.sql.Blob blob = rs.getBlob(col); + if (blob == null) { + return null; + } + try (java.io.InputStream stream = blob.getBinaryStream()) { + return stream.readAllBytes(); + } catch (java.io.IOException e) { + throw new SQLException("Failed to read BLOB data", e); + } + } + case Types.VARBINARY: + case Types.BINARY: + return rs.getBytes(col); + case Types.CLOB: + case Types.LONGVARCHAR: { + try (java.io.Reader reader = rs.getCharacterStream(col)) { + if (reader == null) { + return null; + } + java.io.StringWriter sw = new java.io.StringWriter(); + reader.transferTo(sw); + return sw.toString(); + } catch (java.io.IOException e) { + throw new SQLException("Failed to read CLOB data", e); + } + } + case Types.NCLOB: + case Types.LONGNVARCHAR: { + try (java.io.Reader reader = rs.getNCharacterStream(col)) { + if (reader == null) { + return null; + } + java.io.StringWriter sw = new java.io.StringWriter(); + reader.transferTo(sw); + return sw.toString(); + } catch (java.io.IOException e) { + throw new SQLException("Failed to read NCLOB data", e); + } + } + case Types.DATE: + return rs.getDate(col); + case Types.TIMESTAMP: + return rs.getTimestamp(col); + default: + return rs.getObject(col); + } + } + + /** + * Sets non-LOB parameters on a PreparedStatement using the parameter list. + * Only handles basic JDBC types (INT, LONG, STRING, DOUBLE, etc.). + * LOB parameters are rejected before this method is called. 
+ */ + private static void setNonLobParameters(PreparedStatement ps, + List params) throws SQLException { + for (Parameter param : params) { + int idx = param.getIndex(); + if (param.getValues().isEmpty()) { + ps.setNull(idx, java.sql.Types.NULL); + continue; + } + Object value = param.getValues().get(0); + ParameterType type = param.getType(); + + switch (type) { + case INT -> ps.setInt(idx, (int) value); + case SHORT -> ps.setShort(idx, ((Number) value).shortValue()); + case LONG -> ps.setLong(idx, (long) value); + case DOUBLE -> ps.setDouble(idx, (double) value); + case FLOAT -> ps.setFloat(idx, (float) value); + case BOOLEAN -> ps.setBoolean(idx, (boolean) value); + case STRING -> ps.setString(idx, (String) value); + case BIG_DECIMAL -> ps.setBigDecimal(idx, (BigDecimal) value); + case DATE -> ps.setDate(idx, (Date) value); + case TIME -> ps.setTime(idx, (Time) value); + case TIMESTAMP -> ps.setTimestamp(idx, (Timestamp) value); + case BYTES -> ps.setBytes(idx, (byte[]) value); + case NULL -> ps.setNull(idx, (int) value); + default -> ps.setObject(idx, value); + } + } + } + + /** Returns true for parameter types that reference session-scoped LOB objects. */ + private static boolean isLobParameter(Parameter param) { + ParameterType type = param.getType(); + return type == ParameterType.BLOB || type == ParameterType.CLOB; + } + + /** + * Normalises a SQL string for use as a cache key: + * strips leading/trailing whitespace and folds to lower-case so that + * minor formatting differences do not result in cache misses. + * The datasource ID is separated from the SQL by the ASCII SOH character + * ({@code \u0001}), which cannot appear in a SQL string or a connection hash, + * guaranteeing no key collisions regardless of the datasource ID content. + */ + private static String normalizeKey(String datasourceId, String sql) { + String normalizedSql = sql.trim().toLowerCase(java.util.Locale.ROOT); + return (datasourceId == null ? 
"" : datasourceId) + '\u0001' + normalizedSql; + } + + /** Returns a safe short preview of an SQL string for log messages. */ + private static String abbreviate(String sql) { + return abbreviate(sql, 80); + } + + /** Returns a safe short preview of an SQL string, truncated to {@code maxLen} characters. */ + private static String abbreviate(String sql, int maxLen) { + if (sql == null) { + return ""; + } + // Remove newlines/tabs for single-line thread names + String singleLine = NEWLINE_PATTERN.matcher(sql).replaceAll(" ").trim(); + return singleLine.length() <= maxLen ? singleLine : singleLine.substring(0, maxLen - 3) + "..."; + } + + /** + * Removes cache entries that are either expired or whose future has completed + * exceptionally. Called before inserting a new entry to bound cache size. + */ + private void evictExpiredOrCompleted() { + cache.entrySet().removeIf(entry -> { + CompletableFuture f = entry.getValue(); + if (!f.isDone()) { + return false; // still in progress – keep it + } + if (f.isCompletedExceptionally()) { + return true; // failed – evict + } + CachedPage page = f.getNow(null); + return page == null || page.isExpired(ttlMs); + }); + } +} diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PageInfo.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PageInfo.java new file mode 100644 index 000000000..65680a384 --- /dev/null +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PageInfo.java @@ -0,0 +1,49 @@ +package org.openjproxy.grpc.server.paging; + +/** + * Holds pagination information extracted from a SQL query. + * Supports multi-dialect pagination (LIMIT/OFFSET, FETCH NEXT, ROWNUM, etc.) + */ +public class PageInfo { + + private final long currentOffset; + private final long pageSize; + + public PageInfo(long currentOffset, long pageSize) { + this.currentOffset = currentOffset; + this.pageSize = pageSize; + } + + /** + * Returns the OFFSET value for the current page. 
+ */ + public long getCurrentOffset() { + return currentOffset; + } + + /** + * Returns the number of rows per page (LIMIT / FETCH size). + */ + public long getPageSize() { + return pageSize; + } + + /** + * Returns the OFFSET value for the next page. + */ + public long getNextPageOffset() { + return currentOffset + pageSize; + } + + /** + * Returns true if this is the first page (offset == 0). + */ + public boolean isFirstPage() { + return currentOffset == 0; + } + + @Override + public String toString() { + return "PageInfo{currentOffset=" + currentOffset + ", pageSize=" + pageSize + "}"; + } +} diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java new file mode 100644 index 000000000..4ccf593bf --- /dev/null +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java @@ -0,0 +1,207 @@ +package org.openjproxy.grpc.server.paging; + +import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Detects pagination syntax in SQL queries across multiple database dialects and + * provides utilities to generate the next-page SQL. + * + *
+ * <p>Supported dialects:</p>
+ *
+ * <ul>
+ *   <li>PostgreSQL / MySQL / SQLite: {@code LIMIT n OFFSET m} or {@code LIMIT n}</li>
+ *   <li>MySQL shorthand: {@code LIMIT m, n} (OFFSET m, page-size n)</li>
+ *   <li>SQL Server / Oracle 12c+ / DB2:
+ *       {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY}</li>
+ *   <li>DB2 / Oracle first-page: {@code FETCH FIRST n ROWS ONLY}</li>
+ * </ul>
+ */ +public class PaginationDetector { + + // ----------------------------------------------------------------- + // Compiled patterns (immutable, thread-safe) + // ----------------------------------------------------------------- + + /** + * Pattern 1 – LIMIT n OFFSET m (PostgreSQL, MySQL ≥5.7, SQLite) + * Groups: (1)=limit, (2)=offset + */ + private static final Pattern LIMIT_OFFSET = Pattern.compile( + "(?i)\\bLIMIT\\s+(\\d+)\\s+OFFSET\\s+(\\d+)\\b" + ); + + /** + * Pattern 2 – OFFSET m ROWS? FETCH NEXT|FIRST n ROWS? ONLY (SQL Server, Oracle 12c+, DB2) + * Groups: (1)=offset, (2)=fetch-size + */ + private static final Pattern OFFSET_FETCH = Pattern.compile( + "(?i)\\bOFFSET\\s+(\\d+)\\s+ROWS?\\s+FETCH\\s+(?:NEXT|FIRST)\\s+(\\d+)\\s+ROWS?\\s+ONLY\\b" + ); + + /** + * Pattern 3 – LIMIT m, n (MySQL shorthand: first arg = OFFSET, second arg = page-size) + * Groups: (1)=offset, (2)=limit + */ + private static final Pattern LIMIT_COMMA = Pattern.compile( + "(?i)\\bLIMIT\\s+(\\d+)\\s*,\\s*(\\d+)\\b" + ); + + /** + * Pattern 4 – FETCH NEXT|FIRST n ROWS? ONLY, without preceding OFFSET (first page) + * Groups: (1)=fetch-size + */ + private static final Pattern FETCH_ONLY = Pattern.compile( + "(?i)\\bFETCH\\s+(?:NEXT|FIRST)\\s+(\\d+)\\s+ROWS?\\s+ONLY\\b" + ); + + /** + * Pattern 5 – standalone LIMIT n with no OFFSET anywhere (first page) + * Groups: (1)=limit + */ + private static final Pattern LIMIT_ONLY = Pattern.compile( + "(?i)\\bLIMIT\\s+(\\d+)\\b" + ); + + /** Used to detect any OFFSET keyword in the query (guards Pattern 5 usage). 
*/ + private static final Pattern HAS_OFFSET = Pattern.compile( + "(?i)\\bOFFSET\\b" + ); + + // Private constructor – static utility class + private PaginationDetector() { + } + + // ----------------------------------------------------------------- + // Public API + // ----------------------------------------------------------------- + + /** + * Detects whether {@code sql} contains a pagination clause and returns the + * corresponding {@link PageInfo}. Returns an empty Optional when no + * pagination is detected or when the SQL is null/blank. + * + *
+ * <p>Patterns are evaluated in priority order; the first match wins.</p>
+ * + * @param sql the SQL string to inspect + * @return an Optional containing page information, or empty if not paginated + */ + public static Optional detect(String sql) { + if (sql == null || sql.isBlank()) { + return Optional.empty(); + } + + // Pattern 1: LIMIT n OFFSET m + Matcher m1 = LIMIT_OFFSET.matcher(sql); + if (m1.find()) { + long limit = Long.parseLong(m1.group(1)); + long offset = Long.parseLong(m1.group(2)); + return Optional.of(new PageInfo(offset, limit)); + } + + // Pattern 2: OFFSET m ROWS FETCH NEXT/FIRST n ROWS ONLY + Matcher m2 = OFFSET_FETCH.matcher(sql); + if (m2.find()) { + long offset = Long.parseLong(m2.group(1)); + long fetchSize = Long.parseLong(m2.group(2)); + return Optional.of(new PageInfo(offset, fetchSize)); + } + + // Pattern 3: LIMIT m, n (MySQL shorthand) + Matcher m3 = LIMIT_COMMA.matcher(sql); + if (m3.find()) { + long offset = Long.parseLong(m3.group(1)); + long limit = Long.parseLong(m3.group(2)); + return Optional.of(new PageInfo(offset, limit)); + } + + // Patterns 4 and 5 only apply when the query has no OFFSET clause at all. + // Evaluate once and reuse the result. + boolean noOffset = !HAS_OFFSET.matcher(sql).find(); + + // Pattern 4: FETCH FIRST/NEXT n ROWS ONLY (first page, offset = 0) + if (noOffset) { + Matcher m4 = FETCH_ONLY.matcher(sql); + if (m4.find()) { + long fetchSize = Long.parseLong(m4.group(1)); + return Optional.of(new PageInfo(0, fetchSize)); + } + } + + // Pattern 5: standalone LIMIT n (first page, offset = 0) + if (noOffset) { + Matcher m5 = LIMIT_ONLY.matcher(sql); + if (m5.find()) { + long limit = Long.parseLong(m5.group(1)); + return Optional.of(new PageInfo(0, limit)); + } + } + + return Optional.empty(); + } + + /** + * Builds the SQL for the next page by incrementing the OFFSET + * (or inserting one when absent) in the given SQL string. + * + *
+ * <p>The method applies the same pattern-priority order as {@link #detect}.
+ * Returns {@code null} when the next-page SQL cannot be determined.</p>
+ * + * @param sql the original paginated SQL + * @param pageInfo the page information returned by {@link #detect} + * @return the next-page SQL, or {@code null} if transformation is not possible + */ + public static String buildNextPageSql(String sql, PageInfo pageInfo) { + if (sql == null || pageInfo == null) { + return null; + } + + long nextOffset = pageInfo.getNextPageOffset(); + + // Pattern 1: replace OFFSET value in LIMIT n OFFSET m + Matcher m1 = LIMIT_OFFSET.matcher(sql); + if (m1.find()) { + // group(2) is the offset value; replace only that token + return sql.substring(0, m1.start(2)) + nextOffset + sql.substring(m1.end(2)); + } + + // Pattern 2: replace OFFSET value in OFFSET m ROWS FETCH ... ONLY + Matcher m2 = OFFSET_FETCH.matcher(sql); + if (m2.find()) { + // group(1) is the offset value + return sql.substring(0, m2.start(1)) + nextOffset + sql.substring(m2.end(1)); + } + + // Pattern 3: replace offset in LIMIT m, n + Matcher m3 = LIMIT_COMMA.matcher(sql); + if (m3.find()) { + // group(1) is the offset value (first number in LIMIT m, n) + return sql.substring(0, m3.start(1)) + nextOffset + sql.substring(m3.end(1)); + } + + // Patterns 4 and 5 only apply when the query has no OFFSET clause at all. + // Evaluate once and reuse the result. 
+ boolean noOffset = !HAS_OFFSET.matcher(sql).find(); + + // Pattern 4: FETCH FIRST/NEXT n ROWS ONLY without OFFSET → insert OFFSET before FETCH + if (noOffset) { + Matcher m4 = FETCH_ONLY.matcher(sql); + if (m4.find()) { + int fetchStart = m4.start(); + return sql.substring(0, fetchStart) + + "OFFSET " + nextOffset + " ROWS " + + sql.substring(fetchStart); + } + } + + // Pattern 5: standalone LIMIT n → append OFFSET n + if (noOffset) { + Matcher m5 = LIMIT_ONLY.matcher(sql); + if (m5.find()) { + return sql + " OFFSET " + nextOffset; + } + } + + return null; + } +} diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java index b8a96f366..6e58913e5 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java @@ -44,20 +44,20 @@ public static String hashConnectionDetails(ConnectionDetails connectionDetails) /** * Extracts the dataSource name from connection details properties. - * Returns "default" if no dataSource name is specified. + * Returns {@code "default"} if no dataSource name is specified. + * + *
+ * <p>The dataSource name corresponds to the {@code ojp.datasource.name} property
+ * set in the client connection properties.</p>
+ * + * @param connectionDetails the connection details whose properties to inspect + * @return the datasource name, or {@code "default"} when none is set */ - private static String extractDataSourceName(ConnectionDetails connectionDetails) { + public static String extractDataSourceName(ConnectionDetails connectionDetails) { if (connectionDetails.getPropertiesList().isEmpty()) { return "default"; } - - try { - Map properties = ProtoConverter.propertiesFromProto(connectionDetails.getPropertiesList()); - Object dataSourceName = properties.get("ojp.datasource.name"); - return dataSourceName != null ? dataSourceName.toString() : "default"; - } catch (Exception e) { - // If we can't deserialize properties, fall back to default - return "default"; - } + Map properties = ProtoConverter.propertiesFromProto(connectionDetails.getPropertiesList()); + Object dataSourceName = properties.get("ojp.datasource.name"); + return dataSourceName != null ? dataSourceName.toString() : "default"; } } \ No newline at end of file diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java new file mode 100644 index 000000000..31d56afa1 --- /dev/null +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java @@ -0,0 +1,246 @@ +package org.openjproxy.grpc.server; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.NullSource; +import org.junit.jupiter.params.provider.ValueSource; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests for next-page prefetch cache configuration properties in {@link ServerConfiguration}. 
+ */ +class NextPageCacheConfigurationTest { + + private static final String ENABLED_KEY = "ojp.server.nextPageCache.enabled"; + private static final String TTL_KEY = "ojp.server.nextPageCache.ttlSeconds"; + private static final String MAX_ENTRIES_KEY = "ojp.server.nextPageCache.maxEntries"; + private static final String WAIT_TIMEOUT_MS_KEY = "ojp.server.nextPageCache.prefetchWaitTimeoutMs"; + private static final String CLEANUP_INTERVAL_KEY = "ojp.server.nextPageCache.cleanupIntervalSeconds"; + + @BeforeEach + void clearProperties() { + System.clearProperty(ENABLED_KEY); + System.clearProperty(TTL_KEY); + System.clearProperty(MAX_ENTRIES_KEY); + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + System.clearProperty(CLEANUP_INTERVAL_KEY); + } + + @AfterEach + void cleanupProperties() { + clearProperties(); + } + + // ---------------------------------------------------------------- + // Defaults + // ---------------------------------------------------------------- + + @Test + void defaultConfiguration_nextPageCacheIsDisabled() { + ServerConfiguration config = new ServerConfiguration(); + + assertFalse(config.isNextPageCacheEnabled(), + "Next-page cache must be disabled by default"); + } + + @Test + void defaultConfiguration_hasExpectedDefaultValues() { + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS, + config.getNextPageCacheTtlSeconds(), "Default TTL mismatch"); + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES, + config.getNextPageCacheMaxEntries(), "Default max-entries mismatch"); + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS, + config.getNextPageCachePrefetchWaitTimeoutMs(), "Default prefetch-wait-timeout mismatch"); + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS, + config.getNextPageCacheCleanupIntervalSeconds(), "Default cleanup-interval mismatch"); + } + + // 
---------------------------------------------------------------- + // Enable via system property + // ---------------------------------------------------------------- + + @Test + void systemProperty_enabled_overridesDefault() { + System.setProperty(ENABLED_KEY, "true"); + + ServerConfiguration config = new ServerConfiguration(); + + assertTrue(config.isNextPageCacheEnabled()); + } + + @Test + void systemProperty_disabled_overridesDefault() { + System.setProperty(ENABLED_KEY, "false"); + + ServerConfiguration config = new ServerConfiguration(); + + assertFalse(config.isNextPageCacheEnabled()); + } + + // ---------------------------------------------------------------- + // Custom TTL + // ---------------------------------------------------------------- + + @Test + void systemProperty_ttlSeconds_isRespected() { + System.setProperty(TTL_KEY, "120"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(120L, config.getNextPageCacheTtlSeconds()); + } + + @Test + void systemProperty_invalidTtl_fallsBackToDefault() { + System.setProperty(TTL_KEY, "not-a-number"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS, + config.getNextPageCacheTtlSeconds()); + } + + // ---------------------------------------------------------------- + // Custom max entries + // ---------------------------------------------------------------- + + @Test + void systemProperty_maxEntries_isRespected() { + System.setProperty(MAX_ENTRIES_KEY, "250"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(250, config.getNextPageCacheMaxEntries()); + } + + @Test + void systemProperty_invalidMaxEntries_fallsBackToDefault() { + System.setProperty(MAX_ENTRIES_KEY, "invalid"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES, + config.getNextPageCacheMaxEntries()); + } + + // 
---------------------------------------------------------------- + // Custom prefetch wait timeout + // ---------------------------------------------------------------- + + @Test + void systemProperty_prefetchWaitTimeoutMs_isRespected() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "10000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(10000L, config.getNextPageCachePrefetchWaitTimeoutMs()); + } + + @Test + void systemProperty_invalidPrefetchWaitTimeout_fallsBackToDefault() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "bad-value"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS, + config.getNextPageCachePrefetchWaitTimeoutMs()); + } + + // ---------------------------------------------------------------- + // Custom cleanup interval + // ---------------------------------------------------------------- + + @Test + void systemProperty_cleanupIntervalSeconds_isRespected() { + System.setProperty(CLEANUP_INTERVAL_KEY, "30"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(30L, config.getNextPageCacheCleanupIntervalSeconds()); + } + + @Test + void systemProperty_invalidCleanupInterval_fallsBackToDefault() { + System.setProperty(CLEANUP_INTERVAL_KEY, "not-a-number"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS, + config.getNextPageCacheCleanupIntervalSeconds()); + } + + @Test + void defaultCleanupInterval_is60Seconds() { + assertEquals(60L, ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS); + } + + @Test + void defaultTtlSeconds_is60Seconds() { + assertEquals(60L, ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS); + } + + // ---------------------------------------------------------------- + // Per-datasource prefetch wait timeout + // 
---------------------------------------------------------------- + + @Test + void perDatasource_prefetchWaitTimeoutMs_isRespected() { + System.setProperty("ojp.server.nextPageCache.datasource.my-db.prefetchWaitTimeoutMs", "1500"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(1500L, config.getNextPageCachePrefetchWaitTimeoutMs("my-db")); + + System.clearProperty("ojp.server.nextPageCache.datasource.my-db.prefetchWaitTimeoutMs"); + } + + @ParameterizedTest + @NullSource + @ValueSource(strings = {"unknown-ds", "default"}) + void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_whenNoPerDatasourcePropertySet(String datasourceName) { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "5000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(5000L, config.getNextPageCachePrefetchWaitTimeoutMs(datasourceName)); + + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + @Test + void perDatasource_invalidPrefetchWaitTimeout_fallsBackToGlobalDefault() { + System.setProperty("ojp.server.nextPageCache.datasource.bad-ds.prefetchWaitTimeoutMs", "not-a-number"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS, + config.getNextPageCachePrefetchWaitTimeoutMs("bad-ds")); + + System.clearProperty("ojp.server.nextPageCache.datasource.bad-ds.prefetchWaitTimeoutMs"); + } + + @Test + void perDatasource_multipleOverrides_areIndependent() { + System.setProperty("ojp.server.nextPageCache.datasource.ds-a.prefetchWaitTimeoutMs", "1000"); + System.setProperty("ojp.server.nextPageCache.datasource.ds-b.prefetchWaitTimeoutMs", "2000"); + System.setProperty(WAIT_TIMEOUT_MS_KEY, "9000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(1000L, config.getNextPageCachePrefetchWaitTimeoutMs("ds-a")); + assertEquals(2000L, config.getNextPageCachePrefetchWaitTimeoutMs("ds-b")); + assertEquals(9000L, 
config.getNextPageCachePrefetchWaitTimeoutMs("ds-c")); // falls back to global + + System.clearProperty("ojp.server.nextPageCache.datasource.ds-a.prefetchWaitTimeoutMs"); + System.clearProperty("ojp.server.nextPageCache.datasource.ds-b.prefetchWaitTimeoutMs"); + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + +} diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java new file mode 100644 index 000000000..f98f6852b --- /dev/null +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -0,0 +1,619 @@ +package org.openjproxy.grpc.server.paging; + +import org.junit.jupiter.api.Test; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.Statement; +import java.sql.Types; +import java.util.List; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Unit tests for {@link NextPagePrefetchCache}. 
+ */ +class NextPagePrefetchCacheTest { + + // ---------------------------------------------------------------- + // Helpers + // ---------------------------------------------------------------- + + private static NextPagePrefetchCache enabledCache() { + return new NextPagePrefetchCache(true, 100, 60, 5000, 0); + } + + private static NextPagePrefetchCache disabledCache() { + return new NextPagePrefetchCache(false, 100, 60, 5000, 0); + } + + /** + * Creates a mock DataSource that returns a ResultSet with one row + * containing a single integer column named "id" with value 42. + */ + private static DataSource mockDataSource(int rowCount) throws Exception { + ResultSetMetaData meta = mock(ResultSetMetaData.class); + when(meta.getColumnCount()).thenReturn(1); + when(meta.getColumnName(1)).thenReturn("id"); + when(meta.getColumnType(1)).thenReturn(Types.INTEGER); + + ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(meta); + + // Simulate 'rowCount' rows + if (rowCount == 0) { + when(rs.next()).thenReturn(false); + } else { + Boolean[] nexts = new Boolean[rowCount + 1]; + for (int i = 0; i < rowCount; i++) nexts[i] = true; + nexts[rowCount] = false; + Boolean first = nexts[0]; + Boolean[] rest = new Boolean[rowCount]; + System.arraycopy(nexts, 1, rest, 0, rowCount); + when(rs.next()).thenReturn(first, rest); + } + when(rs.getObject(1)).thenReturn(42); + + Statement stmt = mock(Statement.class); + when(stmt.executeQuery(anyString())).thenReturn(rs); + + Connection conn = mock(Connection.class); + when(conn.createStatement()).thenReturn(stmt); + + DataSource ds = mock(DataSource.class); + when(ds.getConnection()).thenReturn(conn); + + return ds; + } + + // ---------------------------------------------------------------- + // isEnabled() + // ---------------------------------------------------------------- + + @Test + void isEnabled_returnsTrueWhenConstructedEnabled() { + assertTrue(enabledCache().isEnabled()); + } + + @Test + void 
isEnabled_returnsFalseWhenConstructedDisabled() { + assertFalse(disabledCache().isEnabled()); + } + + // ---------------------------------------------------------------- + // getIfReady() – no entry + // ---------------------------------------------------------------- + + @Test + void getIfReady_returnsEmpty_whenNothingCached() { + NextPagePrefetchCache cache = enabledCache(); + Optional result = cache.getIfReady("ds1", "SELECT * FROM t LIMIT 10 OFFSET 10"); + assertFalse(result.isPresent(), "Expected empty when nothing is cached"); + } + + // ---------------------------------------------------------------- + // prefetchAsync() – disabled cache + // ---------------------------------------------------------------- + + @Test + void prefetchAsync_doesNothing_whenDisabled() throws Exception { + NextPagePrefetchCache cache = disabledCache(); + DataSource ds = mockDataSource(1); + + cache.prefetchAsync(ds, "ds1", "SELECT * FROM t LIMIT 10 OFFSET 10", List.of()); + + // Cache should still be empty + assertFalse(cache.getIfReady("ds1", "SELECT * FROM t LIMIT 10 OFFSET 10").isPresent()); + } + + @Test + void prefetchAsync_doesNothing_whenDataSourceIsNull() { + NextPagePrefetchCache cache = enabledCache(); + cache.prefetchAsync(null, "ds1", "SELECT * FROM t LIMIT 10 OFFSET 10", List.of()); + assertFalse(cache.getIfReady("ds1", "SELECT * FROM t LIMIT 10 OFFSET 10").isPresent()); + } + + @Test + void prefetchAsync_doesNothing_whenSqlIsNull() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(1); + cache.prefetchAsync(ds, "ds1", null, List.of()); + assertEquals(0, cache.cacheSize(), "Cache should remain empty when SQL is null"); + } + + // ---------------------------------------------------------------- + // prefetchAsync() + getIfReady() – happy path + // ---------------------------------------------------------------- + + @Test + void prefetchAndGet_returnsRows_forSimpleQuery() throws Exception { + NextPagePrefetchCache cache = 
enabledCache(); + DataSource ds = mockDataSource(3); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); + + // Wait for the prefetch (virtual thread) to complete + Optional result = cache.getIfReady("ds1", sql); + + assertTrue(result.isPresent(), "Expected cached page"); + CachedPage page = result.get(); + assertEquals(List.of("id"), page.getColumnLabels()); + assertEquals(3, page.getRows().size()); + } + + @Test + void prefetchAndGet_cacheKeyIsCaseAndWhitespaceInsensitive() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(1); + + // Prefetch with one form of the SQL + cache.prefetchAsync(ds, "ds1", "SELECT id FROM t LIMIT 10 OFFSET 10", List.of()); + + // Retrieve with slightly different casing/whitespace (should normalise to same key) + Optional result = cache.getIfReady("ds1", " SELECT ID FROM T LIMIT 10 OFFSET 10 "); + + assertTrue(result.isPresent(), "Keys should normalise to the same entry"); + } + + // ---------------------------------------------------------------- + // Single-use semantics + // ---------------------------------------------------------------- + + @Test + void getIfReady_returnsSingleUse_secondCallEmpty() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(2); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); + + Optional first = cache.getIfReady("ds1", sql); + assertTrue(first.isPresent(), "First retrieval should succeed"); + + // Second retrieval should return empty (entry was removed after first use) + Optional second = cache.getIfReady("ds1", sql); + assertFalse(second.isPresent(), "Second retrieval should be empty (single-use)"); + } + + // ---------------------------------------------------------------- + // Expiry + // ---------------------------------------------------------------- + + @Test + void 
getIfReady_returnsEmpty_whenEntryExpired() throws Exception { + // TTL = 0 seconds → immediately expired + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 0, 5000, 0); + DataSource ds = mockDataSource(1); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); + + // Wait a bit to ensure the prefetch completes and the entry is expired + Thread.sleep(50); //NOSONAR + + Optional result = cache.getIfReady("ds1", sql); + assertFalse(result.isPresent(), "Entry should be expired with TTL=0"); + } + + // ---------------------------------------------------------------- + // No-duplicate prefetch + // ---------------------------------------------------------------- + + @Test + void prefetchAsync_doesNotStartDuplicate_whenKeyAlreadyPresent() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(1); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); // first start + cache.prefetchAsync(ds, "ds1", sql, List.of()); // duplicate – should be ignored + + // Retrieve to confirm the entry exists (one execution) + Optional result = cache.getIfReady("ds1", sql); + assertTrue(result.isPresent()); + } + + // ---------------------------------------------------------------- + // CachedPage + // ---------------------------------------------------------------- + + @Test + void cachedPage_isNotExpired_whenJustCreated() { + CachedPage page = new CachedPage(List.of("col"), List.of()); + assertFalse(page.isExpired(60_000), "Freshly created page should not be expired"); + } + + @Test + void cachedPage_isExpired_withZeroTtl() throws Exception { + CachedPage page = new CachedPage(List.of("col"), List.of()); + Thread.sleep(10); //NOSONAR - small delay so currentTime > createdAt + assertTrue(page.isExpired(0), "Page should be expired with TTL=0"); + } + + // ---------------------------------------------------------------- + // CLOB 
/ NCLOB return columns + // ---------------------------------------------------------------- + + /** + * Creates a mock DataSource whose ResultSet returns one row with one CLOB column. + * The CLOB content is materialised as the {@code String} returned by + * {@code getCharacterStream()}. + */ + private static DataSource mockDataSourceWithClob(String clobContent) throws Exception { + java.io.Reader reader = new java.io.StringReader(clobContent); + + ResultSetMetaData meta = mock(ResultSetMetaData.class); + when(meta.getColumnCount()).thenReturn(1); + when(meta.getColumnName(1)).thenReturn("description"); + when(meta.getColumnType(1)).thenReturn(Types.CLOB); + + ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(meta); + when(rs.next()).thenReturn(true, false); + when(rs.getCharacterStream(1)).thenReturn(reader); + + Statement stmt = mock(Statement.class); + when(stmt.executeQuery(anyString())).thenReturn(rs); + + Connection conn = mock(Connection.class); + when(conn.createStatement()).thenReturn(stmt); + + DataSource ds = mock(DataSource.class); + when(ds.getConnection()).thenReturn(conn); + return ds; + } + + /** + * Creates a mock DataSource whose ResultSet returns one row with one NCLOB column. 
+ */ + private static DataSource mockDataSourceWithNClob(String nclobContent) throws Exception { + java.io.Reader reader = new java.io.StringReader(nclobContent); + + ResultSetMetaData meta = mock(ResultSetMetaData.class); + when(meta.getColumnCount()).thenReturn(1); + when(meta.getColumnName(1)).thenReturn("content"); + when(meta.getColumnType(1)).thenReturn(Types.NCLOB); + + ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(meta); + when(rs.next()).thenReturn(true, false); + when(rs.getNCharacterStream(1)).thenReturn(reader); + + Statement stmt = mock(Statement.class); + when(stmt.executeQuery(anyString())).thenReturn(rs); + + Connection conn = mock(Connection.class); + when(conn.createStatement()).thenReturn(stmt); + + DataSource ds = mock(DataSource.class); + when(ds.getConnection()).thenReturn(conn); + return ds; + } + + @Test + void prefetchAndGet_cachesClobColumns_asString() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + String clobContent = "This is a large text value stored as CLOB"; + DataSource ds = mockDataSourceWithClob(clobContent); + + String sql = "SELECT description FROM articles LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); + + Optional result = cache.getIfReady("ds1", sql); + + assertTrue(result.isPresent(), "CLOB column query should be cached"); + CachedPage page = result.get(); + assertEquals(1, page.getRows().size()); + assertEquals(clobContent, page.getRows().get(0)[0], + "CLOB content should be materialised as String"); + } + + @Test + void prefetchAndGet_cachesNclobColumns_asString() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + String nclobContent = "Unicode text: こんにちは"; + DataSource ds = mockDataSourceWithNClob(nclobContent); + + String sql = "SELECT content FROM docs LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); + + Optional result = cache.getIfReady("ds1", sql); + + assertTrue(result.isPresent(), "NCLOB column query 
should be cached"); + CachedPage page = result.get(); + assertEquals(1, page.getRows().size()); + assertEquals(nclobContent, page.getRows().get(0)[0], + "NCLOB content should be materialised as String"); + } + + @Test + void prefetchAndGet_handlesNullClobValue() throws Exception { + ResultSetMetaData meta = mock(ResultSetMetaData.class); + when(meta.getColumnCount()).thenReturn(1); + when(meta.getColumnName(1)).thenReturn("description"); + when(meta.getColumnType(1)).thenReturn(Types.CLOB); + + ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(meta); + when(rs.next()).thenReturn(true, false); + when(rs.getCharacterStream(1)).thenReturn(null); // NULL CLOB + + Statement stmt = mock(Statement.class); + when(stmt.executeQuery(anyString())).thenReturn(rs); + + Connection conn = mock(Connection.class); + when(conn.createStatement()).thenReturn(stmt); + + DataSource ds = mock(DataSource.class); + when(ds.getConnection()).thenReturn(conn); + + NextPagePrefetchCache cache = enabledCache(); + String sql = "SELECT description FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); + + Optional result = cache.getIfReady("ds1", sql); + + assertTrue(result.isPresent(), "Null CLOB should be cached as null value"); + assertFalse(result.get().getRows().isEmpty()); + assertNull(result.get().getRows().get(0)[0], "Null CLOB column should be null in cache"); + } + + // ---------------------------------------------------------------- + // Datasource isolation + // ---------------------------------------------------------------- + + @Test + void prefetchAndGet_isolatesByDatasourceId() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds1 = mockDataSource(2); + DataSource ds2 = mockDataSource(5); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + + // Prefetch same SQL for two different datasources + cache.prefetchAsync(ds1, "conn-hash-A", sql, List.of()); + cache.prefetchAsync(ds2, "conn-hash-B", sql, 
List.of()); + + // Each datasource gets its own cache entry + Optional resultA = cache.getIfReady("conn-hash-A", sql); + Optional resultB = cache.getIfReady("conn-hash-B", sql); + + assertTrue(resultA.isPresent(), "Datasource A should have its own cache entry"); + assertTrue(resultB.isPresent(), "Datasource B should have its own cache entry"); + assertEquals(2, resultA.get().getRows().size(), "DS-A should have 2 rows"); + assertEquals(5, resultB.get().getRows().size(), "DS-B should have 5 rows"); + } + + @Test + void getIfReady_withDifferentDatasourceId_missesCache() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(1); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "conn-hash-A", sql, List.of()); + + // Asking for the same SQL under a different datasource ID should miss + Optional result = cache.getIfReady("conn-hash-B", sql); + assertFalse(result.isPresent(), + "Cache miss expected: different datasourceId should not match"); + } + + // ---------------------------------------------------------------- + // Background cleanup scheduler + // ---------------------------------------------------------------- + + @Test + void shutdown_doesNotThrow_whenSchedulerNotStarted() { + // cleanupIntervalSeconds=0 → no cleanup task registered + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 5000, 0); + cache.shutdown(); // must not throw + assertEquals(0, cache.cacheSize(), "Cache should remain empty after shutdown"); + } + + @Test + void shutdown_isIdempotent() { + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 5000, 30); + cache.shutdown(); + cache.shutdown(); // second call must not throw + assertEquals(0, cache.cacheSize(), "Cache should remain empty after double shutdown"); + } + + @Test + void backgroundCleanup_evictsExpiredEntries() throws Exception { + // TTL = 0 → all entries expire immediately + // cleanupInterval = 1 second → scheduler 
will run + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 0, 5000, 1); + DataSource ds = mockDataSource(2); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); + + // Wait (with polling) for the background cleanup to reduce the cache size to 0 + long deadline = System.currentTimeMillis() + 5_000; + while (cache.cacheSize() > 0 && System.currentTimeMillis() < deadline) { + Thread.sleep(50); //NOSONAR + } + + assertEquals(0, cache.cacheSize(), + "Background cleanup should have evicted the expired entry"); + + cache.shutdown(); + } + + // ---------------------------------------------------------------- + // Per-datasource prefetch wait timeout + // ---------------------------------------------------------------- + + @Test + void registerDatasourcePrefetchWaitTimeout_ignoresNullId() { + NextPagePrefetchCache cache = enabledCache(); + // Null datasourceId should be silently ignored (no NullPointerException) + cache.registerDatasourcePrefetchWaitTimeout(null, 1000); + assertEquals(0, cache.cacheSize(), "Cache should remain empty when datasourceId is null"); + } + + @Test + void getIfReady_usesPerDatasourceTimeout_whenRegistered() throws Exception { + // enabled, maxEntries=100, ttlSeconds=60, globalTimeoutMs=1, cleanupInterval=0 (disabled) + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 1, 0); // global: 1ms + cache.registerDatasourcePrefetchWaitTimeout("ds-custom", 5_000); // per-ds: 5 s + + DataSource ds = mockDataSource(3); + String sql = "SELECT id FROM t LIMIT 10 OFFSET 0"; + cache.prefetchAsync(ds, "ds-custom", sql, List.of()); + + Optional result = cache.getIfReady("ds-custom", sql); + + assertTrue(result.isPresent(), "Cache hit expected with per-datasource timeout"); + assertEquals(3, result.get().getRows().size()); + } + + @Test + void registerDatasourcePrefetchWaitTimeout_replacesExistingValue() throws Exception { + // enabled, maxEntries=100, ttlSeconds=60, 
globalTimeoutMs=9999, cleanupInterval=0 (disabled) + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 9_999, 0); + + cache.registerDatasourcePrefetchWaitTimeout("ds-x", 1_000); + cache.registerDatasourcePrefetchWaitTimeout("ds-x", 2_000); // replace + + // Exercise getIfReady to confirm the updated timeout is used without error + DataSource ds = mockDataSource(1); + String sql = "SELECT id FROM t LIMIT 5 OFFSET 0"; + cache.prefetchAsync(ds, "ds-x", sql, List.of()); + + Optional result = cache.getIfReady("ds-x", sql); + assertTrue(result.isPresent()); + } + + // ---------------------------------------------------------------- + // Per-datasource cache enabled flag + // ---------------------------------------------------------------- + + @Test + void registerDatasourceCacheEnabled_ignoresNullId() { + NextPagePrefetchCache cache = enabledCache(); + // Null datasourceId should be silently ignored (no NullPointerException) + cache.registerDatasourceCacheEnabled(null, false); + assertEquals(0, cache.cacheSize(), "Cache should remain empty when datasourceId is null"); + } + + @Test + void isEnabledForDatasource_returnsTrueByDefault_whenGloballyEnabled() { + NextPagePrefetchCache cache = enabledCache(); + assertTrue(cache.isEnabledForDatasource("any-ds"), + "Should return true when no per-datasource override is registered"); + } + + @Test + void isEnabledForDatasource_returnsFalseByDefault_whenGloballyDisabled() { + NextPagePrefetchCache cache = disabledCache(); + assertFalse(cache.isEnabledForDatasource("any-ds"), + "Should return false when cache is globally disabled"); + } + + @Test + void isEnabledForDatasource_respectsPerDatasourceOverride_disabled() { + NextPagePrefetchCache cache = enabledCache(); // globally enabled + cache.registerDatasourceCacheEnabled("disabled-ds", false); + + assertFalse(cache.isEnabledForDatasource("disabled-ds"), + "Per-datasource false should override the global true"); + 
assertTrue(cache.isEnabledForDatasource("other-ds"), + "Other datasources not overridden should still use the global setting"); + } + + @Test + void isEnabledForDatasource_respectsPerDatasourceOverride_enabled() { + NextPagePrefetchCache cache = disabledCache(); // globally disabled + cache.registerDatasourceCacheEnabled("special-ds", true); + + assertTrue(cache.isEnabledForDatasource("special-ds"), + "Per-datasource true should override the global false"); + assertFalse(cache.isEnabledForDatasource("other-ds"), + "Other datasources not overridden should still use the global setting"); + } + + @Test + void prefetchAsync_isSkipped_whenPerDatasourceDisabled() throws Exception { + NextPagePrefetchCache cache = enabledCache(); // globally enabled + cache.registerDatasourceCacheEnabled("disabled-ds", false); + + DataSource ds = mockDataSource(5); + String sql = "SELECT id FROM t LIMIT 10 OFFSET 0"; + cache.prefetchAsync(ds, "disabled-ds", sql, List.of()); + + assertEquals(0, cache.cacheSize(), "Prefetch should be skipped for disabled datasource"); + } + + @Test + void prefetchAsync_isAllowed_whenPerDatasourceEnabled_andGloballyDisabled() throws Exception { + NextPagePrefetchCache cache = disabledCache(); // globally disabled + cache.registerDatasourceCacheEnabled("special-ds", true); + + DataSource ds = mockDataSource(3); + String sql = "SELECT id FROM t LIMIT 10 OFFSET 0"; + cache.prefetchAsync(ds, "special-ds", sql, List.of()); + + Thread.sleep(50); //NOSONAR + Optional result = cache.getIfReady("special-ds", sql); + assertTrue(result.isPresent(), "Per-datasource enabled override should allow prefetch"); + assertEquals(3, result.get().getRows().size()); + } + + // ---------------------------------------------------------------- + // Client-side ojp.nextPageCache.enabled property (simulates connect() logic) + // ---------------------------------------------------------------- + + @Test + void clientProperty_disable_registersOverrideViaCache() { + // Simulate what 
StatementServiceImpl.connect() does when the client sends + // ojp.nextPageCache.enabled=false in its connection properties. + NextPagePrefetchCache cache = enabledCache(); // globally enabled + + // Client sends the property; connect() reads it and registers with the cache + String clientEnabledValue = "false"; + cache.registerDatasourceCacheEnabled("conn-abc", Boolean.parseBoolean(clientEnabledValue)); + + assertFalse(cache.isEnabledForDatasource("conn-abc"), + "Client property ojp.nextPageCache.enabled=false should disable cache for that connection"); + assertTrue(cache.isEnabledForDatasource("conn-xyz"), + "Other connections without an override should still use the global setting"); + } + + @Test + void clientProperty_enable_overridesGlobalDisable() { + // Client sends ojp.nextPageCache.enabled=true while the server global is false. + NextPagePrefetchCache cache = disabledCache(); // globally disabled + + cache.registerDatasourceCacheEnabled("conn-reporting", Boolean.parseBoolean("true")); + + assertTrue(cache.isEnabledForDatasource("conn-reporting"), + "Client property ojp.nextPageCache.enabled=true should enable cache even when globally disabled"); + assertFalse(cache.isEnabledForDatasource("conn-other"), + "Connections without an override should still reflect the global disabled setting"); + } + + @Test + void clientProperty_absent_fallsBackToGlobal() { + // When the client does NOT send ojp.nextPageCache.enabled, connect() does not call + // registerDatasourceCacheEnabled, so isEnabledForDatasource falls back to the global flag. 
+ NextPagePrefetchCache cacheEnabled = enabledCache(); + NextPagePrefetchCache cacheDisabled = disabledCache(); + + // No registration performed (client property was absent) + assertTrue(cacheEnabled.isEnabledForDatasource("conn-no-prop"), + "Absent client property on globally-enabled cache should default to true"); + assertFalse(cacheDisabled.isEnabledForDatasource("conn-no-prop"), + "Absent client property on globally-disabled cache should default to false"); + } +} diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java new file mode 100644 index 000000000..cf5ed0c1b --- /dev/null +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java @@ -0,0 +1,166 @@ +package org.openjproxy.grpc.server.paging; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests for {@link PaginationDetector}. 
+ */ +class PaginationDetectorTest { + + // ---------------------------------------------------------------- + // detect() – positive cases + // ---------------------------------------------------------------- + + @Test + void detectLimitOffset_returnsCorrectPageInfo() { + String sql = "SELECT id, name FROM users ORDER BY id LIMIT 10 OFFSET 20"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent(), "Expected pagination to be detected"); + assertEquals(20, result.get().getCurrentOffset()); + assertEquals(10, result.get().getPageSize()); + assertEquals(30, result.get().getNextPageOffset()); + } + + @Test + void detectLimitOffset_firstPage() { + String sql = "SELECT * FROM orders LIMIT 25 OFFSET 0"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent()); + assertEquals(0, result.get().getCurrentOffset()); + assertEquals(25, result.get().getPageSize()); + assertTrue(result.get().isFirstPage()); + } + + @ParameterizedTest(name = "[{index}] {0}") + @CsvSource({ + // SQL Server / Oracle: OFFSET … ROWS FETCH NEXT … ROWS ONLY + "'SELECT id, name FROM users ORDER BY id OFFSET 30 ROWS FETCH NEXT 10 ROWS ONLY', 10, 30, 40", + // FETCH FIRST … ROWS ONLY with explicit OFFSET 0 + "'SELECT * FROM items OFFSET 0 ROWS FETCH FIRST 50 ROWS ONLY', 50, 0, 50", + // MySQL shorthand: LIMIT offset, pageSize + "'SELECT * FROM products LIMIT 20, 10', 10, 20, 30", + // FETCH FIRST … ROWS ONLY without any OFFSET (first page) + "'SELECT TOP_N.* FROM (SELECT * FROM t) TOP_N FETCH FIRST 10 ROWS ONLY', 10, 0, 10", + // FETCH NEXT … ROWS ONLY without any OFFSET (first page) + "'SELECT * FROM t FETCH NEXT 5 ROWS ONLY', 5, 0, 5", + // Standalone LIMIT without OFFSET (first page) + "'SELECT * FROM users WHERE active = 1 LIMIT 15', 15, 0, 15", + // Case-insensitive matching + "'select id from foo limit 5 offset 10', 5, 10, 15" + }) + void detect_recognisesPaginationPatterns(String sql, long pageSize, long currentOffset, long nextOffset) { 
+ Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent(), "Expected pagination to be detected in: " + sql); + assertEquals(pageSize, result.get().getPageSize(), "Page size mismatch"); + assertEquals(currentOffset, result.get().getCurrentOffset(), "Current offset mismatch"); + assertEquals(nextOffset, result.get().getNextPageOffset(), "Next offset mismatch"); + } + + // ---------------------------------------------------------------- + // detect() – negative cases + // ---------------------------------------------------------------- + + @Test + void detect_returnsEmpty_forNonPaginatedQuery() { + String sql = "SELECT id, name FROM users WHERE id = 1"; + Optional result = PaginationDetector.detect(sql); + + assertFalse(result.isPresent(), "Expected no pagination"); + } + + @Test + void detect_returnsEmpty_forNullSql() { + assertFalse(PaginationDetector.detect(null).isPresent()); + } + + @Test + void detect_returnsEmpty_forBlankSql() { + assertFalse(PaginationDetector.detect(" ").isPresent()); + } + + @Test + void detect_limitOnly_notMatchedWhenOffsetPresent() { + // LIMIT n with an OFFSET keyword somewhere else – should not match Pattern 5 + String sql = "SELECT * FROM t WHERE col > 0 LIMIT 10 OFFSET 5"; + Optional result = PaginationDetector.detect(sql); + + // Pattern 1 (LIMIT n OFFSET m) should match instead + assertTrue(result.isPresent()); + assertEquals(5, result.get().getCurrentOffset()); + assertEquals(10, result.get().getPageSize()); + } + + // ---------------------------------------------------------------- + // buildNextPageSql() – parameterised round-trip + // ---------------------------------------------------------------- + + @ParameterizedTest(name = "[{index}] {0}") + @CsvSource({ + // LIMIT n OFFSET m – first page (offset 0 → 10) + "'SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 0', 'SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 10'", + // LIMIT n OFFSET m – second page (offset 10 → 20) + "'SELECT id FROM users ORDER 
BY id LIMIT 10 OFFSET 10', 'SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 20'", + // OFFSET FETCH (SQL Server / Oracle) – first page (offset 0 → 20) + "'SELECT id FROM t ORDER BY id OFFSET 0 ROWS FETCH NEXT 20 ROWS ONLY', 'SELECT id FROM t ORDER BY id OFFSET 20 ROWS FETCH NEXT 20 ROWS ONLY'", + // OFFSET FETCH (SQL Server / Oracle) – second page (offset 20 → 40) + "'SELECT id FROM t ORDER BY id OFFSET 20 ROWS FETCH NEXT 20 ROWS ONLY', 'SELECT id FROM t ORDER BY id OFFSET 40 ROWS FETCH NEXT 20 ROWS ONLY'", + // MySQL LIMIT offset, pageSize – first page (offset 0 → 10) + "'SELECT * FROM products LIMIT 0, 10', 'SELECT * FROM products LIMIT 10, 10'", + // FETCH FIRST … ROWS ONLY without OFFSET – inserts OFFSET clause + "'SELECT * FROM t FETCH FIRST 10 ROWS ONLY', 'SELECT * FROM t OFFSET 10 ROWS FETCH FIRST 10 ROWS ONLY'", + // Standalone LIMIT without OFFSET – appends OFFSET clause + "'SELECT * FROM users LIMIT 5', 'SELECT * FROM users LIMIT 5 OFFSET 5'" + }) + void buildNextPageSql_producesCorrectNextPageQuery(String sql, String expected) { + PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); + assertEquals(expected, PaginationDetector.buildNextPageSql(sql, pageInfo)); + } + + // ---------------------------------------------------------------- + // buildNextPageSql() – edge cases + // ---------------------------------------------------------------- + + @Test + void buildNextPage_returnsNull_forNullSql() { + assertNull(PaginationDetector.buildNextPageSql(null, new PageInfo(0, 10))); + } + + @Test + void buildNextPage_returnsNull_forNullPageInfo() { + assertNull(PaginationDetector.buildNextPageSql("SELECT 1", null)); + } + + // ---------------------------------------------------------------- + // Parameterised – detect then build round-trip + // ---------------------------------------------------------------- + + @ParameterizedTest(name = "[{index}] {0}") + @CsvSource({ + "'SELECT a FROM t LIMIT 10 OFFSET 0', 10, 0, 10", + "'SELECT a FROM t LIMIT 10 
OFFSET 10', 10, 10, 20", + // MySQL LIMIT m,n: first arg = offset, second arg = page-size + "'SELECT a FROM t LIMIT 5, 20', 20, 5, 25", + "'SELECT a FROM t OFFSET 0 ROWS FETCH NEXT 10 ROWS ONLY', 10, 0, 10", + "'SELECT a FROM t OFFSET 10 ROWS FETCH FIRST 10 ROWS ONLY', 10, 10, 20" + }) + void detectAndNextOffset(String sql, long pageSize, long currentOffset, long expectedNextOffset) { + Optional pageInfo = PaginationDetector.detect(sql); + + assertTrue(pageInfo.isPresent(), "Expected pagination in: " + sql); + assertEquals(pageSize, pageInfo.get().getPageSize(), "Page size mismatch"); + assertEquals(currentOffset, pageInfo.get().getCurrentOffset(), "Current offset mismatch"); + assertEquals(expectedNextOffset, pageInfo.get().getNextPageOffset(), "Next offset mismatch"); + } +}