From 555f20d59a85ce9b5367571a97b7c7626c74bee1 Mon Sep 17 00:00:00 2001
From: atovpeko
Date: Thu, 2 Oct 2025 13:45:40 +0300
Subject: [PATCH 1/5] update for azure

---
 migrate/livesync-for-kafka.md | 16 ++++++++++++++++
 migrate/livesync-for-s3.md | 16 ++++++++++++++++
 migrate/upload-file-using-console.md | 16 ++++++++++++++++
 3 files changed, 48 insertions(+)

diff --git a/migrate/livesync-for-kafka.md b/migrate/livesync-for-kafka.md
index bcc59d1c35..082c812b38 100644
--- a/migrate/livesync-for-kafka.md
+++ b/migrate/livesync-for-kafka.md
@@ -8,9 +8,14 @@ tags: [stream, connector]

 import PrereqCloud from "versionContent/_partials/_prereqs-cloud-only.mdx";
 import EarlyAccessNoRelease from "versionContent/_partials/_early_access.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";

 # Stream data from Kafka

+
+
+
+
 You use the Kafka source connector in $CLOUD_LONG to stream events from Kafka into your $SERVICE_SHORT. $CLOUD_LONG connects to your Confluent Cloud Kafka cluster and Schema Registry using SASL/SCRAM authentication and service account–based API keys. Only the Avro format is currently supported [with some limitations][limitations].

 This page explains how to connect $CLOUD_LONG to your Confluence Cloud Kafka cluster.
@@ -232,6 +237,17 @@ Unsupported examples:
 }
 ```

+
+
+
+
+
+
+
+
+
+
+
 [confluent-cloud]: https://confluent.cloud/
 [connection-info]: /integrations/:currentVersion:/find-connection-details/
 [confluence-signup]: https://www.confluent.io/get-started/
diff --git a/migrate/livesync-for-s3.md b/migrate/livesync-for-s3.md
index dd87de50cf..aedf3cdcef 100644
--- a/migrate/livesync-for-s3.md
+++ b/migrate/livesync-for-s3.md
@@ -8,9 +8,14 @@ tags: [recovery, logical backup, replication]

 import PrereqCloud from "versionContent/_partials/_prereqs-cloud-only.mdx";
 import EarlyAccessNoRelease from "versionContent/_partials/_early_access.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";

 # Sync data from S3

+
+
+
+
 You use the $S3_CONNECTOR in $CLOUD_LONG to synchronize CSV and Parquet files from an S3 bucket to your $SERVICE_LONG in real time. The connector runs continuously, enabling you to leverage $CLOUD_LONG as your analytics database with data constantly synced from S3. This lets you take full advantage of $CLOUD_LONG's real-time analytics capabilities without having to develop or manage custom ETL solutions between S3 and $CLOUD_LONG.

 ![Tiger Cloud connectors overview](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-cloud-connector-overview.png)
@@ -157,6 +162,17 @@ To sync data from your S3 bucket to your $SERVICE_LONG using $CONSOLE:

 And that is it, you are using the $S3_CONNECTOR to synchronize all the data, or specific files, from an S3 bucket to your $SERVICE_LONG in real time.

+
+
+
+
+
+
+
+
+
+
+
 [about-hypertables]: /use-timescale/:currentVersion:/hypertables/
 [lives-sync-specify-tables]: /migrate/:currentVersion:/livesync-for-postgresql/#specify-the-tables-to-synchronize
 [compression]: /use-timescale/:currentVersion:/compression/about-compression
diff --git a/migrate/upload-file-using-console.md b/migrate/upload-file-using-console.md
index 09fb68de2d..3d57df466f 100644
--- a/migrate/upload-file-using-console.md
+++ b/migrate/upload-file-using-console.md
@@ -7,9 +7,14 @@ keywords: [import]

 import ImportPrerequisitesCloudNoConnection from "versionContent/_partials/_prereqs-cloud-no-connection.mdx";
 import EarlyAccessGeneral from "versionContent/_partials/_early_access.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";

 # Upload a file into your $SERVICE_SHORT using $CONSOLE_LONG

+
+
+
+
 You can upload files into your $SERVICE_SHORT using $CONSOLE_LONG.

 This page explains how to upload CSV, Parquet, and text files, from your local machine and from an S3 bucket.
@@ -205,6 +210,17 @@ To import a Parquet file from an S3 bucket:

 And that is it, you have imported your data to your $SERVICE_LONG.

+
+
+
+
+
+
+
+
+
+
+
 [credentials-iam]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html#roles-creatingrole-user-console
 [credentials-public]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-anonymous-user
 [console]: hhttps://console.cloud.timescale.com/dashboard/services

From bc89170b776f45e47b33d5beb2a43ee3b6414df2 Mon Sep 17 00:00:00 2001
From: atovpeko
Date: Thu, 2 Oct 2025 13:53:19 +0300
Subject: [PATCH 2/5] update for azure

---
 use-timescale/tigerlake.md | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/use-timescale/tigerlake.md b/use-timescale/tigerlake.md
index 1799a20ebf..75240638ec 100644
--- a/use-timescale/tigerlake.md
+++ b/use-timescale/tigerlake.md
@@ -8,9 +8,14 @@ keywords: [data lake, lakehouse, s3, iceberg]

 import IntegrationPrereqsCloud from "versionContent/_partials/_integration-prereqs-cloud-only.mdx";
 import EarlyAccessGeneral from "versionContent/_partials/_early_access.mdx";
+import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure.mdx";

 # Integrate data lakes with $CLOUD_LONG

+
+
+
+
 $LAKE_LONG enables you to build real-time applications alongside efficient data pipeline management within a single system.

 $LAKE_LONG unifies the $CLOUD_LONG operational architecture with data lake architectures.
@@ -333,6 +338,17 @@ data lake:
 * Iceberg snapshots are pruned automatically if the amount exceeds 2500.
 * The Iceberg namespace is hard coded to `timescaledb`, a custom namespace value is work in progress.

+
+
+
+
+
+
+
+
+
+
+
 [cmc]: https://console.aws.amazon.com/cloudformation/
 [aws-athena]: https://aws.amazon.com/athena/
 [apache-spark]: https://spark.apache.org/

From bc3924c148fc38735f5f1bd500ea59cd397734d4 Mon Sep 17 00:00:00 2001
From: atovpeko
Date: Sun, 5 Oct 2025 15:59:11 +0300
Subject: [PATCH 3/5] add missing partial

---
 _partials/_not-supported-for-azure.mdx | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 _partials/_not-supported-for-azure.mdx

diff --git a/_partials/_not-supported-for-azure.mdx b/_partials/_not-supported-for-azure.mdx
new file mode 100644
index 0000000000..b6108d27c0
--- /dev/null
+++ b/_partials/_not-supported-for-azure.mdx
@@ -0,0 +1 @@
+This feature is on our roadmap for $CLOUD_LONG on Microsoft Azure. Stay tuned!
\ No newline at end of file

From a038312c971f2af67bcefef019764c2b90a19eb7 Mon Sep 17 00:00:00 2001
From: atovpeko
Date: Tue, 14 Oct 2025 10:47:28 +0300
Subject: [PATCH 4/5] update

---
 migrate/livesync-for-kafka.md | 19 +++----------------
 migrate/livesync-for-s3.md | 15 ++-------------
 2 files changed, 5 insertions(+), 29 deletions(-)

diff --git a/migrate/livesync-for-kafka.md b/migrate/livesync-for-kafka.md
index 4b7e10b2c6..db762c510a 100644
--- a/migrate/livesync-for-kafka.md
+++ b/migrate/livesync-for-kafka.md
@@ -12,13 +12,9 @@ import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure

 # Stream data from Kafka

-
-
-
-
 You use the Kafka source connector in $CLOUD_LONG to stream events from Kafka into your $SERVICE_SHORT. $CLOUD_LONG connects to your Confluent Cloud Kafka cluster and Schema Registry using SASL/SCRAM authentication and service account–based API keys. Only the Avro format is currently supported [with some limitations][limitations].

-This page explains how to connect $CLOUD_LONG to your Confluence Cloud Kafka cluster.
+This page explains how to connect $CLOUD_LONG to your Confluent Cloud Kafka cluster.

 : the Kafka source connector is not yet supported for production use.
@@ -29,6 +25,8 @@ This page explains how to connect $CLOUD_LONG to your Confluence Cloud Kafka clu
 - [Sign up][confluence-signup] for Confluence Cloud.
 - [Create][create-kafka-cluster] a Kafka cluster in Confluence Cloud.

+
+
 ## Access your Kafka cluster in Confluent Cloud

 Take the following steps to prepare your Kafka cluster for connection to $CLOUD_LONG:
@@ -237,17 +235,6 @@ Unsupported examples:
 }
 ```

-
-
-
-
-
-
-
-
-
-
-
 [confluent-cloud]: https://confluent.cloud/
 [connection-info]: /integrations/:currentVersion:/find-connection-details/
 [confluence-signup]: https://www.confluent.io/get-started/
diff --git a/migrate/livesync-for-s3.md b/migrate/livesync-for-s3.md
index 336fa68016..fecf92ce21 100644
--- a/migrate/livesync-for-s3.md
+++ b/migrate/livesync-for-s3.md
@@ -12,10 +12,6 @@ import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure

 # Sync data from S3

-
-
-
-
 You use the $S3_CONNECTOR in $CLOUD_LONG to synchronize CSV and Parquet files from an S3 bucket to your $SERVICE_LONG in real time. The connector runs continuously, enabling you to leverage $CLOUD_LONG as your analytics database with data constantly synced from S3. This lets you take full advantage of $CLOUD_LONG's real-time analytics capabilities without having to develop or manage custom ETL solutions between S3 and $CLOUD_LONG.

 ![Tiger connectors overview](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-console-connector-overview.png)
@@ -67,6 +63,8 @@ The $S3_CONNECTOR continuously imports data from an Amazon S3 bucket into your d
 - [Public anonymous user][credentials-public].

+
+
 ## Limitations

 - **File naming**:
@@ -166,15 +164,6 @@ To sync data from your S3 bucket to your $SERVICE_LONG using $CONSOLE:

 And that is it, you are using the $S3_CONNECTOR to synchronize all the data, or specific files, from an S3 bucket to your $SERVICE_LONG in real time.

-
-
-
-
-
-
-
-
-
 [about-hypertables]: /use-timescale/:currentVersion:/hypertables/

From 1e9749cffc2fc456a96791fcac968930c072c11b Mon Sep 17 00:00:00 2001
From: atovpeko
Date: Tue, 14 Oct 2025 13:30:00 +0300
Subject: [PATCH 5/5] turn tabs into notes

---
 _partials/_not-supported-for-azure.mdx | 6 +++++-
 migrate/upload-file-using-console.md | 16 ++++------------
 use-timescale/tigerlake.md | 14 ++------------
 3 files changed, 11 insertions(+), 25 deletions(-)

diff --git a/_partials/_not-supported-for-azure.mdx b/_partials/_not-supported-for-azure.mdx
index b6108d27c0..cc108fd357 100644
--- a/_partials/_not-supported-for-azure.mdx
+++ b/_partials/_not-supported-for-azure.mdx
@@ -1 +1,5 @@
-This feature is on our roadmap for $CLOUD_LONG on Microsoft Azure. Stay tuned!
\ No newline at end of file
+
+
+This feature is on our roadmap for $CLOUD_LONG on Microsoft Azure. Stay tuned!
+
+
\ No newline at end of file
diff --git a/migrate/upload-file-using-console.md b/migrate/upload-file-using-console.md
index 30060929f4..84637a7690 100644
--- a/migrate/upload-file-using-console.md
+++ b/migrate/upload-file-using-console.md
@@ -11,10 +11,6 @@ import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure

 # Upload a file into your $SERVICE_SHORT using $CONSOLE_LONG

-
-
-
-
 You can upload files into your $SERVICE_SHORT using $CONSOLE_LONG.

 This page explains how to upload CSV, Parquet, and text files, from your local machine and from an S3 bucket.
@@ -29,6 +25,8 @@ $CONSOLE_LONG enables you to drag and drop files to upload from your local machi

+
+
@@ -129,6 +127,8 @@ $CONSOLE_LONG enables you to upload CSV and Parquet files, including archives co
 - [IAM Role][credentials-iam].
 - [Public anonymous user][credentials-public].

+
+
@@ -207,18 +207,10 @@ To import a Parquet file from an S3 bucket:

-
 And that is it, you have imported your data to your $SERVICE_LONG.

-
-
-
-
-
-
-
 [credentials-iam]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html#roles-creatingrole-user-console
diff --git a/use-timescale/tigerlake.md b/use-timescale/tigerlake.md
index b4c651f0d8..75529ec423 100644
--- a/use-timescale/tigerlake.md
+++ b/use-timescale/tigerlake.md
@@ -12,10 +12,6 @@ import NotSupportedAzure from "versionContent/_partials/_not-supported-for-azure

 # Integrate data lakes with $CLOUD_LONG

-
-
-
-
 $LAKE_LONG enables you to build real-time applications alongside efficient data pipeline management within a single system.

 $LAKE_LONG unifies the $CLOUD_LONG operational architecture with data lake architectures.
@@ -34,6 +30,8 @@ Tiger Lake is currently in private beta. Please contact us to request access.

+
+
 ## Integrate a data lake with your $SERVICE_LONG

 To connect a $SERVICE_LONG to your data lake:
@@ -338,17 +336,7 @@ data lake:
 * Iceberg snapshots are pruned automatically if the amount exceeds 2500.
 * The Iceberg namespace is hard coded to `timescaledb`, a custom namespace value is work in progress.

-
-
-
-
-
-
-
-
 [cmc]: https://console.aws.amazon.com/cloudformation/