Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion migrations_lockfile.txt
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ releases: 0004_cleanup_failed_safe_deletes

replays: 0006_add_bulk_delete_job

sentry: 1013_add_repositorysettings_table
sentry: 1014_transactions_to_spans_widgets_self_hosted

social_auth: 0003_social_auth_json_field

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,216 @@
# Generated by Django 5.2.8 on 2025-12-04 15:11


from enum import Enum

import sentry_sdk
from django.db import migrations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Q

from sentry.explore.translation.dashboards_translation import (
restore_transaction_widget,
translate_dashboard_widget,
)
from sentry.models.dashboard_widget import DashboardWidget
from sentry.new_migrations.migrations import CheckedMigration
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar


class TypesClass:
TYPES: list[tuple[int, str]]

@classmethod
def as_choices(cls) -> list[tuple[int, str]]:
return [(k, str(v)) for k, v in cls.TYPES]

@classmethod
def as_text_choices(cls) -> list[tuple[str, str]]:
return [(str(v), str(v)) for _, v in cls.TYPES]

@classmethod
def get_type_name(cls: type["TypesClass"], num: int) -> str | None:
for id, name in cls.TYPES:
if id == num:
return name
return None

@classmethod
def get_id_for_type_name(cls: type["TypesClass"], type_name: str) -> int | None:
for id, name in cls.TYPES:
if type_name == name:
return id
return None


class DashboardWidgetTypes(TypesClass):
    """Widget type ids, frozen into this migration.

    NOTE: the original attribute docstrings were shifted by one member
    (the text after SPANS described logs, the text after LOGS described
    trace metrics); they are realigned here as comments.
    """

    # Old way of accessing error events and transaction events simultaneously.
    # @deprecated: use ERROR_EVENTS or TRANSACTION_LIKE instead.
    DISCOVER = 0
    ISSUE = 1
    RELEASE_HEALTH = 2
    # Not included in TYPES below, so it has no text-choice name here.
    METRICS = 3
    # Error side of the split from Discover.
    ERROR_EVENTS = 100
    # Transaction-like data from the Discover split. It may use "Transactions"
    # events or "PerformanceMetrics" depending on on-demand, MEP metrics, etc.
    TRANSACTION_LIKE = 101
    # Spans trace item type on the EAP dataset.
    SPANS = 102
    # Logs trace item type on the EAP dataset.
    LOGS = 103
    # Trace metrics item type on the EAP dataset.
    TRACEMETRICS = 104

    TYPES = [
        (DISCOVER, "discover"),
        (ISSUE, "issue"),
        # RELEASE_HEALTH is surfaced under the legacy name "metrics".
        (
            RELEASE_HEALTH,
            "metrics",
        ),
        (ERROR_EVENTS, "error-events"),
        (TRANSACTION_LIKE, "transaction-like"),
        (SPANS, "spans"),
        (LOGS, "logs"),
        (TRACEMETRICS, "tracemetrics"),
    ]
    TYPE_NAMES = [t[1] for t in TYPES]


class DatasetSourcesTypes(Enum):
    """How a widget's dataset assignment was determined, frozen into this migration.

    NOTE: in the original, each member's trailing docstring described the
    *next* member (and the class docstring described UNKNOWN); the
    descriptions are realigned here as comments on the member they document.
    """

    # Ambiguous queries that haven't been or couldn't be categorized into a
    # specific dataset.
    UNKNOWN = 0
    # Dataset inferred by either running the query or using heuristics.
    INFERRED = 1
    # Canonical dataset, user explicitly selected it.
    USER = 2
    # An ambiguous dataset was forced to split (i.e. we picked a default).
    FORCED = 3
    # Dataset inferred by split script, version 1.
    SPLIT_VERSION_1 = 4
    # Dataset inferred by split script, version 2.
    SPLIT_VERSION_2 = 5
    # Dataset modified by transaction -> span migration.
    SPAN_MIGRATION_VERSION_1 = 6
    # Dataset modified by using the widget snapshot to restore the original
    # transaction query.
    RESTORED_SPAN_MIGRATION_VERSION_1 = 7
    # Dataset modified by the transaction -> span migration version 2.
    SPAN_MIGRATION_VERSION_2 = 8
    # Dataset modified by the transaction -> span migration version 3.
    SPAN_MIGRATION_VERSION_3 = 9
    # Dataset modified by the transaction -> span migration version 4
    # (fixing boolean bug).
    SPAN_MIGRATION_VERSION_4 = 10
    # Dataset modified by the transaction -> span migration version 5
    # (fixing boolean bug again).
    SPAN_MIGRATION_VERSION_5 = 11

    @classmethod
    def as_choices(cls) -> tuple[tuple[int, str], ...]:
        """Return ``(value, lowercase-name)`` pairs for Django choices."""
        return tuple((source.value, source.name.lower()) for source in cls)

    @classmethod
    def as_text_choices(cls) -> tuple[tuple[str, int], ...]:
        """Return ``(lowercase-name, value)`` pairs."""
        return tuple((source.name.lower(), source.value) for source in cls)


def migrate_transactions_to_spans_widgets_self_hosted(
    apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
    """Translate every transaction-based dashboard widget into a spans widget.

    Targets explicit TRANSACTION_LIKE widgets as well as legacy DISCOVER
    widgets whose split resolved to TRANSACTION_LIKE. Per-widget failures are
    reported to Sentry rather than aborting the whole migration.
    """
    transaction_like = DashboardWidgetTypes.TRANSACTION_LIKE
    widgets = DashboardWidget.objects.filter(
        Q(widget_type=transaction_like)
        | Q(
            widget_type=DashboardWidgetTypes.DISCOVER,
            discover_widget_split=transaction_like,
        )
    )

    for widget in RangeQuerySetWrapperWithProgressBar(widgets):
        try:
            translate_dashboard_widget(widget)
        except Exception as exc:
            sentry_sdk.capture_exception(exc)


def reverse_migrate_transactions_to_spans_widgets_self_hosted(
    apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
    """Restore spans widgets created by any version of the span migration.

    Only widgets whose ``dataset_source`` carries one of the
    SPAN_MIGRATION_VERSION_* markers are touched; per-widget failures are
    reported to Sentry and skipped.
    """
    migration_sources = [
        source.value
        for source in (
            DatasetSourcesTypes.SPAN_MIGRATION_VERSION_1,
            DatasetSourcesTypes.SPAN_MIGRATION_VERSION_2,
            DatasetSourcesTypes.SPAN_MIGRATION_VERSION_3,
            DatasetSourcesTypes.SPAN_MIGRATION_VERSION_4,
            DatasetSourcesTypes.SPAN_MIGRATION_VERSION_5,
        )
    ]
    widgets = DashboardWidget.objects.filter(
        widget_type=DashboardWidgetTypes.SPANS,
        dataset_source__in=migration_sources,
    )

    for widget in RangeQuerySetWrapperWithProgressBar(widgets):
        try:
            restore_transaction_widget(widget)
        except Exception as exc:
            sentry_sdk.capture_exception(exc)


class Migration(CheckedMigration):
    """Data migration: rewrite transaction-based dashboard widgets as spans
    widgets on self-hosted installs. Reversible — the reverse path uses the
    per-widget snapshot markers recorded by the forward pass.
    """

    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment

    is_post_deployment = True

    dependencies = [
        ("sentry", "1013_add_repositorysettings_table"),
    ]

    operations = [
        migrations.RunPython(
            migrate_transactions_to_spans_widgets_self_hosted,
            reverse_code=reverse_migrate_transactions_to_spans_widgets_self_hosted,
            # Tells the migration framework which table this data migration touches.
            hints={"tables": ["sentry_dashboardwidget"]},
        )
    ]
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
from sentry.hybridcloud.models.outbox import outbox_context
from sentry.models.dashboard import Dashboard
from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetQuery
from sentry.models.organization import Organization
from sentry.testutils.cases import SnubaTestCase, TestMigrations


class MigrateDiscoverQueriesToExploreQueriesSelfHostedTest(TestMigrations, SnubaTestCase):
    """Exercises migration 1014: transaction-based dashboard widgets must be
    rewritten as spans widgets (with a snapshot recorded), while error widgets
    and pre-existing spans widgets are left untouched.
    """

    migrate_from = "1013_add_repositorysettings_table"
    migrate_to = "1014_transactions_to_spans_widgets_self_hosted"

    def setup_before_migration(self, apps):
        # Fixture widgets created against the pre-migration schema. Numeric
        # widget_type / display_type values are used because the model-level
        # enum classes aren't importable at this migration state.

        with outbox_context(flush=False):
            self.org = Organization.objects.create(name="test", slug="test")
        self.dashboard = Dashboard.objects.create(
            organization_id=self.org.id, title="test dashboard"
        )

        self.transaction_widget = DashboardWidget.objects.create(
            dashboard_id=self.dashboard.id, widget_type=101, display_type=4
        )  # TRANSACTION_LIKE and TABLE
        self.transaction_widget_query = DashboardWidgetQuery.objects.create(
            order=0,
            widget_id=self.transaction_widget.id,
            name="Test Query",
            fields=["title", "count()", "count_unique(user)"],
            columns=["title"],
            aggregates=["count()", "count_unique(user)"],
            conditions="transaction:foo",
            field_aliases=["Title", "Count", "Unique Users"],
            orderby="count()",
        )

        self.transaction_widget_2 = DashboardWidget.objects.create(
            dashboard_id=self.dashboard.id, widget_type=101, display_type=0
        )  # TRANSACTION_LIKE and LINE CHART
        self.transaction_widget_query_2 = DashboardWidgetQuery.objects.create(
            order=0,
            widget_id=self.transaction_widget_2.id,
            name="Test Query 2",
            fields=["apdex(300)"],
            columns=[],
            aggregates=["apdex(300)"],
            conditions="transaction:foo",
            field_aliases=[],
        )

        self.discover_split_transaction_widget = DashboardWidget.objects.create(
            dashboard_id=self.dashboard.id,
            widget_type=0,
            display_type=0,
            discover_widget_split=101,
        )  # DISCOVER and TRANSACTION_LIKE and LINE CHART
        self.discover_split_transaction_widget_query = DashboardWidgetQuery.objects.create(
            order=0,
            widget_id=self.discover_split_transaction_widget.id,
            name="Test Query 3",
            fields=["transaction", "sum(transaction.duration)"],
            columns=["transaction"],
            aggregates=["sum(transaction.duration)"],
            conditions="",
            field_aliases=[],
        )

        self.error_widget = DashboardWidget.objects.create(
            dashboard_id=self.dashboard.id, widget_type=100, display_type=4
        )  # ERROR_EVENTS and TABLE
        self.error_widget_query = DashboardWidgetQuery.objects.create(
            order=0,
            widget_id=self.error_widget.id,
            name="Test Query 4",
            fields=["title", "count()"],
            columns=["title"],
            aggregates=["count()"],
            conditions="transaction:foo",
            field_aliases=["Title", "Count"],
            orderby="count()",
        )

        self.spans_widget = DashboardWidget.objects.create(
            dashboard_id=self.dashboard.id, widget_type=102, display_type=4
        )  # SPANS and TABLE
        self.spans_widget_query = DashboardWidgetQuery.objects.create(
            order=0,
            widget_id=self.spans_widget.id,
            name="Test Query 5",
            fields=["transaction", "count(span.duration)"],
            columns=["transaction"],
            aggregates=["count(span.duration)"],
            conditions="",
            field_aliases=[],
        )

    def test(self):
        """Assert the post-migration state of every fixture widget."""
        # Re-read everything from the DB so assertions see migrated values.
        self.transaction_widget.refresh_from_db()
        transaction_widget_query = DashboardWidgetQuery.objects.get(
            widget_id=self.transaction_widget.id
        )
        self.transaction_widget_2.refresh_from_db()
        transaction_widget_query_2 = DashboardWidgetQuery.objects.get(
            widget_id=self.transaction_widget_2.id
        )
        self.discover_split_transaction_widget.refresh_from_db()
        discover_split_transaction_widget_query = DashboardWidgetQuery.objects.get(
            widget_id=self.discover_split_transaction_widget.id
        )
        self.error_widget.refresh_from_db()
        error_widget_query = DashboardWidgetQuery.objects.get(widget_id=self.error_widget.id)
        self.spans_widget.refresh_from_db()
        spans_widget_query = DashboardWidgetQuery.objects.get(widget_id=self.spans_widget.id)

        # all transaction widgets should be spans widgets (102) and have snapshots
        assert self.transaction_widget.widget_type == 102
        assert self.transaction_widget.widget_snapshot is not None
        assert self.transaction_widget_2.widget_type == 102
        assert self.transaction_widget_2.widget_snapshot is not None
        assert self.discover_split_transaction_widget.widget_type == 102
        assert self.discover_split_transaction_widget.widget_snapshot is not None

        # all other widgets should not be changed
        assert self.error_widget.widget_type == 100
        assert self.error_widget.widget_snapshot is None
        assert error_widget_query.id == self.error_widget_query.id
        assert self.spans_widget.widget_type == 102
        assert self.spans_widget.widget_snapshot is None
        assert spans_widget_query.id == self.spans_widget_query.id

        # all transaction widget queries should be translated appropriately
        assert transaction_widget_query.fields == [
            "transaction",
            "count(span.duration)",
            "count_unique(user)",
        ]
        assert transaction_widget_query.conditions == "(transaction:foo) AND is_transaction:1"
        assert transaction_widget_query.orderby == "count(span.duration)"

        assert transaction_widget_query_2.fields == ["equation|apdex(span.duration,300)"]
        assert transaction_widget_query_2.conditions == "(transaction:foo) AND is_transaction:1"
        assert transaction_widget_query_2.orderby == ""

        assert discover_split_transaction_widget_query.fields == [
            "transaction",
            "sum(span.duration)",
        ]
        assert discover_split_transaction_widget_query.conditions == "is_transaction:1"
        assert discover_split_transaction_widget_query.orderby == ""
Loading