diff --git a/CHANGELOG.md b/CHANGELOG.md index e06b429317..7caba13dd6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,8 +41,21 @@ - `regr_sxy` - `regr_syy` - `try_to_binary` + - `base64` + - `base64_decode_string` + - `base64_encode` + - `editdistance` + - `hex` + - `hex_encode` + - `instr` + - `log1p` + - `log2` + - `log10` + - `percentile_approx` + - `unbase64` - Added support for specifying a schema string (including implicit struct syntax) when calling `DataFrame.create_dataframe`. - Added support for `DataFrameWriter.insert_into/insertInto`. This method also supports local testing mode. +- Added support for multiple columns in the functions `map_cat` and `map_concat`. #### Experimental Features diff --git a/docs/source/snowpark/functions.rst b/docs/source/snowpark/functions.rst index afd01af63e..bd95298b34 100644 --- a/docs/source/snowpark/functions.rst +++ b/docs/source/snowpark/functions.rst @@ -81,6 +81,9 @@ Functions atanh atan2 avg + base64 + base64_decode_string + base64_encode bit_length bitmap_bit_position bitmap_bucket_number @@ -157,6 +160,7 @@ Functions desc_nulls_last div0 divnull + editdistance endswith equal_nan equal_null @@ -178,12 +182,15 @@ Functions grouping grouping_id hash + hex + hex_encode hour iff ifnull in_ initcap insert + instr is_array is_binary is_boolean @@ -217,6 +224,9 @@ Functions locate localtimestamp log + log1p + log2 + log10 lower lpad ltrim @@ -257,6 +267,7 @@ Functions parse_json parse_xml percent_rank + percentile_approx percentile_cont position pow @@ -350,6 +361,7 @@ Functions udaf udf udtf + unbase64 uniform unix_timestamp upper diff --git a/src/snowflake/snowpark/functions.py b/src/snowflake/snowpark/functions.py index ed22b89f28..8de99fa562 100644 --- a/src/snowflake/snowpark/functions.py +++ b/src/snowflake/snowpark/functions.py @@ -160,6 +160,7 @@ import functools import sys import typing +from functools import reduce from random import randint from types import ModuleType from typing import Callable, Dict, 
# Alias matching the Snowflake SQL / PySpark name PERCENTILE_APPROX.
percentile_approx = approx_percentile


@publicapi
def log1p(
    x: Union[ColumnOrName, int, float],
    _emit_ast: bool = True,
) -> Column:
    """
    Returns the natural logarithm of (1 + x).

    Args:
        x: A column, column name, or a numeric literal.

    Example::

        >>> df = session.create_dataframe([0, 1], schema=["a"])
        >>> df.select(log1p(df["a"]).alias("log1p")).collect()
        [Row(LOG1P=0.0), Row(LOG1P=0.6931471805599453)]
    """
    # Build the AST before wrapping `x` so numeric literals are recorded as
    # int/float rather than as Column literals.
    ast = build_function_expr("log1p", [x]) if _emit_ast else None
    # Fix: the original tagged the conversion with "log" (wrong function name
    # in error messages) and ran `x` through a conversion twice; convert
    # exactly once here, tagged with the correct public name.
    col_x = (
        lit(x, _emit_ast=False)
        if isinstance(x, (int, float))
        else _to_col_if_str(x, "log1p")
    )
    # LOG1P(x) == LN(1 + x); delegate to `ln`, attaching the recorded AST so
    # the traced expression reads as a `log1p` call, not an `ln` call.
    return ln(col_x + lit(1, _emit_ast=False), _emit_ast=False, _ast=ast)


@publicapi
def log10(
    x: Union[ColumnOrName, int, float],
    _emit_ast: bool = True,
) -> Column:
    """
    Returns the base-10 logarithm of x.

    Example::

        >>> df = session.create_dataframe([1, 10], schema=["a"])
        >>> df.select(log10(df["a"]).alias("log10")).collect()
        [Row(LOG10=0.0), Row(LOG10=1.0)]
    """
    # Record the public name "log10" in the AST, then delegate to LOG(10, x).
    ast = build_function_expr("log10", [x]) if _emit_ast else None
    return _log10(x, _emit_ast=False, _ast=ast)


@publicapi
def log2(
    x: Union[ColumnOrName, int, float],
    _emit_ast: bool = True,
) -> Column:
    """
    Returns the base-2 logarithm of x.

    Example::

        >>> df = session.create_dataframe([1, 2, 8], schema=["a"])
        >>> df.select(log2(df["a"]).alias("log2")).collect()
        [Row(LOG2=0.0), Row(LOG2=1.0), Row(LOG2=3.0)]
    """
    # Record the public name "log2" in the AST, then delegate to LOG(2, x).
    ast = build_function_expr("log2", [x]) if _emit_ast else None
    return _log2(x, _emit_ast=False, _ast=ast)


# Fixed-base wrappers, shared with the Modin log2 and log10 implementations.
# `_ast` lets the public log2/log10 wrappers substitute their own recorded AST.
def _log2(
    x: Union[ColumnOrName, int, float],
    _emit_ast: bool = True,
    _ast: Optional[proto.Expr] = None,
) -> Column:
    return log(2, x, _emit_ast=_emit_ast, _ast=_ast)


def _log10(
    x: Union[ColumnOrName, int, float],
    _emit_ast: bool = True,
    _ast: Optional[proto.Expr] = None,
) -> Column:
    return log(10, x, _emit_ast=_emit_ast, _ast=_ast)


@publicapi
def map_cat(
    col1: ColumnOrName, col2: ColumnOrName, *cols: ColumnOrName, _emit_ast: bool = True
) -> Column:
    """Returns the concatenation of two or more MAPs.

    Args:
        col1: The source map.
        col2: The map to be appended to col1.
        cols: Additional maps to be appended.

    Example::

        >>> df = session.sql("select {'k1': 'v1'} :: MAP(STRING,STRING) as A, {'k2': 'v2'} :: MAP(STRING,STRING) as B, {'k3': 'v3'} :: MAP(STRING,STRING) as C")
        >>> df.select(map_cat("A", "B", "C")).show()
        -------------------------------------------
        |"MAP_CAT(MAP_CAT(""A"", ""B""), ""C"")"  |
        -------------------------------------------
        |{                                        |
        |  "k1": "v1",                            |
        |  "k2": "v2",                            |
        |  "k3": "v3"                             |
        |}                                        |
        -------------------------------------------
    """
    # Record the full variadic call once, up front.
    ast = build_function_expr("map_cat", [col1, col2, *cols]) if _emit_ast else None

    maps = [_to_col_if_str(c, "map_cat") for c in (col1, col2, *cols)]

    def _concat_pair(first: Column, second: Column) -> Column:
        # Every intermediate MAP_CAT call carries the same AST; the outermost
        # expression is the one ultimately recorded.
        return builtin("map_cat", _ast=ast, _emit_ast=False)(first, second)

    # MAP_CAT is binary server-side; fold the maps left-to-right.
    return reduce(_concat_pair, maps)
@publicapi
def position(
    expr1: ColumnOrName,
    expr2: ColumnOrName,
    start_pos: int = 1,
    _emit_ast: bool = True,
    _ast: Optional[proto.Expr] = None,
) -> Column:
    """
    Searches for the first occurrence of the first argument in the second
    argument and, if successful, returns the position of the first argument
    within the second argument.

    `_ast` lets wrappers such as `instr` substitute their own recorded AST
    so the traced expression reflects the public call the user made.
    """
    c1 = _to_col_if_str(expr1, "position")
    c2 = _to_col_if_str(expr2, "position")
    return builtin("position", _ast=_ast, _emit_ast=_emit_ast)(c1, c2, lit(start_pos))


@publicapi
def base64_encode(
    e: ColumnOrName,
    max_line_length: Optional[int] = 0,
    alphabet: Optional[str] = None,
    _emit_ast: bool = True,
) -> Column:
    """
    Encodes the input (string or binary) using Base64 encoding.

    Args:
        e: Column (or column name) holding the value to encode.
        max_line_length: Optional maximum line length; a falsy value
            (0, the default, or None) omits the argument so the server
            default applies.
        alphabet: Optional alternative Base64 alphabet string; omitted
            when falsy.

    Example:
        >>> df = session.create_dataframe(["Snowflake", "Data"], schema=["input"])
        >>> df.select(base64_encode(col("input")).alias("encoded")).collect()
        [Row(ENCODED='U25vd2ZsYWtl'), Row(ENCODED='RGF0YQ==')]
    """
    # Record all three arguments (including defaults) so the AST round-trips.
    ast = (
        build_function_expr("base64_encode", [e, max_line_length, alphabet])
        if _emit_ast
        else None
    )
    args = [_to_col_if_str(e, "base64_encode")]
    if max_line_length:
        args.append(lit(max_line_length))
    if alphabet:
        args.append(lit(alphabet))
    return builtin("base64_encode", _ast=ast, _emit_ast=False)(*args)


# Alias kept for PySpark-style naming.
base64 = base64_encode


@publicapi
def base64_decode_string(
    e: ColumnOrName, alphabet: Optional[str] = None, _emit_ast: bool = True
) -> Column:
    """
    Decodes a Base64-encoded string to a string.

    Args:
        e: Column (or column name) holding the Base64 text to decode.
        alphabet: Optional alternative Base64 alphabet string; omitted
            when falsy.

    Example:
        >>> df = session.create_dataframe(["U25vd2ZsYWtl", "SEVMTE8="], schema=["input"])
        >>> df.select(base64_decode_string(col("input")).alias("decoded")).collect()
        [Row(DECODED='Snowflake'), Row(DECODED='HELLO')]
    """
    ast = (
        build_function_expr("base64_decode_string", [e, alphabet])
        if _emit_ast
        else None
    )
    args = [_to_col_if_str(e, "base64_decode_string")]
    if alphabet:
        args.append(lit(alphabet))
    # Fix: comment previously said "encode" — this calls the DECODE builtin.
    return builtin("base64_decode_string", _ast=ast, _emit_ast=False)(*args)


# Alias kept for PySpark-style naming.
unbase64 = base64_decode_string


@publicapi
def hex_encode(e: ColumnOrName, case: int = 1, _emit_ast: bool = True) -> Column:
    """
    Encodes the input using hexadecimal (also 'hex' or 'base16') encoding.

    Args:
        e: Column (or column name) holding the value to encode.
        case: Letter case of the output digits; the default (1) yields
            uppercase, as shown in the example below.

    Example:
        >>> df = session.create_dataframe(["Snowflake", "Hello"], schema=["input"])
        >>> df.select(hex_encode(col("input")).alias("hex_encoded")).collect()
        [Row(HEX_ENCODED='536E6F77666C616B65'), Row(HEX_ENCODED='48656C6C6F')]
    """
    ast = build_function_expr("hex_encode", [e, case]) if _emit_ast else None
    col_input = _to_col_if_str(e, "hex_encode")
    return builtin("hex_encode", _ast=ast, _emit_ast=False)(col_input, lit(case))


# Alias kept for PySpark-style naming.
hex = hex_encode


@publicapi
def editdistance(
    e1: ColumnOrName,
    e2: ColumnOrName,
    max_distance: Optional[Union[int, ColumnOrName]] = None,
    _emit_ast: bool = True,
) -> Column:
    """Computes the Levenshtein distance between two input strings.

    Optionally, a maximum distance can be specified. If the distance exceeds
    this value, the computation halts and returns the maximum distance.

    Example::

        >>> df = session.create_dataframe(
        ...     [["abc", "def"], ["abcdef", "abc"], ["snow", "flake"]],
        ...     schema=["s1", "s2"]
        ... )
        >>> df.select(
        ...     editdistance(col("s1"), col("s2")).alias("distance"),
        ...     editdistance(col("s1"), col("s2"), 2).alias("max_2_distance")
        ... ).collect()
        [Row(DISTANCE=3, MAX_2_DISTANCE=2), Row(DISTANCE=3, MAX_2_DISTANCE=2), Row(DISTANCE=5, MAX_2_DISTANCE=2)]
    """
    # Fix: include max_distance in the recorded AST when it is supplied; the
    # original always recorded only [e1, e2], losing the third argument.
    # Two-argument calls are recorded exactly as before.
    ast_args = [e1, e2] if max_distance is None else [e1, e2, max_distance]
    ast = build_function_expr("editdistance", ast_args) if _emit_ast else None

    s1 = _to_col_if_str(e1, "editdistance")
    s2 = _to_col_if_str(e2, "editdistance")

    args = [s1, s2]
    if max_distance is not None:
        args.append(
            lit(max_distance)
            if isinstance(max_distance, int)
            else _to_col_if_str(max_distance, "editdistance")
        )

    return builtin("editdistance", _ast=ast, _emit_ast=False)(*args)


@publicapi
def instr(str: ColumnOrName, substr: str, _emit_ast: bool = True) -> Column:
    """
    Locate the position of the first occurrence of substr column in the given string. Returns null if either of the arguments are null.

    Example::
        >>> df = session.create_dataframe([["hello world"], ["world hello"]], schema=["text"])
        >>> df.select(instr(col("text"), "world").alias("position")).collect()
        [Row(POSITION=7), Row(POSITION=1)]
    """
    # NOTE: the parameter name `str` shadows the builtin but is kept for
    # PySpark signature compatibility (public interface must not change).
    ast = build_function_expr("instr", [str, substr]) if _emit_ast else None
    haystack = _to_col_if_str(str, "instr")
    # POSITION(substr, str) returns the 1-based index of the first match;
    # pass our own AST so the trace records `instr`, not `position`.
    return position(lit(substr), haystack, _emit_ast=False, _ast=ast)
str(type_hints[key]) keep_literal = "Column" not in type_hint diff --git a/tests/ast/data/functions2.test b/tests/ast/data/functions2.test index 771d84e12d..8e4f51f2ab 100644 --- a/tests/ast/data/functions2.test +++ b/tests/ast/data/functions2.test @@ -302,6 +302,24 @@ df304 = df.select(locate("needle", col("expr")), locate("needle", lit("test stri df305 = df.select(size(col("expr")), size("A")) +df306 = df.select(base64_encode("A")) + +df307 = df.select(base64_decode_string("A")) + +df308 = df.select(hex_encode("A")) + +df309 = df.select(editdistance("A", "B")) + +df310 = df.select(map_cat("A", "B")) + +df311 = df.select(log1p("A")) + +df312 = df.select(log10("A")) + +df313 = df.select(log2("A")) + +df314 = df.select(instr("A", "test_str")) + ## EXPECTED UNPARSER OUTPUT df = session.table("table1") @@ -604,6 +622,24 @@ df304 = df.select(charindex(lit("needle"), col("expr"), lit(1)), charindex(lit(" df305 = df.select(size(col("expr")), size("A")) +df306 = df.select(base64_encode("A", 0, None)) + +df307 = df.select(base64_decode_string("A", None)) + +df308 = df.select(hex_encode("A", 1)) + +df309 = df.select(editdistance("A", "B")) + +df310 = df.select(map_cat("A", "B")) + +df311 = df.select(log1p("A")) + +df312 = df.select(log10("A")) + +df313 = df.select(log2("A")) + +df314 = df.select(instr("A", "test_str")) + ## EXPECTED ENCODED AST interned_value_table { @@ -25350,6 +25386,663 @@ body { } } } +body { + assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "base64_encode" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 44 + end_line: 327 + file: 2 + start_column: 26 + start_line: 327 + } + v: "A" + } + } + pos_args { + int64_val { + src { + end_column: 44 + end_line: 327 + file: 2 + start_column: 26 + start_line: 327 + } + } + } + pos_args { + null_val { + src { + end_column: 44 + end_line: 327 + file: 2 + start_column: 26 + start_line: 327 + } + } + } + 
src { + end_column: 44 + end_line: 327 + file: 2 + start_column: 26 + start_line: 327 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 45 + end_line: 327 + file: 2 + start_column: 16 + start_line: 327 + } + variadic: true + } + } + symbol { + value: "df306" + } + uid: 151 + var_id { + bitfield1: 151 + } + } +} +body { + assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "base64_decode_string" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 51 + end_line: 329 + file: 2 + start_column: 26 + start_line: 329 + } + v: "A" + } + } + pos_args { + null_val { + src { + end_column: 51 + end_line: 329 + file: 2 + start_column: 26 + start_line: 329 + } + } + } + src { + end_column: 51 + end_line: 329 + file: 2 + start_column: 26 + start_line: 329 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 52 + end_line: 329 + file: 2 + start_column: 16 + start_line: 329 + } + variadic: true + } + } + symbol { + value: "df307" + } + uid: 152 + var_id { + bitfield1: 152 + } + } +} +body { + assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "hex_encode" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 41 + end_line: 331 + file: 2 + start_column: 26 + start_line: 331 + } + v: "A" + } + } + pos_args { + int64_val { + src { + end_column: 41 + end_line: 331 + file: 2 + start_column: 26 + start_line: 331 + } + v: 1 + } + } + src { + end_column: 41 + end_line: 331 + file: 2 + start_column: 26 + start_line: 331 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 42 + end_line: 331 + file: 2 + start_column: 16 + start_line: 331 + } + variadic: true + } + } + symbol { + value: "df308" + } + uid: 153 + var_id { + bitfield1: 153 + } + } +} +body { 
+ assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "editdistance" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 48 + end_line: 333 + file: 2 + start_column: 26 + start_line: 333 + } + v: "A" + } + } + pos_args { + string_val { + src { + end_column: 48 + end_line: 333 + file: 2 + start_column: 26 + start_line: 333 + } + v: "B" + } + } + src { + end_column: 48 + end_line: 333 + file: 2 + start_column: 26 + start_line: 333 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 49 + end_line: 333 + file: 2 + start_column: 16 + start_line: 333 + } + variadic: true + } + } + symbol { + value: "df309" + } + uid: 154 + var_id { + bitfield1: 154 + } + } +} +body { + assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "map_cat" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 43 + end_line: 335 + file: 2 + start_column: 26 + start_line: 335 + } + v: "A" + } + } + pos_args { + string_val { + src { + end_column: 43 + end_line: 335 + file: 2 + start_column: 26 + start_line: 335 + } + v: "B" + } + } + src { + end_column: 43 + end_line: 335 + file: 2 + start_column: 26 + start_line: 335 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 44 + end_line: 335 + file: 2 + start_column: 16 + start_line: 335 + } + variadic: true + } + } + symbol { + value: "df310" + } + uid: 155 + var_id { + bitfield1: 155 + } + } +} +body { + assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "log1p" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 36 + end_line: 337 + file: 2 + start_column: 26 + start_line: 337 + } + v: "A" + } + } + src { + end_column: 36 + end_line: 337 + file: 2 + 
start_column: 26 + start_line: 337 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 37 + end_line: 337 + file: 2 + start_column: 16 + start_line: 337 + } + variadic: true + } + } + symbol { + value: "df311" + } + uid: 156 + var_id { + bitfield1: 156 + } + } +} +body { + assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "log10" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 36 + end_line: 339 + file: 2 + start_column: 26 + start_line: 339 + } + v: "A" + } + } + src { + end_column: 36 + end_line: 339 + file: 2 + start_column: 26 + start_line: 339 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 37 + end_line: 339 + file: 2 + start_column: 16 + start_line: 339 + } + variadic: true + } + } + symbol { + value: "df312" + } + uid: 157 + var_id { + bitfield1: 157 + } + } +} +body { + assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "log2" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 35 + end_line: 341 + file: 2 + start_column: 26 + start_line: 341 + } + v: "A" + } + } + src { + end_column: 35 + end_line: 341 + file: 2 + start_column: 26 + start_line: 341 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 36 + end_line: 341 + file: 2 + start_column: 16 + start_line: 341 + } + variadic: true + } + } + symbol { + value: "df313" + } + uid: 158 + var_id { + bitfield1: 158 + } + } +} +body { + assign { + expr { + sp_dataframe_select__columns { + cols { + apply_expr { + fn { + builtin_fn { + name { + name { + sp_name_flat { + name: "instr" + } + } + } + } + } + pos_args { + string_val { + src { + end_column: 48 + end_line: 343 + file: 2 + start_column: 26 + start_line: 343 + } + v: "A" + } + } + pos_args { + string_val 
{ + src { + end_column: 48 + end_line: 343 + file: 2 + start_column: 26 + start_line: 343 + } + v: "test_str" + } + } + src { + end_column: 48 + end_line: 343 + file: 2 + start_column: 26 + start_line: 343 + } + } + } + df { + sp_dataframe_ref { + id { + bitfield1: 1 + } + } + } + src { + end_column: 49 + end_line: 343 + file: 2 + start_column: 16 + start_line: 343 + } + variadic: true + } + } + symbol { + value: "df314" + } + uid: 159 + var_id { + bitfield1: 159 + } + } +} client_ast_version: 1 client_language { python_language {