from typing_extensions import Generic, TypeVar

import pydantic_core
-import ast
-import io
-import tokenize
-import inspect

from typechat._internal.model import PromptSection, TypeChatLanguageModel
from typechat._internal.result import Failure, Result, Success
@@ -123,99 +119,4 @@ def _create_repair_prompt(self, validation_error: str) -> str:
'''
The following is a revised JSON object:
"""
-        return prompt
-
-def _convert_pythonic_comments_to_annotated_docs(schema_class, debug=False):
-
-    def _extract_tokens_between_line_numbers(gen, start_lineno, end_lineno):
-        # Extract tokens between start_lineno and end_lineno obtained from the tokenize generator
-        tokens = []
-        for tok in gen:
-            if tok.start[0] < start_lineno:  # Skip tokens before start_lineno
-                continue
-            if tok.start[0] >= start_lineno and tok.end[0] <= end_lineno:
-                # Add token if it is within the range
-                tokens.append((tok.type, tok.string))
-            elif tok.start[0] > end_lineno:  # Stop if token is beyond end_lineno
-                break
-
-        return tokens
-
-    schema_path = inspect.getfile(schema_class)
-
-    with open(schema_path, 'r') as f:
-        schema_class_source = f.read()
-        gen = tokenize.tokenize(io.BytesIO(
-            schema_class_source.encode('utf-8')).readline)
-
-    tree = ast.parse(schema_class_source)
-
-    if debug:
-        print("Source code before transformation:")
-        print("--" * 50)
-        print(schema_class_source)
-        print("--" * 50)
-
-    has_comments = False  # Flag later used to perform imports of Annotated and Doc if needed
-
-    for node in tree.body:
-        if isinstance(node, ast.ClassDef):
-            for n in node.body:
-                if isinstance(n, ast.AnnAssign):  # Check if the node is an annotated assignment
-                    assgn_comment = None
-                    tokens = _extract_tokens_between_line_numbers(
-                        # Extract tokens between the line numbers of the annotated assignment
-                        gen, n.lineno, n.end_lineno
-                    )
-                    for toknum, tokval in tokens:
-                        if toknum == tokenize.COMMENT:
-                            # Extract the comment
-                            assgn_comment = tokval
-                            break
-
-                    if assgn_comment:
-                        # If a comment is found, transform the annotation to include the comment
-                        assgn_subscript = n.annotation
-                        has_comments = True
-                        n.annotation = ast.Subscript(
-                            value=ast.Name(id="Annotated", ctx=ast.Load()),
-                            slice=ast.Tuple(
-                                elts=[
-                                    assgn_subscript,
-                                    ast.Call(
-                                        func=ast.Name(
-                                            id="Doc", ctx=ast.Load()
-                                        ),
-                                        args=[
-                                            ast.Constant(
-                                                value=assgn_comment.strip("#").strip()
-                                            )
-                                        ],
-                                        keywords=[]
-                                    )
-                                ],
-                                ctx=ast.Load()
-                            ),
-                            ctx=ast.Load()
-                        )
-
-    if has_comments:
-        for node in tree.body:
-            if isinstance(node, ast.ImportFrom):
-                if node.module == "typing_extensions":
-                    if ast.alias(name="Annotated") not in node.names:
-                        node.names.append(ast.alias(name="Annotated"))
-                    if ast.alias(name="Doc") not in node.names:
-                        node.names.append(ast.alias(name="Doc"))
-
-    transformed_schema_source = ast.unparse(tree)
-
-    if debug:
-        print("Source code after transformation:")
-        print("--" * 50)
-        print(transformed_schema_source)
-        print("--" * 50)
-
-    namespace = {}
-    exec(transformed_schema_source, namespace)
-    return namespace[schema_class.__name__]
+        return prompt
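For reference, the helper deleted in this commit rewrote a schema module's source so that a trailing # comment on an annotated class field became Annotated[..., Doc(...)] metadata, then re-executed the transformed source and returned the rebuilt class. A minimal before/after sketch of that transformation (the UserProfile schema is a hypothetical example, not code from this repository, and it assumes a typing_extensions version that exports Doc):

# Hypothetical schema as a user might write it (before transformation):
from typing_extensions import TypedDict

class UserProfile(TypedDict):
    name: str  # The user's display name
    age: int  # Age in years

# Roughly what the removed helper produced by rewriting the AST (after transformation):
from typing_extensions import TypedDict, Annotated, Doc

class UserProfile(TypedDict):
    name: Annotated[str, Doc("The user's display name")]
    age: Annotated[int, Doc("Age in years")]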