-
Notifications
You must be signed in to change notification settings - Fork 177
/
Copy pathrust-parse-impl.h
14500 lines (12481 loc) · 424 KB
/
rust-parse-impl.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright (C) 2020-2024 Free Software Foundation, Inc.
// This file is part of GCC.
// GCC is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 3, or (at your option) any later
// version.
// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
// You should have received a copy of the GNU General Public License
// along with GCC; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
/* Template implementation for Rust::Parser. Previously in rust-parse.cc (before
* Parser was template). Separated from rust-parse.h for readability. */
/* DO NOT INCLUDE ANYWHERE - this is automatically included with rust-parse.h
* This is also the reason why there are no include guards. */
#include "rust-common.h"
#include "rust-expr.h"
#include "rust-item.h"
#include "rust-common.h"
#include "rust-token.h"
#define INCLUDE_ALGORITHM
#include "rust-diagnostics.h"
#include "rust-dir-owner.h"
#include "rust-attribute-values.h"
#include "rust-keyword-values.h"
#include "rust-edition.h"
#include "optional.h"
namespace Rust {
/* Left binding powers (LBPs) used by the Pratt expression parser. A higher
 * value binds more tightly; enumerators aliased to the same value share a
 * precedence level. */
enum binding_powers
{
  // Highest priority
  LBP_HIGHEST = 100,
  LBP_PATH = 95,
  LBP_METHOD_CALL = 90,
  LBP_FIELD_EXPR = 85,
  LBP_FUNCTION_CALL = 80,
  LBP_ARRAY_REF = LBP_FUNCTION_CALL,
  LBP_QUESTION_MARK = 75, // unary postfix - counts as left
  LBP_UNARY_PLUS = 70,		    // Used only when the null denotation is +
  LBP_UNARY_MINUS = LBP_UNARY_PLUS, // Used only when the null denotation is -
  LBP_UNARY_ASTERISK = LBP_UNARY_PLUS, // deref operator - unary prefix
  LBP_UNARY_EXCLAM = LBP_UNARY_PLUS,
  LBP_UNARY_AMP = LBP_UNARY_PLUS,
  LBP_UNARY_AMP_MUT = LBP_UNARY_PLUS,
  LBP_AS = 65,
  // multiplicative operators share one level
  LBP_MUL = 60,
  LBP_DIV = LBP_MUL,
  LBP_MOD = LBP_MUL,
  // additive operators share one level
  LBP_PLUS = 55,
  LBP_MINUS = LBP_PLUS,
  // bit shifts share one level
  LBP_L_SHIFT = 50,
  LBP_R_SHIFT = LBP_L_SHIFT,
  LBP_AMP = 45,	  // binary '&'
  LBP_CARET = 40, // binary '^'
  LBP_PIPE = 35,  // binary '|'
  // all comparison operators share one level
  LBP_EQUAL = 30,
  LBP_NOT_EQUAL = LBP_EQUAL,
  LBP_SMALLER_THAN = LBP_EQUAL,
  LBP_SMALLER_EQUAL = LBP_EQUAL,
  LBP_GREATER_THAN = LBP_EQUAL,
  LBP_GREATER_EQUAL = LBP_EQUAL,
  LBP_LOGICAL_AND = 25,
  LBP_LOGICAL_OR = 20,
  // range operators share one level
  LBP_DOT_DOT = 15,
  LBP_DOT_DOT_EQ = LBP_DOT_DOT,
  // TODO: note all these assig operators are RIGHT associative!
  LBP_ASSIG = 10,
  LBP_PLUS_ASSIG = LBP_ASSIG,
  LBP_MINUS_ASSIG = LBP_ASSIG,
  LBP_MULT_ASSIG = LBP_ASSIG,
  LBP_DIV_ASSIG = LBP_ASSIG,
  LBP_MOD_ASSIG = LBP_ASSIG,
  LBP_AMP_ASSIG = LBP_ASSIG,
  LBP_PIPE_ASSIG = LBP_ASSIG,
  LBP_CARET_ASSIG = LBP_ASSIG,
  LBP_L_SHIFT_ASSIG = LBP_ASSIG,
  LBP_R_SHIFT_ASSIG = LBP_ASSIG,
  // return, break, and closures as lowest priority?
  LBP_RETURN = 5,
  LBP_BREAK = LBP_RETURN,
  LBP_CLOSURE = LBP_RETURN, // unary prefix operators
#if 0
  // rust precedences
  // used for closures
  PREC_CLOSURE = -40,
  // used for break, continue, return, and yield
  PREC_JUMP = -30,
  // used for range (although weird comment in rustc about this)
  PREC_RANGE = -10,
  // used for binary operators mentioned below - also cast, colon (type),
  // assign, assign_op
  PREC_BINOP = FROM_ASSOC_OP,
  // used for box, address_of, let, unary (again, weird comment on let)
  PREC_PREFIX = 50,
  // used for await, call, method call, field, index, try,
  // inline asm, macro invocation
  PREC_POSTFIX = 60,
  // used for array, repeat, tuple, literal, path, paren, if,
  // while, for, 'loop', match, block, try block, async, struct
  PREC_PAREN = 99,
  PREC_FORCE_PAREN = 100,
#endif
  // lowest priority
  LBP_LOWEST = 0
};
/* Returns whether the token can start a type (i.e. there is a valid type
 * beginning with the token). */
inline bool
can_tok_start_type (TokenId id)
{
  switch (id)
    {
    default:
      // anything else cannot begin a type
      return false;
    case EXCLAM:
    case LEFT_SQUARE:
    case LEFT_ANGLE:
    case UNDERSCORE:
    // pointer and reference types
    case ASTERISK:
    case AMP:
    // path-like and trait-object beginnings
    case LIFETIME:
    case IDENTIFIER:
    case SUPER:
    case SELF:
    case SELF_ALIAS:
    case CRATE:
    case DOLLAR_SIGN:
    case SCOPE_RESOLUTION:
    // tuple and parenthesised types
    case LEFT_PAREN:
    // qualifiers and function/trait type keywords
    case FOR:
    case ASYNC:
    case CONST:
    case UNSAFE:
    case EXTERN_KW:
    case FN_KW:
    case IMPL:
    case DYN:
    case QUESTION_MARK:
      return true;
    }
}
/* Returns whether the token id is (or is likely to be) a right angle bracket.
 * i.e. '>', '>>', '>=' and '>>=' tokens - every token whose spelling begins
 * with '>'. */
inline bool
is_right_angle_tok (TokenId id)
{
  return id == RIGHT_ANGLE || id == RIGHT_SHIFT || id == GREATER_OR_EQUAL
	 || id == RIGHT_SHIFT_EQ;
}
/* HACK-y special handling for skipping a right angle token at the end of
* generic arguments.
* Currently, this replaces the "current token" with one that is identical
* except has the leading '>' removed (e.g. '>>' becomes '>'). This is bad
* for several reasons - it modifies the token stream to something that
* actually doesn't make syntactic sense, it may not worked if the token
* has already been skipped, etc. It was done because it would not
* actually require inserting new items into the token stream (which I
* thought would take more work to not mess up) and because I wasn't sure
* if the "already seen right angle" flag in the parser would work
* correctly.
* Those two other approaches listed are in my opinion actually better
* long-term - insertion is probably best as it reflects syntactically
* what occurs. On the other hand, I need to do a code audit to make sure
* that insertion doesn't mess anything up. So that's a FIXME. */
template <typename ManagedTokenSource>
bool
Parser<ManagedTokenSource>::skip_generics_right_angle ()
{
  /* Tokens whose spelling begins with '>' ('>>', '>=', '>>=') are split via
   * the lexer so that the leading '>' can be consumed as the closer of a
   * generic argument list while the remainder stays in the token stream. */
  const_TokenPtr tok = lexer.peek_token ();
  switch (tok->get_id ())
    {
    case RIGHT_ANGLE:
      // already a plain '>' - nothing to split
      break;
    case RIGHT_SHIFT:
      // '>>' becomes '>' followed by '>'
      lexer.split_current_token (RIGHT_ANGLE, RIGHT_ANGLE);
      break;
    case GREATER_OR_EQUAL:
      // '>=' becomes '>' followed by '='
      lexer.split_current_token (RIGHT_ANGLE, EQUAL);
      break;
    case RIGHT_SHIFT_EQ:
      // '>>=' becomes '>' followed by '>='
      lexer.split_current_token (RIGHT_ANGLE, GREATER_OR_EQUAL);
      break;
    default:
      add_error (Error (tok->get_locus (),
			"expected %<>%> at end of generic argument - found %qs",
			tok->get_token_description ()));
      return false;
    }

  // consume the (possibly newly split off) '>' token
  lexer.skip_token ();
  return true;
}
/* Gets left binding power for specified token.
 * Not suitable for use at the moment or possibly ever because binding power
 * cannot be purely determined from operator token with Rust grammar - e.g.
 * method call and field access have different left binding powers but the
 * same operator token. Tokens that cannot appear in infix position map to
 * LBP_LOWEST. */
template <typename ManagedTokenSource>
int
Parser<ManagedTokenSource>::left_binding_power (const_TokenPtr token)
{
  // HACK: called with "peek_token()", so lookahead is "peek_token(1)"
  switch (token->get_id ())
    {
    /* TODO: issue here - distinguish between method calls and field access
     * somehow? Also would have to distinguish between paths and function
     * calls (:: operator), maybe more stuff. */
    /* Current plan for tackling LBP - don't do it based on token, use
     * lookahead. Or alternatively, only use Pratt parsing for OperatorExpr
     * and handle other expressions without it. rustc only considers
     * arithmetic, logical/relational, 'as',
     * '?=', ranges, colons, and assignment to have operator precedence and
     * associativity rules applicable. It then has
     * a separate "ExprPrecedence" that also includes binary operators. */
    // TODO: handle operator overloading - have a function replace the
    // operator?
    /*case DOT:
	return LBP_DOT;*/
    case SCOPE_RESOLUTION:
      // '::' should be consumed as part of path parsing, not Pratt parsing
      rust_debug (
	"possible error - looked up LBP of scope resolution operator. should "
	"be handled elsewhere.");
      return LBP_PATH;
    /* Resolved by lookahead HACK that should work with current code. If next
     * token is identifier and token after that isn't parenthesised expression
     * list, it is a field reference. */
    case DOT:
      if (lexer.peek_token (1)->get_id () == IDENTIFIER
	  && lexer.peek_token (2)->get_id () != LEFT_PAREN)
	{
	  return LBP_FIELD_EXPR;
	}
      return LBP_METHOD_CALL;
    case LEFT_PAREN:
      return LBP_FUNCTION_CALL;
    case LEFT_SQUARE:
      return LBP_ARRAY_REF;
    // postfix question mark (i.e. error propagation expression)
    case QUESTION_MARK:
      return LBP_QUESTION_MARK;
    case AS:
      return LBP_AS;
    // multiplicative operators
    case ASTERISK:
      return LBP_MUL;
    case DIV:
      return LBP_DIV;
    case PERCENT:
      return LBP_MOD;
    // additive operators
    case PLUS:
      return LBP_PLUS;
    case MINUS:
      return LBP_MINUS;
    // bit shifts
    case LEFT_SHIFT:
      return LBP_L_SHIFT;
    case RIGHT_SHIFT:
      return LBP_R_SHIFT;
    // binary & operator
    case AMP:
      return LBP_AMP;
    // binary ^ operator
    case CARET:
      return LBP_CARET;
    // binary | operator
    case PIPE:
      return LBP_PIPE;
    // comparison operators
    case EQUAL_EQUAL:
      return LBP_EQUAL;
    case NOT_EQUAL:
      return LBP_NOT_EQUAL;
    case RIGHT_ANGLE:
      return LBP_GREATER_THAN;
    case GREATER_OR_EQUAL:
      return LBP_GREATER_EQUAL;
    case LEFT_ANGLE:
      return LBP_SMALLER_THAN;
    case LESS_OR_EQUAL:
      return LBP_SMALLER_EQUAL;
    // lazy boolean operators
    case LOGICAL_AND:
      return LBP_LOGICAL_AND;
    case OR:
      return LBP_LOGICAL_OR;
    // range operators
    case DOT_DOT:
      return LBP_DOT_DOT;
    case DOT_DOT_EQ:
      return LBP_DOT_DOT_EQ;
    // assignment and compound-assignment operators
    case EQUAL:
      return LBP_ASSIG;
    case PLUS_EQ:
      return LBP_PLUS_ASSIG;
    case MINUS_EQ:
      return LBP_MINUS_ASSIG;
    case ASTERISK_EQ:
      return LBP_MULT_ASSIG;
    case DIV_EQ:
      return LBP_DIV_ASSIG;
    case PERCENT_EQ:
      return LBP_MOD_ASSIG;
    case AMP_EQ:
      return LBP_AMP_ASSIG;
    case PIPE_EQ:
      return LBP_PIPE_ASSIG;
    case CARET_EQ:
      return LBP_CARET_ASSIG;
    case LEFT_SHIFT_EQ:
      return LBP_L_SHIFT_ASSIG;
    case RIGHT_SHIFT_EQ:
      return LBP_R_SHIFT_ASSIG;
    /* HACK: float literal due to lexer misidentifying a dot then an integer as
     * a float */
    case FLOAT_LITERAL:
      return LBP_FIELD_EXPR;
      // field expr is same as tuple expr in precedence, i imagine
      // TODO: is this needed anymore? lexer shouldn't do that anymore
    // anything that can't appear in an infix position is given lowest priority
    default:
      return LBP_LOWEST;
    }
}
// Checks whether the parser has consumed the whole token stream (current
// token is EOF).
template <typename ManagedTokenSource>
bool
Parser<ManagedTokenSource>::done_end_of_file ()
{
  const_TokenPtr current = lexer.peek_token ();
  return current->get_id () == END_OF_FILE;
}
// Parses the item sequence making up a module body or the implicit top-level
// module of a crate. On the first item that fails to parse, records an error,
// discards everything parsed so far, and stops.
template <typename ManagedTokenSource>
std::vector<std::unique_ptr<AST::Item>>
Parser<ManagedTokenSource>::parse_items ()
{
  std::vector<std::unique_ptr<AST::Item>> items;

  while (lexer.peek_token ()->get_id () != END_OF_FILE)
    {
      std::unique_ptr<AST::Item> parsed_item = parse_item (false);
      if (parsed_item == nullptr)
	{
	  add_error (Error (lexer.peek_token ()->get_locus (),
			    "failed to parse item in crate"));

	  // TODO: should all items be cleared?
	  items.clear ();
	  break;
	}
      items.push_back (std::move (parsed_item));
    }

  return items;
}
// Entry point: parses a whole crate (compilation unit) and emits every
// diagnostic collected along the way.
template <typename ManagedTokenSource>
std::unique_ptr<AST::Crate>
Parser<ManagedTokenSource>::parse_crate ()
{
  // crate-level inner attributes come first
  AST::AttrVec inner_attrs = parse_inner_attributes ();

  // then all top-level items
  auto items = parse_items ();

  // flush the accumulated error table
  for (const auto &err : error_table)
    err.emit ();

  return std::unique_ptr<AST::Crate> (
    new AST::Crate (std::move (items), std::move (inner_attrs)));
}
// Parses a contiguous run of inner attributes; stops at the first token that
// cannot begin one, or at the first attribute that fails to parse.
template <typename ManagedTokenSource>
AST::AttrVec
Parser<ManagedTokenSource>::parse_inner_attributes ()
{
  AST::AttrVec attrs;

  for (;;)
    {
      // an inner attribute starts with "#!" (not just "#") or an inner doc
      // comment
      TokenId first = lexer.peek_token ()->get_id ();
      bool starts_inner_attr
	= (first == HASH && lexer.peek_token (1)->get_id () == EXCLAM)
	  || first == INNER_DOC_COMMENT;
      if (!starts_inner_attr)
	break;

      AST::Attribute attr = parse_inner_attribute ();
      if (attr.is_empty ())
	// invalid attribute ends the contiguous run
	break;

      attrs.push_back (std::move (attr));
    }

  attrs.shrink_to_fit ();
  return attrs;
}
// Converts an inner or outer doc comment token into the pieces of a
// "doc = <string>" attribute: the path, the literal input, and the location.
template <typename ManagedTokenSource>
std::tuple<AST::SimplePath, std::unique_ptr<AST::AttrInput>, location_t>
Parser<ManagedTokenSource>::parse_doc_comment ()
{
  const_TokenPtr tok = lexer.peek_token ();
  location_t locus = tok->get_locus ();

  // single-segment path: "doc"
  std::vector<AST::SimplePathSegment> segments;
  segments.emplace_back (Values::Attributes::DOC, locus);
  AST::SimplePath doc_path (std::move (segments), false, locus);

  // the comment text becomes a string literal input
  AST::LiteralExpr text (tok->get_str (), AST::Literal::STRING,
			 PrimitiveCoreType::CORETYPE_STR, {}, locus);
  std::unique_ptr<AST::AttrInput> input (
    new AST::AttrInputLiteral (std::move (text)));

  lexer.skip_token ();
  return std::make_tuple (std::move (doc_path), std::move (input), locus);
}
// Parses a single inner attribute: either an inner doc comment or the form
// "#![ path attr_input? ]". Returns an empty attribute on failure.
template <typename ManagedTokenSource>
AST::Attribute
Parser<ManagedTokenSource>::parse_inner_attribute ()
{
  // inner doc comments desugar directly into doc attributes
  if (lexer.peek_token ()->get_id () == INNER_DOC_COMMENT)
    {
      auto parsed = parse_doc_comment ();
      return AST::Attribute (std::move (std::get<0> (parsed)),
			     std::move (std::get<1> (parsed)),
			     std::get<2> (parsed), true);
    }

  if (lexer.peek_token ()->get_id () != HASH)
    {
      add_error (
	Error (lexer.peek_token ()->get_locus (),
	       "BUG: token %<#%> is missing, but %<parse_inner_attribute%> "
	       "was invoked"));
      return AST::Attribute::create_empty ();
    }
  lexer.skip_token ();

  if (lexer.peek_token ()->get_id () != EXCLAM)
    {
      add_error (Error (lexer.peek_token ()->get_locus (),
			"expected %<!%> or %<[%> for inner attribute"));
      return AST::Attribute::create_empty ();
    }
  lexer.skip_token ();

  if (!skip_token (LEFT_SQUARE))
    return AST::Attribute::create_empty ();

  auto body = parse_attribute_body ();
  AST::Attribute attr (std::move (std::get<0> (body)),
		       std::move (std::get<1> (body)), std::get<2> (body),
		       true);

  // the closing ']' is still required for the attribute to be valid
  if (!skip_token (RIGHT_SQUARE))
    return AST::Attribute::create_empty ();

  return attr;
}
// Parses the interior of an attribute (inner or outer): a simple path
// followed by an optional attribute input.
template <typename ManagedTokenSource>
std::tuple<AST::SimplePath, std::unique_ptr<AST::AttrInput>, location_t>
Parser<ManagedTokenSource>::parse_attribute_body ()
{
  location_t start_locus = lexer.peek_token ()->get_locus ();

  AST::SimplePath path = parse_simple_path ();
  if (path.is_empty ())
    {
      add_error (Error (lexer.peek_token ()->get_locus (),
			"empty simple path in attribute"));

      // skip the remainder of the attribute so parsing can resume after it
      skip_after_end_attribute ();
      return std::make_tuple (std::move (path), nullptr, UNDEF_LOCATION);
    }

  // a null AttrInput is valid - attributes may have no input at all
  return std::make_tuple (std::move (path), parse_attr_input (), start_locus);
}
/* Determines whether token is a valid simple path segment. This does not
 * include scope resolution operators. */
inline bool
is_simple_path_segment (TokenId id)
{
  switch (id)
    {
    default:
      return false;
    case IDENTIFIER:
    case SUPER:
    case SELF:
    case CRATE:
    // assume that dollar sign leads to $crate
    case DOLLAR_SIGN:
      return true;
    }
}
// Parses a SimplePath AST node, if it exists. Does nothing otherwise.
// Returns an empty path when no path (or no valid first segment) is present.
template <typename ManagedTokenSource>
AST::SimplePath
Parser<ManagedTokenSource>::parse_simple_path ()
{
  bool has_opening_scope_resolution = false;
  location_t locus = UNKNOWN_LOCATION;

  /* Bail out early unless the current or the next token could be a segment -
   * the one-token lookahead covers a path that starts with "::". */
  if (!is_simple_path_segment (lexer.peek_token ()->get_id ())
      && !is_simple_path_segment (lexer.peek_token (1)->get_id ()))
    return AST::SimplePath::create_empty ();

  /* Checks for opening scope resolution (i.e. global scope fully-qualified
   * path) */
  if (lexer.peek_token ()->get_id () == SCOPE_RESOLUTION)
    {
      has_opening_scope_resolution = true;
      locus = lexer.peek_token ()->get_locus ();
      lexer.skip_token ();
    }

  // Parse single required simple path segment
  AST::SimplePathSegment segment = parse_simple_path_segment ();

  // get location if not gotten already (i.e. no leading "::")
  if (locus == UNKNOWN_LOCATION)
    locus = segment.get_locus ();

  std::vector<AST::SimplePathSegment> segments;

  // Return empty path if first, actually required segment is an error
  if (segment.is_error ())
    return AST::SimplePath::create_empty ();

  segments.push_back (std::move (segment));

  // Parse all other simple path segments ("::" segment)*
  while (lexer.peek_token ()->get_id () == SCOPE_RESOLUTION)
    {
      // Skip scope resolution operator
      lexer.skip_token ();

      AST::SimplePathSegment new_segment = parse_simple_path_segment ();

      // Return path as currently constructed if segment in error state.
      if (new_segment.is_error ())
	break;
      segments.push_back (std::move (new_segment));
    }

  // DEBUG: check for any empty segments (should have been filtered above)
  for (const auto &seg : segments)
    {
      if (seg.is_error ())
	{
	  rust_debug (
	    "when parsing simple path, somehow empty path segment was "
	    "not filtered out. Path begins with '%s'",
	    segments.at (0).as_string ().c_str ());
	}
    }

  return AST::SimplePath (std::move (segments), has_opening_scope_resolution,
			  locus);
  /* TODO: now that is_simple_path_segment exists, could probably start
   * actually making errors upon parse failure of segments and whatever */
}
/* Parses a single SimplePathSegment (does not handle the scope resolution
 * operators). Returns an error segment if the current token cannot be a
 * segment - which is not necessarily a parse error (e.g. end of path). */
template <typename ManagedTokenSource>
AST::SimplePathSegment
Parser<ManagedTokenSource>::parse_simple_path_segment ()
{
  using namespace Values;
  const_TokenPtr t = lexer.peek_token ();
  switch (t->get_id ())
    {
    case IDENTIFIER:
      lexer.skip_token ();
      return AST::SimplePathSegment (t->get_str (), t->get_locus ());
    case SUPER:
      lexer.skip_token ();
      return AST::SimplePathSegment (Keywords::SUPER, t->get_locus ());
    case SELF:
      lexer.skip_token ();
      return AST::SimplePathSegment (Keywords::SELF, t->get_locus ());
    case CRATE:
      lexer.skip_token ();
      return AST::SimplePathSegment (Keywords::CRATE, t->get_locus ());
    case DOLLAR_SIGN:
      // '$' only forms a segment as part of "$crate"
      if (lexer.peek_token (1)->get_id () == CRATE)
	{
	  /* NOTE(review): presumably skip_token (1) consumes both the '$'
	   * and the following 'crate' token - confirm against the lexer
	   * interface. */
	  lexer.skip_token (1);
	  return AST::SimplePathSegment ("$crate", t->get_locus ());
	}
      gcc_fallthrough ();
    default:
      // do nothing but inactivates warning from gcc when compiling
      /* could put the rust_error_at thing here but fallthrough (from failing
       * $crate condition) isn't completely obvious if it is. */
      return AST::SimplePathSegment::create_error ();
    }
  rust_unreachable ();
  /*rust_error_at(
    t->get_locus(), "invalid token '%s' in simple path segment",
    t->get_token_description());*/
  // this is not necessarily an error, e.g. end of path
  // return AST::SimplePathSegment::create_error();
}
// Parses a PathIdentSegment - an identifier segment of a non-SimplePath path.
// Returns an error segment if the current token cannot be one - which is not
// necessarily a parse error (e.g. end of path).
template <typename ManagedTokenSource>
AST::PathIdentSegment
Parser<ManagedTokenSource>::parse_path_ident_segment ()
{
  const_TokenPtr t = lexer.peek_token ();
  switch (t->get_id ())
    {
    case IDENTIFIER:
      lexer.skip_token ();
      return AST::PathIdentSegment (t->get_str (), t->get_locus ());
    case SUPER:
      lexer.skip_token ();
      return AST::PathIdentSegment (Values::Keywords::SUPER, t->get_locus ());
    case SELF:
      lexer.skip_token ();
      return AST::PathIdentSegment (Values::Keywords::SELF, t->get_locus ());
    case SELF_ALIAS:
      lexer.skip_token ();
      return AST::PathIdentSegment (Values::Keywords::SELF_ALIAS,
				    t->get_locus ());
    case CRATE:
      lexer.skip_token ();
      return AST::PathIdentSegment (Values::Keywords::CRATE, t->get_locus ());
    case DOLLAR_SIGN:
      // '$' only forms a segment as part of "$crate"
      if (lexer.peek_token (1)->get_id () == CRATE)
	{
	  /* NOTE(review): presumably skip_token (1) consumes both the '$'
	   * and the following 'crate' token - confirm against the lexer
	   * interface. */
	  lexer.skip_token (1);
	  return AST::PathIdentSegment ("$crate", t->get_locus ());
	}
      gcc_fallthrough ();
    default:
      /* do nothing but inactivates warning from gcc when compiling
       * could put the error_at thing here but fallthrough (from failing $crate
       * condition) isn't completely obvious if it is. */
      return AST::PathIdentSegment::create_error ();
    }
  rust_unreachable ();
  // not necessarily an error
}
// Parses an AttrInput AST node (polymorphic, as AttrInput is abstract):
// either a delimited token tree, or "= <macro invocation or literal>".
// Returns nullptr both on error and when the (optional) input is absent.
template <typename ManagedTokenSource>
std::unique_ptr<AST::AttrInput>
Parser<ManagedTokenSource>::parse_attr_input ()
{
  const_TokenPtr t = lexer.peek_token ();
  switch (t->get_id ())
    {
    case LEFT_PAREN:
    case LEFT_SQUARE:
    case LEFT_CURLY: {
	// must be a delimited token tree, so parse that
	std::unique_ptr<AST::AttrInput> input_tree (
	  new AST::DelimTokenTree (parse_delim_token_tree ()));

	// TODO: potential checks on DelimTokenTree before returning

	return input_tree;
      }
    case EQUAL: {
	// = LiteralExpr
	lexer.skip_token ();

	t = lexer.peek_token ();

	/* attempt to parse macro invocation first
	 * TODO: macros may/may not be allowed in attributes
	 * this is needed for "#[doc = include_str!(...)]" */
	if (is_simple_path_segment (t->get_id ()))
	  {
	    std::unique_ptr<AST::MacroInvocation> invoke
	      = parse_macro_invocation ({});

	    if (!invoke)
	      return nullptr;

	    return std::unique_ptr<AST::AttrInput> (
	      new AST::AttrInputMacro (std::move (invoke)));
	  }

	/* Ensure token is a "literal expression" (literally only a literal
	 * token of any type) */
	if (!t->is_literal ())
	  {
	    Error error (
	      t->get_locus (),
	      "unknown token %qs in attribute body - literal expected",
	      t->get_token_description ());
	    add_error (std::move (error));

	    skip_after_end_attribute ();
	    return nullptr;
	  }

	AST::Literal::LitType lit_type = AST::Literal::STRING;
	// Crappy mapping of token type to literal type
	switch (t->get_id ())
	  {
	  case INT_LITERAL:
	    lit_type = AST::Literal::INT;
	    break;
	  case FLOAT_LITERAL:
	    lit_type = AST::Literal::FLOAT;
	    break;
	  case CHAR_LITERAL:
	    lit_type = AST::Literal::CHAR;
	    break;
	  case BYTE_CHAR_LITERAL:
	    lit_type = AST::Literal::BYTE;
	    break;
	  case BYTE_STRING_LITERAL:
	    lit_type = AST::Literal::BYTE_STRING;
	    break;
	  case RAW_STRING_LITERAL:
	    lit_type = AST::Literal::RAW_STRING;
	    break;
	  case STRING_LITERAL:
	  default:
	    // any other literal token is treated as a plain string
	    lit_type = AST::Literal::STRING;
	    break; // TODO: raw string? don't eliminate it from lexer?
	  }

	// create actual LiteralExpr
	AST::LiteralExpr lit_expr (t->get_str (), lit_type, t->get_type_hint (),
				   {}, t->get_locus ());
	lexer.skip_token ();

	std::unique_ptr<AST::AttrInput> attr_input_lit (
	  new AST::AttrInputLiteral (std::move (lit_expr)));

	// do checks or whatever? none required, really
	// FIXME: shouldn't a skip token be required here?

	return attr_input_lit;
      }
      break;
    case RIGHT_SQUARE:
      // means AttrInput is missing, which is allowed
      return nullptr;
    default:
      add_error (
	Error (t->get_locus (),
	       "unknown token %qs in attribute body - attribute input or "
	       "none expected",
	       t->get_token_description ()));

      skip_after_end_attribute ();
      return nullptr;
    }
  rust_unreachable ();
  // TODO: find out how to stop gcc error on "no return value"
}
/* Returns true if the token id matches the delimiter type. Note that this
 * only operates for END delimiter tokens. */
inline bool
token_id_matches_delims (TokenId token_id, AST::DelimType delim_type)
{
  switch (delim_type)
    {
    case AST::PARENS:
      return token_id == RIGHT_PAREN;
    case AST::SQUARE:
      return token_id == RIGHT_SQUARE;
    case AST::CURLY:
      return token_id == RIGHT_CURLY;
    }
  return false;
}
/* Returns true if the likely result of parsing the next few tokens is a path.
 * Not guaranteed, though, especially in the case of syntax errors. */
inline bool
is_likely_path_next (TokenId next_token_id)
{
  switch (next_token_id)
    {
    default:
      return false;
    case IDENTIFIER:
    case SUPER:
    case SELF:
    case SELF_ALIAS:
    case CRATE:
    // maybe - maybe do extra check. But then requires another TokenId.
    case DOLLAR_SIGN:
    case SCOPE_RESOLUTION:
      return true;
    }
}
// Parses a delimited token tree
template <typename ManagedTokenSource>
AST::DelimTokenTree
Parser<ManagedTokenSource>::parse_delim_token_tree ()
{
const_TokenPtr t = lexer.peek_token ();
lexer.skip_token ();
location_t initial_loc = t->get_locus ();
// save delim type to ensure it is reused later
AST::DelimType delim_type = AST::PARENS;
// Map tokens to DelimType
switch (t->get_id ())
{
case LEFT_PAREN:
delim_type = AST::PARENS;
break;
case LEFT_SQUARE:
delim_type = AST::SQUARE;
break;
case LEFT_CURLY:
delim_type = AST::CURLY;
break;
default:
add_error (Error (t->get_locus (),
"unexpected token %qs - expecting delimiters (for a "
"delimited token tree)",
t->get_token_description ()));
return AST::DelimTokenTree::create_empty ();
}
// parse actual token tree vector - 0 or more
std::vector<std::unique_ptr<AST::TokenTree>> token_trees_in_tree;
auto delim_open
= std::unique_ptr<AST::Token> (new AST::Token (std::move (t)));
token_trees_in_tree.push_back (std::move (delim_open));
// repeat loop until finding the matching delimiter
t = lexer.peek_token ();
while (!token_id_matches_delims (t->get_id (), delim_type)
&& t->get_id () != END_OF_FILE)
{
std::unique_ptr<AST::TokenTree> tok_tree = parse_token_tree ();
if (tok_tree == nullptr)
{
// TODO: is this error handling appropriate?
Error error (
t->get_locus (),
"failed to parse token tree in delimited token tree - found %qs",
t->get_token_description ());
add_error (std::move (error));
return AST::DelimTokenTree::create_empty ();
}
token_trees_in_tree.push_back (std::move (tok_tree));
// lexer.skip_token();
t = lexer.peek_token ();
}
auto delim_close
= std::unique_ptr<AST::Token> (new AST::Token (std::move (t)));
token_trees_in_tree.push_back (std::move (delim_close));
AST::DelimTokenTree token_tree (delim_type, std::move (token_trees_in_tree),
initial_loc);
// parse end delimiters
t = lexer.peek_token ();