@@ -295,9 +295,10 @@ def maximum_line_length(physical_line, max_line_length, multiline,
         # comments, but still report the error when the 72 first chars
         # are whitespaces.
         chunks = line.split()
-        if ((len(chunks) == 1 and multiline) or
-            (len(chunks) == 2 and chunks[0] == '#')) and \
-                len(line) - len(chunks[-1]) < max_line_length - 7:
+        len_chunks = len(chunks)
+        if ((len_chunks == 1 and multiline) or
+            (len_chunks == 2 and chunks[0] == '#')) and \
+                length - len(chunks[-1]) < max_line_length - 7:
             return
         if length > max_line_length:
             return (max_line_length, "E501 line too long "
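Review note: `length` was already bound to `len(line)` earlier in the function, so substituting it here only removes a repeated call, and hoisting `len(chunks)` into `len_chunks` does the same for the two membership tests. A minimal sketch of the equivalence (the sample line and the `multiline` stand-in are illustrative, not from the patch):

    multiline = False                      # stand-in for the checker argument
    line = "# https://example.com/a/very/long/url"
    length = len(line)                     # bound earlier in the real function
    chunks = line.split()
    len_chunks = len(chunks)               # hoisted: len() evaluated once
    old = ((len(chunks) == 1 and multiline) or
           (len(chunks) == 2 and chunks[0] == '#')) and \
        len(line) - len(chunks[-1]) < 79 - 7
    new = ((len_chunks == 1 and multiline) or
           (len_chunks == 2 and chunks[0] == '#')) and \
        length - len(chunks[-1]) < 79 - 7
    assert old == new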
@@ -406,8 +407,9 @@ def blank_lines(logical_line, blank_lines, indent_level, line_number,
                 # Search backwards for a def ancestor or tree root
                 # (top level).
                 for line in lines[line_number - top_level_lines::-1]:
-                    if line.strip() and expand_indent(line) < ancestor_level:
-                        ancestor_level = expand_indent(line)
+                    line_indents = expand_indent(line)
+                    if line.strip() and line_indents < ancestor_level:
+                        ancestor_level = line_indents
                         nested = STARTSWITH_DEF_REGEX.match(line.lstrip())
                     if nested or ancestor_level == 0:
                         break
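Review note: `expand_indent()` is a pure helper, so binding its result once per line is safe; the old code merely short-circuited the call on blank lines, while the new code pays one extra call there and saves one everywhere else. A sketch of the caching pattern, with a simplified stand-in for `expand_indent` (the real one also expands tabs):

    def expand_indent(line):  # simplified stand-in, spaces only
        return len(line) - len(line.lstrip())

    ancestor_level = 8
    for line in reversed(["def f():", "    def g():", "        pass"]):
        line_indents = expand_indent(line)  # computed once, used twice
        if line.strip() and line_indents < ancestor_level:
            ancestor_level = line_indents
    assert ancestor_level == 0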
@@ -786,7 +788,7 @@ def whitespace_before_parameters(logical_line, tokens):
     E211: dict['key'] = list [index]
     """
     prev_type, prev_text, __, prev_end, __ = tokens[0]
-    for index in range(1, len(tokens)):
+    for index, _ in enumerate(tokens[1:], start=1):
         token_type, text, start, end, __ = tokens[index]
         if (
             token_type == tokenize.OP and
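Review note: `tokens[0]` already seeds the `prev_*` variables, so the loop has to start at index 1; a bare `enumerate(tokens)` would begin at 0 and compare the first token against itself. Slicing with an explicit `start` keeps the indices aligned with the full list:

    tokens = ['a', 'b', 'c']
    indices = [i for i, _ in enumerate(tokens[1:], start=1)]
    assert indices == list(range(1, len(tokens)))  # [1, 2]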
@@ -1160,6 +1162,14 @@ def imports_on_separate_lines(logical_line):
             yield found, "E401 multiple imports on one line"


+def is_string_literal(line):
+    if line[0] in 'uUbB':
+        line = line[1:]
+    if line and line[0] in 'rR':
+        line = line[1:]
+    return line and (line[0] == '"' or line[0] == "'")
+
+
 @register_check
 def module_imports_on_top_of_file(
         logical_line, indent_level, checker_state, noqa):
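Hoisting the helper to module level means it is defined once at import time rather than rebuilt on every call to `module_imports_on_top_of_file`; its behavior on a logical line is unchanged. For example:

    assert is_string_literal('"docstring"')
    assert is_string_literal("br'payload'")  # prefix letters are stripped first
    assert not is_string_literal('import os')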
@@ -1178,12 +1188,6 @@ def module_imports_on_top_of_file(

    Okay: if x:\n    import os
    """  # noqa
-    def is_string_literal(line):
-        if line[0] in 'uUbB':
-            line = line[1:]
-        if line and line[0] in 'rR':
-            line = line[1:]
-        return line and (line[0] == '"' or line[0] == "'")

    allowed_keywords = (
        'try', 'except', 'else', 'finally', 'with', 'if', 'elif')
@@ -1576,7 +1580,8 @@ def ambiguous_identifier(logical_line, tokens):
     brace_depth = 0
     idents_to_avoid = ('l', 'O', 'I')
     prev_type, prev_text, prev_start, prev_end, __ = tokens[0]
-    for index in range(1, len(tokens)):
+    len_tokens = len(tokens)
+    for index in range(1, len_tokens):
         token_type, text, start, end, line = tokens[index]
         ident = pos = None
         # find function definitions
@@ -1601,32 +1606,55 @@ def ambiguous_identifier(logical_line, tokens):
             pos = prev_start
         # identifiers bound to values with 'as', 'for',
         # 'global', or 'nonlocal'
-        if prev_text in ('as', 'for', 'global', 'nonlocal'):
-            if text in idents_to_avoid:
-                ident = text
-                pos = start
+        if prev_text in ('as', 'for', 'global', 'nonlocal') and \
+                text in idents_to_avoid:
+            ident = text
+            pos = start
         # function / lambda parameter definitions
         if (
             func_depth is not None and
             not seen_colon and
-            index < len(tokens) - 1 and tokens[index + 1][1] in ':,=)' and
+            index < len_tokens - 1 and tokens[index + 1][1] in ':,=)' and
             prev_text in {'lambda', ',', '*', '**', '('} and
             text in idents_to_avoid
         ):
             ident = text
             pos = start
-        if prev_text == 'class':
-            if text in idents_to_avoid:
-                yield start, "E742 ambiguous class definition '%s'" % text
-        if prev_text == 'def':
-            if text in idents_to_avoid:
-                yield start, "E743 ambiguous function definition '%s'" % text
+        if prev_text == 'class' and \
+                text in idents_to_avoid:
+            yield start, "E742 ambiguous class definition '%s'" % text
+        if prev_text == 'def' and \
+                text in idents_to_avoid:
+            yield start, "E743 ambiguous function definition '%s'" % text
         if ident:
             yield pos, "E741 ambiguous variable name '%s'" % ident
         prev_text = text
         prev_start = start


+# https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
+python_3000_valid = frozenset([
+    '\n',
+    '\\',
+    '\'',
+    '"',
+    'a',
+    'b',
+    'f',
+    'n',
+    'r',
+    't',
+    'v',
+    '0', '1', '2', '3', '4', '5', '6', '7',
+    'x',
+
+    # Escape sequences only recognized in string literals
+    'N',
+    'u',
+    'U',
+])
+
+
 @register_check
 def python_3000_invalid_escape_sequence(logical_line, tokens, noqa):
     r"""Invalid escape sequences are deprecated in Python 3.6.
@@ -1637,27 +1665,7 @@ def python_3000_invalid_escape_sequence(logical_line, tokens, noqa):
     if noqa:
         return

-    # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
-    valid = [
-        '\n',
-        '\\',
-        '\'',
-        '"',
-        'a',
-        'b',
-        'f',
-        'n',
-        'r',
-        't',
-        'v',
-        '0', '1', '2', '3', '4', '5', '6', '7',
-        'x',
-
-        # Escape sequences only recognized in string literals
-        'N',
-        'u',
-        'U',
-    ]
+    valid = python_3000_valid

     prefixes = []
     for token_type, text, start, _, _ in tokens:
@@ -1701,11 +1709,13 @@ def maximum_doc_length(logical_line, max_doc_length, noqa, tokens):
         return

     prev_token = None
-    skip_lines = set()
+    lines_to_skip = SKIP_COMMENTS.union([tokenize.STRING])
+    skip_lines = False
     # Skip lines that
     for token_type, text, start, end, line in tokens:
-        if token_type not in SKIP_COMMENTS.union([tokenize.STRING]):
-            skip_lines.add(line)
+        if token_type not in lines_to_skip:
+            skip_lines = True
+            break

     for token_type, text, start, end, line in tokens:
         # Skip lines that aren't pure strings
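Review note: this assumes `skip_lines` is only ever tested for truthiness further down (the old set was never queried for individual lines), so a boolean with an early `break` is equivalent and stops scanning at the first non-string, non-comment token. The same test could be spelled with `any()`; the token sets below are simplified stand-ins for the module's constants:

    import tokenize

    lines_to_skip = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.STRING])
    token_types = [tokenize.STRING, tokenize.NEWLINE]
    skip_lines = any(t not in lines_to_skip for t in token_types)
    assert skip_lines  # NEWLINE is not in the skip set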
@@ -1715,19 +1725,22 @@ def maximum_doc_length(logical_line, max_doc_length, noqa, tokens):
         # Only check comment-only lines
         if prev_token is None or prev_token in SKIP_TOKENS:
             lines = line.splitlines()
+            lines_len = len(lines)
             for line_num, physical_line in enumerate(lines):
                 if start[0] + line_num == 1 and line.startswith('#!'):
                     return
                 length = len(physical_line)
                 chunks = physical_line.split()
-                if token_type == tokenize.COMMENT:
-                    if (len(chunks) == 2 and
-                            length - len(chunks[-1]) < MAX_DOC_LENGTH):
-                        continue
-                if len(chunks) == 1 and line_num + 1 < len(lines):
-                    if (len(chunks) == 1 and
-                            length - len(chunks[-1]) < MAX_DOC_LENGTH):
-                        continue
+                len_chunks = len(chunks)
+                len_last_chunk = len(chunks[-1]) if chunks else None
+                if token_type == tokenize.COMMENT and \
+                        (len_chunks == 2 and
+                         length - len_last_chunk < MAX_DOC_LENGTH):
+                    continue
+                if len_chunks == 1 and line_num + 1 < lines_len and \
+                        (len_chunks == 1 and
+                         length - len_last_chunk < MAX_DOC_LENGTH):
+                    continue
                 if length > max_doc_length:
                     doc_error = (start[0] + line_num, max_doc_length)
                     yield (doc_error, "W505 doc line too long "
@@ -2145,17 +2158,16 @@ def check_all(self, expected=None, line_offset=0):
                     parens += 1
                 elif text in '}])':
                     parens -= 1
-                elif not parens:
-                    if token_type in NEWLINE:
-                        if token_type == tokenize.NEWLINE:
-                            self.check_logical()
-                            self.blank_before = 0
-                        elif len(self.tokens) == 1:
-                            # The physical line contains only this token.
-                            self.blank_lines += 1
-                            del self.tokens[0]
-                        else:
-                            self.check_logical()
+                elif not parens and token_type in NEWLINE:
+                    if token_type == tokenize.NEWLINE:
+                        self.check_logical()
+                        self.blank_before = 0
+                    elif len(self.tokens) == 1:
+                        # The physical line contains only this token.
+                        self.blank_lines += 1
+                        del self.tokens[0]
+                    else:
+                        self.check_logical()
         if self.tokens:
             self.check_physical(self.lines[-1])
             self.check_logical()