Coverage for /var/srv/projects/api.amasfac.comuna18.com/tmp/venv/lib/python3.9/site-packages/sqlparse/filters/others.py: 23%

83 statements  

« prev     ^ index     » next       coverage.py v6.4.4, created at 2023-07-17 14:22 -0600

#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause

7 

import re

from sqlparse import sql, tokens as T
from sqlparse.utils import split_unquoted_newlines

12 

13 

class StripCommentsFilter:
    """Remove comment tokens from a statement.

    A removed comment is either dropped entirely or replaced by a single
    space (or by its trailing line breaks) so the remaining SQL stays
    valid and keeps its line structure.
    """

    @staticmethod
    def _process(tlist):
        """Strip all comment tokens from *tlist* in place."""
        def get_next_comment():
            # TODO(andi) Comment types should be unified, see related issue38
            return tlist.token_next_by(i=sql.Comment, t=T.Comment)

        def _get_insert_token(token):
            """Returns either a whitespace or the line breaks from token."""
            # See issue484 why line breaks should be preserved.
            # Note: The actual value for a line break is replaced by \n
            # in SerializerUnicode which will be executed in the
            # postprocessing state.
            m = re.search(r'((\r|\n)+) *$', token.value)
            if m is not None:
                return sql.Token(T.Whitespace.Newline, m.groups()[0])
            else:
                return sql.Token(T.Whitespace, ' ')

        tidx, token = get_next_comment()
        while token:
            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            nidx, next_ = tlist.token_next(tidx, skip_ws=False)
            # Replace by whitespace if prev and next exist and if they're not
            # whitespaces. This doesn't apply if prev or next is a parenthesis.
            if (prev_ is None or next_ is None
                    or prev_.is_whitespace or prev_.match(T.Punctuation, '(')
                    or next_.is_whitespace or next_.match(T.Punctuation, ')')):
                # Insert a whitespace to ensure the following SQL produces
                # a valid SQL (see #425).
                if prev_ is not None and not prev_.match(T.Punctuation, '('):
                    tlist.tokens.insert(tidx, _get_insert_token(token))
                tlist.tokens.remove(token)
            else:
                tlist.tokens[tidx] = _get_insert_token(token)

            tidx, token = get_next_comment()

    def process(self, stmt):
        """Recursively strip comments from *stmt* and return it."""
        # Process sub-groups first, then the statement itself.  A plain
        # loop (not a list comprehension used for side effects) makes
        # the recursion explicit.
        for sgroup in stmt.get_sublists():
            self.process(sgroup)
        StripCommentsFilter._process(stmt)
        return stmt

57 

58 

class StripWhitespaceFilter:
    """Collapse runs of whitespace tokens down to single spaces."""

    def _stripws(self, tlist):
        """Dispatch to a group-class-specific strip method, if one exists."""
        func_name = '_stripws_{cls}'.format(cls=type(tlist).__name__)
        func = getattr(self, func_name.lower(), self._stripws_default)
        func(tlist)

    @staticmethod
    def _stripws_default(tlist):
        """Collapse consecutive whitespace and drop leading whitespace."""
        last_was_ws = False
        is_first_char = True
        for token in tlist.tokens:
            if token.is_whitespace:
                # Empty the value instead of removing the token so the
                # token-list structure stays intact.
                token.value = '' if last_was_ws or is_first_char else ' '
            last_was_ws = token.is_whitespace
            is_first_char = False

    def _stripws_identifierlist(self, tlist):
        """Strip whitespace in identifier lists.

        Additionally removes newlines before commas, see issue140.
        """
        last_nl = None
        # Iterate over a copy since tokens are removed during iteration.
        for token in list(tlist.tokens):
            if last_nl and token.ttype is T.Punctuation and token.value == ',':
                tlist.tokens.remove(last_nl)
            last_nl = token if token.is_whitespace else None
        return self._stripws_default(tlist)

    def _stripws_parenthesis(self, tlist):
        """Strip whitespace just inside the parenthesis delimiters."""
        # tokens[0] is '(' and tokens[-1] is ')' — assumes both are
        # present, as produced by the sqlparse grouping step.
        while tlist.tokens[1].is_whitespace:
            tlist.tokens.pop(1)
        while tlist.tokens[-2].is_whitespace:
            tlist.tokens.pop(-2)
        self._stripws_default(tlist)

    def process(self, stmt, depth=0):
        """Recursively strip whitespace from *stmt* and return it."""
        for sgroup in stmt.get_sublists():
            self.process(sgroup, depth + 1)
        self._stripws(stmt)
        # Only the top-level call removes a trailing whitespace token.
        if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace:
            stmt.tokens.pop(-1)
        return stmt

102 

103 

class SpacesAroundOperatorsFilter:
    """Ensure a single space before and after operator/comparison tokens."""

    @staticmethod
    def _process(tlist):
        """Insert missing whitespace around operators in *tlist* in place."""
        ttypes = (T.Operator, T.Comparison)
        tidx, token = tlist.token_next_by(t=ttypes)
        while token:
            nidx, next_ = tlist.token_next(tidx, skip_ws=False)
            if next_ and next_.ttype != T.Whitespace:
                tlist.insert_after(tidx, sql.Token(T.Whitespace, ' '))

            pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
            if prev_ and prev_.ttype != T.Whitespace:
                tlist.insert_before(tidx, sql.Token(T.Whitespace, ' '))
                tidx += 1  # has to shift since token inserted before it

            tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)

    def process(self, stmt):
        """Recursively add operator spacing to *stmt* and return it."""
        # Plain loop instead of a side-effect list comprehension.
        for sgroup in stmt.get_sublists():
            self.process(sgroup)
        SpacesAroundOperatorsFilter._process(stmt)
        return stmt

127 

128 

# ---------------------------
# postprocess

131 

class SerializerUnicode:
    """Render a statement as text with trailing whitespace stripped."""

    @staticmethod
    def process(stmt):
        """Return *stmt* as a string, with each line right-stripped."""
        # NOTE(review): split_unquoted_newlines presumably keeps newlines
        # inside quoted literals intact — confirm in sqlparse.utils.
        stripped = [line.rstrip() for line in split_unquoted_newlines(stmt)]
        return '\n'.join(stripped)