Coverage for /var/srv/projects/api.amasfac.comuna18.com/tmp/venv/lib/python3.9/site-packages/django/db/models/sql/query.py: 54%
1271 statements
« prev ^ index » next coverage.py v6.4.4, created at 2023-07-17 14:22 -0600
« prev ^ index » next coverage.py v6.4.4, created at 2023-07-17 14:22 -0600
1"""
2Create SQL statements for QuerySets.
4The code in here encapsulates all of the SQL construction so that QuerySets
5themselves do not have to (and could be backed by things other than SQL
6databases). The abstraction barrier only works one way: this module has to know
7all about the internals of models in order to get the information it needs.
8"""
9import copy
10import difflib
11import functools
12import sys
13from collections import Counter, namedtuple
14from collections.abc import Iterator, Mapping
15from itertools import chain, count, product
16from string import ascii_uppercase
18from django.core.exceptions import FieldDoesNotExist, FieldError
19from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
20from django.db.models.aggregates import Count
21from django.db.models.constants import LOOKUP_SEP
22from django.db.models.expressions import (
23 BaseExpression,
24 Col,
25 Exists,
26 F,
27 OuterRef,
28 Ref,
29 ResolvedOuterRef,
30)
31from django.db.models.fields import Field
32from django.db.models.fields.related_lookups import MultiColSource
33from django.db.models.lookups import Lookup
34from django.db.models.query_utils import (
35 Q,
36 check_rel_lookup_compatibility,
37 refs_expression,
38)
39from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
40from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin
41from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode
42from django.utils.functional import cached_property
43from django.utils.regex_helper import _lazy_re_compile
44from django.utils.tree import Node
__all__ = ["Query", "RawQuery"]

# Quotation marks ('"`[]), whitespace characters, semicolons, or inline
# SQL comments are forbidden in column aliases.
FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/")

# Inspired from
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
# Accepts only word characters and hyphens; used by Query.explain() to
# validate EXPLAIN option names before interpolating them into SQL.
EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+")
def get_field_names_from_opts(opts):
    """Return every field name (and attname for concrete fields) on opts."""
    names = set()
    for field in opts.get_fields():
        names.add(field.name)
        # Only concrete fields carry a database-backed attname worth exposing.
        if field.concrete:
            names.add(field.attname)
    return names
def get_children_from_q(q):
    """Yield every leaf child of the Q/Node tree in depth-first order."""
    # Iterative preorder walk; reversing keeps the original left-to-right
    # yield order of the recursive formulation.
    pending = list(reversed(q.children))
    while pending:
        item = pending.pop()
        if isinstance(item, Node):
            pending.extend(reversed(item.children))
        else:
            yield item
# Bundle describing a resolved join chain: the final field reached, its
# target columns, the model Options, the list of join aliases, the
# traversed relation path, and a transform callable applied during
# resolving (see setup_joins() callers — NOTE(review): confirm exact
# transform semantics against the rest of this module).
JoinInfo = namedtuple(
    "JoinInfo",
    ("final_field", "targets", "opts", "joins", "path", "transform_function"),
)
class RawQuery:
    """A single raw SQL query."""

    def __init__(self, sql, using, params=()):
        self.sql = sql
        self.using = using
        self.params = params
        self.cursor = None

        # Mirror some properties of a normal query so that the compiler can
        # be used to process results.
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit.
        self.extra_select = {}
        self.annotation_select = {}

    def chain(self, using):
        """Return a copy bound to ``using``; mirrors Query.chain()."""
        return self.clone(using)

    def clone(self, using):
        """Return a fresh RawQuery with the same SQL and params."""
        return RawQuery(self.sql, using, params=self.params)

    def get_columns(self):
        """Return result-set column names, executing the query if needed."""
        if self.cursor is None:
            self._execute_query()
        converter = connections[self.using].introspection.identifier_converter
        return [converter(meta[0]) for meta in self.cursor.description]

    def __iter__(self):
        # Always execute a new query for a new iterator.
        # This could be optimized with a cache at the expense of RAM.
        self._execute_query()
        if connections[self.using].features.can_use_chunked_reads:
            rows = self.cursor
        else:
            # Without chunked reads the entire result set must be
            # evaluated up front.
            rows = list(self.cursor)
        return iter(rows)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    @property
    def params_type(self):
        """Return dict, tuple, or None describing how params are passed."""
        if self.params is None:
            return None
        if isinstance(self.params, Mapping):
            return dict
        return tuple

    def __str__(self):
        kind = self.params_type
        if kind is None:
            return self.sql
        return self.sql % kind(self.params)

    def _execute_query(self):
        connection = connections[self.using]

        # Adapt parameters to the database, as much as possible considering
        # that the target type isn't known. See #17755.
        kind = self.params_type
        adapt = connection.ops.adapt_unknown_value
        if kind is tuple:
            params = tuple(adapt(value) for value in self.params)
        elif kind is dict:
            params = {key: adapt(value) for key, value in self.params.items()}
        elif kind is None:
            params = None
        else:
            raise RuntimeError("Unexpected params type: %s" % kind)

        self.cursor = connection.cursor()
        self.cursor.execute(self.sql, params)
# Output format and option mapping captured by Query.explain() and consumed
# by the compiler when rendering an EXPLAIN statement.
ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
class Query(BaseExpression):
    """A single SQL query."""

    # Prefix for generated table aliases (see table_alias(): "T2", "T3", ...).
    alias_prefix = "T"
    # Value reported for this expression when the result set is empty —
    # presumably part of the BaseExpression contract; confirm upstream.
    empty_result_set_value = None
    # Aliases treated as belonging to subqueries; seeded with the own prefix.
    subq_aliases = frozenset([alias_prefix])

    # Compiler class name, resolved via connection.ops.compiler() in
    # get_compiler().
    compiler = "SQLCompiler"
    def __init__(self, model, alias_cols=True):
        """
        Initialize an empty query against ``model``.

        ``alias_cols`` controls whether resolved column references are
        qualified with a table alias (see _get_col()).
        """
        self.model = model
        self.alias_refcount = {}
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # types they are. The key is the alias of the joined table (possibly
        # the table name) and the value is a Join-like object (see
        # sql.datastructures.Join for more information).
        self.alias_map = {}
        # Whether to provide alias to columns during reference resolving.
        self.alias_cols = alias_cols
        # Sometimes the query contains references to aliases in outer queries (as
        # a result of split_exclude). Correct alias quoting needs to know these
        # aliases too.
        # Map external tables to whether they are aliased.
        self.external_aliases = {}
        self.table_map = {}  # Maps table names to list of aliases.
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.subquery = False

        # SQL-related attributes.
        # Select and related select clauses are expressions to use in the
        # SELECT clause of the query.
        # The select is used for cases where we want to set up the select
        # clause to contain other than default fields (values(), subqueries...)
        # Note that annotations go to annotations dictionary.
        self.select = ()
        self.where = WhereNode()
        # The group_by attribute can have one of the following forms:
        #  - None: no group by at all in the query
        #  - A tuple of expressions: group by (at least) those expressions.
        #    String refs are also allowed for now.
        #  - True: group by all select fields of the model
        # See compiler.get_group_by() for details.
        self.group_by = None
        self.order_by = ()
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.distinct_fields = ()
        self.select_for_update = False
        self.select_for_update_nowait = False
        self.select_for_update_skip_locked = False
        self.select_for_update_of = ()
        self.select_for_no_key_update = False

        self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
        self.max_depth = 5

        # Holds the selects defined by a call to values() or values_list()
        # excluding annotation_select and extra_select.
        self.values_select = ()

        # SQL annotation-related attributes.
        self.annotations = {}  # Maps alias -> Annotation Expression.
        self.annotation_select_mask = None
        self._annotation_select_cache = None

        # Set combination attributes.
        self.combinator = None
        self.combinator_all = False
        self.combined_queries = ()

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = {}  # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
        self.deferred_loading = (frozenset(), True)

        self._filtered_relations = {}

        self.explain_info = None
249 @property
250 def output_field(self):
251 if len(self.select) == 1:
252 select = self.select[0]
253 return getattr(select, "target", None) or select.field
254 elif len(self.annotation_select) == 1:
255 return next(iter(self.annotation_select.values())).output_field
257 @property
258 def has_select_fields(self):
259 return bool(
260 self.select or self.annotation_select_mask or self.extra_select_mask
261 )
263 @cached_property
264 def base_table(self):
265 for alias in self.alias_map: 265 ↛ exitline 265 didn't return from function 'base_table', because the loop on line 265 didn't complete
266 return alias
268 def __str__(self):
269 """
270 Return the query as a string of SQL with the parameter values
271 substituted in (use sql_with_params() to see the unsubstituted string).
273 Parameter values won't necessarily be quoted correctly, since that is
274 done by the database interface at execution time.
275 """
276 sql, params = self.sql_with_params()
277 return sql % params
279 def sql_with_params(self):
280 """
281 Return the query as an SQL string and the parameters that will be
282 substituted into the query.
283 """
284 return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
286 def __deepcopy__(self, memo):
287 """Limit the amount of work when a Query is deepcopied."""
288 result = self.clone()
289 memo[id(self)] = result
290 return result
292 def get_compiler(self, using=None, connection=None, elide_empty=True):
293 if using is None and connection is None: 293 ↛ 294line 293 didn't jump to line 294, because the condition on line 293 was never true
294 raise ValueError("Need either using or connection")
295 if using:
296 connection = connections[using]
297 return connection.ops.compiler(self.compiler)(
298 self, connection, using, elide_empty
299 )
    def get_meta(self):
        """
        Return the Options instance (the model._meta) from which to start
        processing. Normally, this is self.model._meta, but it can be changed
        by subclasses.
        """
        return self.model._meta
    def clone(self):
        """
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
        """
        obj = Empty()
        obj.__class__ = self.__class__
        # Copy references to everything.
        obj.__dict__ = self.__dict__.copy()
        # Clone attributes that can't use shallow copy.
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.external_aliases = self.external_aliases.copy()
        obj.table_map = self.table_map.copy()
        obj.where = self.where.clone()
        obj.annotations = self.annotations.copy()
        if self.annotation_select_mask is not None:
            obj.annotation_select_mask = self.annotation_select_mask.copy()
        if self.combined_queries:
            obj.combined_queries = tuple(
                [query.clone() for query in self.combined_queries]
            )
        # _annotation_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both annotations and
        # _annotation_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._annotation_select_cache = None
        obj.extra = self.extra.copy()
        if self.extra_select_mask is not None:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is not None:
            obj._extra_select_cache = self._extra_select_cache.copy()
        if self.select_related is not False:
            # Use deepcopy because select_related stores fields in nested
            # dicts.
            obj.select_related = copy.deepcopy(obj.select_related)
        if "subq_aliases" in self.__dict__:
            obj.subq_aliases = self.subq_aliases.copy()
        obj.used_aliases = self.used_aliases.copy()
        obj._filtered_relations = self._filtered_relations.copy()
        # Clear the base_table cached_property so the clone recomputes it.
        try:
            del obj.base_table
        except AttributeError:
            pass
        return obj
357 def chain(self, klass=None):
358 """
359 Return a copy of the current Query that's ready for another operation.
360 The klass argument changes the type of the Query, e.g. UpdateQuery.
361 """
362 obj = self.clone()
363 if klass and obj.__class__ != klass:
364 obj.__class__ = klass
365 if not obj.filter_is_sticky:
366 obj.used_aliases = set()
367 obj.filter_is_sticky = False
368 if hasattr(obj, "_setup_query"):
369 obj._setup_query()
370 return obj
372 def relabeled_clone(self, change_map):
373 clone = self.clone()
374 clone.change_aliases(change_map)
375 return clone
377 def _get_col(self, target, field, alias):
378 if not self.alias_cols: 378 ↛ 379line 378 didn't jump to line 379, because the condition on line 378 was never true
379 alias = None
380 return target.get_col(alias, field)
    def rewrite_cols(self, annotation, col_cnt):
        """
        Rewrite ``annotation``'s source expressions into Ref()s against this
        (sub)query's select list, adding any missing columns to the query as
        masked annotations. Return the rewritten annotation together with the
        updated ``col_cnt`` counter used to generate unique "__colN" aliases.
        """
        # We must make sure the inner query has the referred columns in it.
        # If we are aggregating over an annotation, then Django uses Ref()
        # instances to note this. However, if we are annotating over a column
        # of a related model, then it might be that column isn't part of the
        # SELECT clause of the inner query, and we must manually make sure
        # the column is selected. An example case is:
        #     .aggregate(Sum('author__awards'))
        # Resolving this expression results in a join to author, but there
        # is no guarantee the awards column of author is in the select clause
        # of the query. Thus we must manually add the column to the inner
        # query.
        orig_exprs = annotation.get_source_expressions()
        new_exprs = []
        for expr in orig_exprs:
            # FIXME: These conditions are fairly arbitrary. Identify a better
            # method of having expressions decide which code path they should
            # take.
            if isinstance(expr, Ref):
                # It's already a Ref to subquery (see resolve_ref() for
                # details).
                new_exprs.append(expr)
            elif isinstance(expr, (WhereNode, Lookup)):
                # Decompose the subexpressions further. The code here is
                # copied from the else clause, but this condition must appear
                # before the contains_aggregate/is_summary condition below.
                new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
            else:
                # Reuse aliases of expressions already selected in subquery.
                for col_alias, selected_annotation in self.annotation_select.items():
                    if selected_annotation is expr:
                        new_expr = Ref(col_alias, expr)
                        break
                else:
                    # An expression that is not selected in the subquery.
                    if isinstance(expr, Col) or (
                        expr.contains_aggregate and not expr.is_summary
                    ):
                        # Reference column or another aggregate. Select it
                        # under a non-conflicting alias.
                        col_cnt += 1
                        col_alias = "__col%d" % col_cnt
                        self.annotations[col_alias] = expr
                        self.append_annotation_mask([col_alias])
                        new_expr = Ref(col_alias, expr)
                    else:
                        # Some other expression not referencing database values
                        # directly. Its subexpression might contain Cols.
                        new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
        annotation.set_source_expressions(new_exprs)
        return annotation, col_cnt
    def get_aggregation(self, using, added_aggregate_names):
        """
        Return the dictionary with the values of the existing aggregations,
        keyed by annotation alias.

        ``using`` is the database alias to execute against;
        ``added_aggregate_names`` lists the annotation aliases added solely
        for this aggregation — any other annotation counts as pre-existing
        and may force the query to be wrapped in a subquery.
        """
        if not self.annotation_select:
            return {}
        existing_annotations = [
            annotation
            for alias, annotation in self.annotations.items()
            if alias not in added_aggregate_names
        ]
        # Decide if we need to use a subquery.
        #
        # Existing annotations would cause incorrect results as get_aggregation()
        # must produce just one result and thus must not use GROUP BY. But we
        # aren't smart enough to remove the existing annotations from the
        # query, so those would force us to use GROUP BY.
        #
        # If the query has limit or distinct, or uses set operations, then
        # those operations must be done in a subquery so that the query
        # aggregates on the limit and/or distinct results instead of applying
        # the distinct and limit after the aggregation.
        if (
            isinstance(self.group_by, tuple)
            or self.is_sliced
            or existing_annotations
            or self.distinct
            or self.combinator
        ):
            from django.db.models.sql.subqueries import AggregateQuery

            inner_query = self.clone()
            inner_query.subquery = True
            outer_query = AggregateQuery(self.model, inner_query)
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            # Queries with distinct_fields need ordering and when a limit is
            # applied we must take the slice from the ordered query. Otherwise
            # no need for ordering.
            inner_query.clear_ordering(force=False)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                has_existing_aggregate_annotations = any(
                    annotation
                    for annotation in existing_annotations
                    if getattr(annotation, "contains_aggregate", True)
                )
                if inner_query.default_cols and has_existing_aggregate_annotations:
                    inner_query.group_by = (
                        self.model._meta.pk.get_col(inner_query.get_initial_alias()),
                    )
                inner_query.default_cols = False

            relabels = {t: "subquery" for t in inner_query.alias_map}
            relabels[None] = "subquery"
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            col_cnt = 0
            for alias, expression in list(inner_query.annotation_select.items()):
                annotation_select_mask = inner_query.annotation_select_mask
                if expression.is_summary:
                    expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
                    outer_query.annotations[alias] = expression.relabeled_clone(
                        relabels
                    )
                    del inner_query.annotations[alias]
                    annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
                inner_query.set_annotation_mask(inner_query.annotation_select_mask)
            if (
                inner_query.select == ()
                and not inner_query.default_cols
                and not inner_query.annotation_select_mask
            ):
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
                inner_query.select = (
                    self.model._meta.pk.get_col(inner_query.get_initial_alias()),
                )
        else:
            outer_query = self
            self.select = ()
            self.default_cols = False
            self.extra = {}

        empty_set_result = [
            expression.empty_result_set_value
            for expression in outer_query.annotation_select.values()
        ]
        elide_empty = not any(result is NotImplemented for result in empty_set_result)
        outer_query.clear_ordering(force=True)
        outer_query.clear_limits()
        outer_query.select_for_update = False
        outer_query.select_related = False
        compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
        result = compiler.execute_sql(SINGLE)
        if result is None:
            result = empty_set_result

        converters = compiler.get_converters(outer_query.annotation_select.values())
        result = next(compiler.apply_converters((result,), converters))

        return dict(zip(outer_query.annotation_select, result))
546 def get_count(self, using):
547 """
548 Perform a COUNT() query using the current filter constraints.
549 """
550 obj = self.clone()
551 obj.add_annotation(Count("*"), alias="__count", is_summary=True)
552 number = obj.get_aggregation(using, ["__count"])["__count"]
553 if number is None: 553 ↛ 554line 553 didn't jump to line 554, because the condition on line 553 was never true
554 number = 0
555 return number
    def has_filters(self):
        # Return the WhereNode itself; callers rely on its truthiness —
        # presumably an empty node is falsy (confirm against sql.where).
        return self.where
    def exists(self, using, limit=True):
        """
        Return a pruned clone suitable for use as an EXISTS subquery: select
        clause cleared, ordering removed, sliced to a single row (when
        ``limit``), and selecting the constant 1 under the alias "a".
        """
        q = self.clone()
        if not q.distinct:
            if q.group_by is True:
                q.add_fields(
                    (f.attname for f in self.model._meta.concrete_fields), False
                )
                # Disable GROUP BY aliases to avoid orphaning references to the
                # SELECT clause which is about to be cleared.
                q.set_group_by(allow_aliases=False)
            q.clear_select_clause()
        if q.combined_queries and q.combinator == "union":
            # Rewrite each member of a UNION as its own EXISTS form; only
            # slice members when the backend supports it.
            limit_combined = connections[
                using
            ].features.supports_slicing_ordering_in_compound
            q.combined_queries = tuple(
                combined_query.exists(using, limit=limit_combined)
                for combined_query in q.combined_queries
            )
        q.clear_ordering(force=True)
        if limit:
            q.set_limits(high=1)
        q.add_extra({"a": 1}, None, None, None, None, None)
        q.set_extra_mask(["a"])
        return q
586 def has_results(self, using):
587 q = self.exists(using)
588 compiler = q.get_compiler(using=using)
589 return compiler.has_results()
591 def explain(self, using, format=None, **options):
592 q = self.clone()
593 for option_name in options:
594 if (
595 not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name)
596 or "--" in option_name
597 ):
598 raise ValueError(f"Invalid option name: {option_name!r}.")
599 q.explain_info = ExplainInfo(format, options)
600 compiler = q.get_compiler(using=using)
601 return "\n".join(compiler.explain_query())
    def combine(self, rhs, connector):
        """
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query. 'rhs' is not modified during a call to this function.

        The 'connector' parameter describes how to connect filters from the
        'rhs' query.

        Raise TypeError when the two queries cannot be merged (different
        models, an existing slice, or mismatched distinct settings).
        """
        if self.model != rhs.model:
            raise TypeError("Cannot combine queries on two different base models.")
        if self.is_sliced:
            raise TypeError("Cannot combine queries once a slice has been taken.")
        if self.distinct != rhs.distinct:
            raise TypeError("Cannot combine a unique query with a non-unique query.")
        if self.distinct_fields != rhs.distinct_fields:
            raise TypeError("Cannot combine queries with different distinct fields.")

        # Work out how to relabel the rhs aliases, if necessary.
        change_map = {}
        conjunction = connector == AND

        # Determine which existing joins can be reused. When combining the
        # query with AND we must recreate all joins for m2m filters. When
        # combining with OR we can reuse joins. The reason is that in AND
        # case a single row can't fulfill a condition like:
        #     revrel__col=1 & revrel__col=2
        # But, there might be two different related rows matching this
        # condition. In OR case a single True is enough, so single row is
        # enough, too.
        #
        # Note that we will be creating duplicate joins for non-m2m joins in
        # the AND case. The results will be correct but this creates too many
        # joins. This is something that could be fixed later on.
        reuse = set() if conjunction else set(self.alias_map)
        # Base table must be present in the query - this is the same
        # table on both sides.
        self.get_initial_alias()
        joinpromoter = JoinPromoter(connector, 2, False)
        joinpromoter.add_votes(
            j for j in self.alias_map if self.alias_map[j].join_type == INNER
        )
        rhs_votes = set()
        # Now, add the joins from rhs query into the new query (skipping base
        # table).
        rhs_tables = list(rhs.alias_map)[1:]
        for alias in rhs_tables:
            join = rhs.alias_map[alias]
            # If the left side of the join was already relabeled, use the
            # updated alias.
            join = join.relabeled_clone(change_map)
            new_alias = self.join(join, reuse=reuse)
            if join.join_type == INNER:
                rhs_votes.add(new_alias)
            # We can't reuse the same join again in the query. If we have two
            # distinct joins for the same connection in rhs query, then the
            # combined query must have two joins, too.
            reuse.discard(new_alias)
            if alias != new_alias:
                change_map[alias] = new_alias
            if not rhs.alias_refcount[alias]:
                # The alias was unused in the rhs query. Unref it so that it
                # will be unused in the new query, too. We have to add and
                # unref the alias so that join promotion has information of
                # the join type for the unused alias.
                self.unref_alias(new_alias)
        joinpromoter.add_votes(rhs_votes)
        joinpromoter.update_join_types(self)

        # Combine subqueries aliases to ensure aliases relabelling properly
        # handle subqueries when combining where and select clauses.
        self.subq_aliases |= rhs.subq_aliases

        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        w = rhs.where.clone()
        w.relabel_aliases(change_map)
        self.where.add(w, connector)

        # Selection columns and extra extensions are those provided by 'rhs'.
        if rhs.select:
            self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
        else:
            self.select = ()

        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self.extra and rhs.extra:
                raise ValueError(
                    "When merging querysets using 'or', you cannot have "
                    "extra(select=...) on both sides."
                )
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables

        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by
    def deferred_to_data(self, target, callback):
        """
        Convert the self.deferred_loading data structure to an alternate data
        structure, describing the field that *will* be loaded. This is used to
        compute the columns to select from the database and also by the
        QuerySet class to work out which fields are being initialized on each
        model. Models that have all their fields included aren't mentioned in
        the result, only those that have field restrictions in place.

        The "target" parameter is the instance that is populated (in place).
        The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
        "target", and the model and list of fields being added for that model.
        """
        field_names, defer = self.deferred_loading
        if not field_names:
            return
        orig_opts = self.get_meta()
        seen = {}
        # The pk of the starting model is always loaded.
        must_include = {orig_opts.concrete_model: {orig_opts.pk}}
        for field_name in field_names:
            parts = field_name.split(LOOKUP_SEP)
            cur_model = self.model._meta.concrete_model
            opts = orig_opts
            # Walk every relation segment except the final field name.
            for name in parts[:-1]:
                old_model = cur_model
                if name in self._filtered_relations:
                    name = self._filtered_relations[name].relation_name
                source = opts.get_field(name)
                if is_reverse_o2o(source):
                    cur_model = source.related_model
                else:
                    cur_model = source.remote_field.model
                opts = cur_model._meta
                # Even if we're "just passing through" this model, we must add
                # both the current model's pk and the related reference field
                # (if it's not a reverse relation) to the things we select.
                if not is_reverse_o2o(source):
                    must_include[old_model].add(source)
                add_to_dict(must_include, cur_model, opts.pk)
            field = opts.get_field(parts[-1])
            is_reverse_object = field.auto_created and not field.concrete
            model = field.related_model if is_reverse_object else field.model
            model = model._meta.concrete_model
            if model == opts.model:
                model = cur_model
            if not is_reverse_o2o(field):
                add_to_dict(seen, model, field)

        if defer:
            # We need to load all fields for each model, except those that
            # appear in "seen" (for all models that appear in "seen"). The only
            # slight complexity here is handling fields that exist on parent
            # models.
            workset = {}
            for model, values in seen.items():
                for field in model._meta.local_fields:
                    if field not in values:
                        m = field.model._meta.concrete_model
                        add_to_dict(workset, m, field)
            for model, values in must_include.items():
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in workset.items():
                callback(target, model, values)
        else:
            for model, values in must_include.items():
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                seen.setdefault(model, set())
            for model, values in seen.items():
                callback(target, model, values)
798 def table_alias(self, table_name, create=False, filtered_relation=None):
799 """
800 Return a table alias for the given table_name and whether this is a
801 new alias or not.
803 If 'create' is true, a new alias is always created. Otherwise, the
804 most recently created alias for the table (if one exists) is reused.
805 """
806 alias_list = self.table_map.get(table_name)
807 if not create and alias_list: 807 ↛ 808line 807 didn't jump to line 808, because the condition on line 807 was never true
808 alias = alias_list[0]
809 self.alias_refcount[alias] += 1
810 return alias, False
812 # Create a new alias for this table.
813 if alias_list:
814 alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
815 alias_list.append(alias)
816 else:
817 # The first occurrence of a table uses the table name directly.
818 alias = (
819 filtered_relation.alias if filtered_relation is not None else table_name
820 )
821 self.table_map[table_name] = [alias]
822 self.alias_refcount[alias] = 1
823 return alias, True
825 def ref_alias(self, alias):
826 """Increases the reference count for this alias."""
827 self.alias_refcount[alias] += 1
829 def unref_alias(self, alias, amount=1):
830 """Decreases the reference count for this alias."""
831 self.alias_refcount[alias] -= amount
    def promote_joins(self, aliases):
        """
        Promote recursively the join type of given aliases and its children to
        an outer join. If 'unconditional' is False, only promote the join if
        it is nullable or the parent join is an outer join.

        The children promotion is done to avoid join chains that contain a LOUTER
        b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
        then we must also promote b->c automatically, or otherwise the promotion
        of a->b doesn't actually change anything in the query results.
        """
        # Breadth-first walk: a promoted alias re-queues its child joins so
        # promotion ripples down the chain.
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = (
                parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            )
            already_louter = self.alias_map[alias].join_type == LOUTER
            if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
                aliases.extend(
                    join
                    for join in self.alias_map
                    if self.alias_map[join].parent_alias == alias
                    and join not in aliases
                )
870 def demote_joins(self, aliases):
871 """
872 Change join type from LOUTER to INNER for all joins in aliases.
874 Similarly to promote_joins(), this method must ensure no join chains
875 containing first an outer, then an inner join are generated. If we
876 are demoting b->c join in chain a LOUTER b LOUTER c then we must
877 demote a->b automatically, or otherwise the demotion of b->c doesn't
878 actually change anything in the query results. .
879 """
880 aliases = list(aliases)
881 while aliases:
882 alias = aliases.pop(0)
883 if self.alias_map[alias].join_type == LOUTER:
884 self.alias_map[alias] = self.alias_map[alias].demote()
885 parent_alias = self.alias_map[alias].parent_alias
886 if self.alias_map[parent_alias].join_type == INNER:
887 aliases.append(parent_alias)
889 def reset_refcounts(self, to_counts):
890 """
891 Reset reference counts for aliases so that they match the value passed
892 in `to_counts`.
893 """
894 for alias, cur_refcount in self.alias_refcount.copy().items():
895 unref_amount = cur_refcount - to_counts.get(alias, 0)
896 self.unref_alias(alias, unref_amount)
    def change_aliases(self, change_map):
        """
        Change the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        # Keys and values must be disjoint, otherwise one rename could
        # clobber the result of another (X->Y followed by Y->Z).
        assert set(change_map).isdisjoint(change_map.values())

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by" and "where".
        self.where.relabel_aliases(change_map)
        if isinstance(self.group_by, tuple):
            self.group_by = tuple(
                [col.relabeled_clone(change_map) for col in self.group_by]
            )
        self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
        # Preserve falsy annotations (None / {}) as-is via the `and`.
        self.annotations = self.annotations and {
            key: col.relabeled_clone(change_map)
            for key, col in self.annotations.items()
        }

        # 2. Rename the alias in the internal table/alias datastructures.
        for old_alias, new_alias in change_map.items():
            if old_alias not in self.alias_map:
                continue
            alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
            self.alias_map[new_alias] = alias_data
            self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
            del self.alias_refcount[old_alias]
            del self.alias_map[old_alias]

            table_aliases = self.table_map[alias_data.table_name]
            for pos, alias in enumerate(table_aliases):
                if alias == old_alias:
                    table_aliases[pos] = new_alias
                    break
        self.external_aliases = {
            # Table is aliased or it's being changed and thus is aliased.
            change_map.get(alias, alias): (aliased or alias in change_map)
            for alias, aliased in self.external_aliases.items()
        }
    def bump_prefix(self, outer_query):
        """
        Change the alias prefix to the next letter in the alphabet in a way
        that the outer query's aliases and this query's aliases will not
        conflict. Even tables that previously had no alias will get an alias
        after this call.
        """

        def prefix_gen():
            """
            Generate a sequence of characters in alphabetical order:
                -> 'A', 'B', 'C', ...

            When the alphabet is finished, the sequence will continue with the
            Cartesian product:
                -> 'AA', 'AB', 'AC', ...
            """
            alphabet = ascii_uppercase
            prefix = chr(ord(self.alias_prefix) + 1)
            yield prefix
            for n in count(1):
                # On the first product round, start from the current prefix so
                # already-used letters aren't revisited.
                seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
                for s in product(seq, repeat=n):
                    yield "".join(s)
                prefix = None

        if self.alias_prefix != outer_query.alias_prefix:
            # No clashes between self and outer query should be possible.
            return

        # Explicitly avoid infinite loop. The constant divider is based on how
        # much depth recursive subquery references add to the stack. This value
        # might need to be adjusted when adding or removing function calls from
        # the code path in charge of performing these operations.
        local_recursion_limit = sys.getrecursionlimit() // 16
        for pos, prefix in enumerate(prefix_gen()):
            if prefix not in self.subq_aliases:
                self.alias_prefix = prefix
                break
            if pos > local_recursion_limit:
                raise RecursionError(
                    "Maximum recursion depth exceeded: too many subqueries."
                )
        self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
        outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
        # Rewrite every existing alias to use the new prefix.
        self.change_aliases(
            {
                alias: "%s%d" % (self.alias_prefix, pos)
                for pos, alias in enumerate(self.alias_map)
            }
        )
992 def get_initial_alias(self):
993 """
994 Return the first alias for this query, after increasing its reference
995 count.
996 """
997 if self.alias_map:
998 alias = self.base_table
999 self.ref_alias(alias)
1000 else:
1001 alias = self.join(BaseTable(self.get_meta().db_table, None))
1002 return alias
1004 def count_active_tables(self):
1005 """
1006 Return the number of tables in this query with a non-zero reference
1007 count. After execution, the reference counts are zeroed, so tables
1008 added in compiler will not be seen by this method.
1009 """
1010 return len([1 for count in self.alias_refcount.values() if count])
    def join(self, join, reuse=None, reuse_with_filtered_relation=False):
        """
        Return an alias for the 'join', either reusing an existing alias for
        that join or creating a new one. 'join' is either a
        sql.datastructures.BaseTable or Join.

        The 'reuse' parameter can be either None which means all joins are
        reusable, or it can be a set containing the aliases that can be reused.

        The 'reuse_with_filtered_relation' parameter is used when computing
        FilteredRelation instances.

        A join is always created as LOUTER if the lhs alias is LOUTER to make
        sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
        joins are created as LOUTER if the join is nullable.
        """
        if reuse_with_filtered_relation and reuse:
            # FilteredRelation: only structurally-equal joins from the
            # explicit reuse set may be shared (equals(), not ==).
            reuse_aliases = [
                a for a, j in self.alias_map.items() if a in reuse and j.equals(join)
            ]
        else:
            reuse_aliases = [
                a
                for a, j in self.alias_map.items()
                if (reuse is None or a in reuse) and j == join
            ]
        if reuse_aliases:
            if join.table_alias in reuse_aliases:
                reuse_alias = join.table_alias
            else:
                # Reuse the most recent alias of the joined table
                # (a many-to-many relation may be joined multiple times).
                reuse_alias = reuse_aliases[-1]
            self.ref_alias(reuse_alias)
            return reuse_alias

        # No reuse is possible, so we need a new alias.
        alias, _ = self.table_alias(
            join.table_name, create=True, filtered_relation=join.filtered_relation
        )
        if join.join_type:
            # Propagate LOUTER from the parent, or start one for a nullable
            # join, so no INNER ever hangs below an outer join.
            if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
                join_type = LOUTER
            else:
                join_type = INNER
            join.join_type = join_type
        join.table_alias = alias
        self.alias_map[alias] = join
        return alias
    def join_parent_model(self, opts, model, alias, seen):
        """
        Make sure the given 'model' is joined in the query. If 'model' isn't
        a parent of 'opts' or if it is None this method is a no-op.

        The 'alias' is the root alias for starting the join, 'seen' is a dict
        of model -> alias of existing joins. It must also contain a mapping
        of None -> some alias. This will be returned in the no-op case.
        """
        if model in seen:
            return seen[model]
        chain = opts.get_base_chain(model)
        if not chain:
            return alias
        curr_opts = opts
        # Walk each intermediate model in the inheritance chain, joining via
        # the parent-link field unless an existing join can be reused.
        for int_model in chain:
            if int_model in seen:
                curr_opts = int_model._meta
                alias = seen[int_model]
                continue
            # Proxy model have elements in base chain
            # with no parents, assign the new options
            # object and skip to the next base in that
            # case
            if not curr_opts.parents[int_model]:
                curr_opts = int_model._meta
                continue
            link_field = curr_opts.get_ancestor_link(int_model)
            join_info = self.setup_joins([link_field.name], curr_opts, alias)
            curr_opts = int_model._meta
            alias = seen[int_model] = join_info.joins[-1]
        # Fall back to the None entry when no alias was produced.
        return alias or seen[None]
1095 def check_alias(self, alias):
1096 if FORBIDDEN_ALIAS_PATTERN.search(alias): 1096 ↛ 1097line 1096 didn't jump to line 1097, because the condition on line 1096 was never true
1097 raise ValueError(
1098 "Column aliases cannot contain whitespace characters, quotation marks, "
1099 "semicolons, or SQL comments."
1100 )
1102 def add_annotation(self, annotation, alias, is_summary=False, select=True):
1103 """Add a single annotation expression to the Query."""
1104 self.check_alias(alias)
1105 annotation = annotation.resolve_expression(
1106 self, allow_joins=True, reuse=None, summarize=is_summary
1107 )
1108 if select: 1108 ↛ 1111line 1108 didn't jump to line 1111, because the condition on line 1108 was never false
1109 self.append_annotation_mask([alias])
1110 else:
1111 self.set_annotation_mask(set(self.annotation_select).difference({alias}))
1112 self.annotations[alias] = annotation
    def resolve_expression(self, query, *args, **kwargs):
        """
        Return a clone of this query usable as a subquery expression inside
        'query' (the outer query), with aliases rebased and outer references
        recorded as external.
        """
        clone = self.clone()
        # Subqueries need to use a different set of aliases than the outer query.
        clone.bump_prefix(query)
        clone.subquery = True
        clone.where.resolve_expression(query, *args, **kwargs)
        for key, value in clone.annotations.items():
            resolved = value.resolve_expression(query, *args, **kwargs)
            if hasattr(resolved, "external_aliases"):
                resolved.external_aliases.update(clone.external_aliases)
            clone.annotations[key] = resolved
        # Outer query's aliases are considered external.
        for alias, table in query.alias_map.items():
            # An alias counts as external only if it differs from the plain
            # table name (i.e. the table is actually aliased).
            clone.external_aliases[alias] = (
                isinstance(table, Join)
                and table.join_field.related_model._meta.db_table != alias
            ) or (
                isinstance(table, BaseTable) and table.table_name != table.table_alias
            )
        return clone
1135 def get_external_cols(self):
1136 exprs = chain(self.annotations.values(), self.where.children)
1137 return [
1138 col
1139 for col in self._gen_cols(exprs, include_external=True)
1140 if col.alias in self.external_aliases
1141 ]
1143 def as_sql(self, compiler, connection):
1144 # Some backends (e.g. Oracle) raise an error when a subquery contains
1145 # unnecessary ORDER BY clause.
1146 if ( 1146 ↛ 1150line 1146 didn't jump to line 1150
1147 self.subquery
1148 and not connection.features.ignores_unnecessary_order_by_in_subqueries
1149 ):
1150 self.clear_ordering(force=False)
1151 sql, params = self.get_compiler(connection=connection).as_sql()
1152 if self.subquery: 1152 ↛ 1154line 1152 didn't jump to line 1154, because the condition on line 1152 was never false
1153 sql = "(%s)" % sql
1154 return sql, params
1156 def resolve_lookup_value(self, value, can_reuse, allow_joins):
1157 if hasattr(value, "resolve_expression"):
1158 value = value.resolve_expression(
1159 self,
1160 reuse=can_reuse,
1161 allow_joins=allow_joins,
1162 )
1163 elif isinstance(value, (list, tuple)):
1164 # The items of the iterable may be expressions and therefore need
1165 # to be resolved independently.
1166 values = (
1167 self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
1168 for sub_value in value
1169 )
1170 type_ = type(value)
1171 if hasattr(type_, "_make"): # namedtuple 1171 ↛ 1172line 1171 didn't jump to line 1172, because the condition on line 1171 was never true
1172 return type_(*values)
1173 return type_(values)
1174 return value
    def solve_lookup_type(self, lookup):
        """
        Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').

        Return a (lookup_parts, field_parts, expression) triple; 'expression'
        is the referenced annotation when the lookup targets one, else False.
        """
        lookup_splitted = lookup.split(LOOKUP_SEP)
        if self.annotations:
            # The leading parts may name an annotation instead of a field.
            expression, expression_lookups = refs_expression(
                lookup_splitted, self.annotations
            )
            if expression:
                return expression_lookups, (), expression
        _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
        field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
        if len(lookup_parts) > 1 and not field_parts:
            raise FieldError(
                'Invalid lookup "%s" for model %s".'
                % (lookup, self.get_meta().model.__name__)
            )
        return lookup_parts, field_parts, False
1196 def check_query_object_type(self, value, opts, field):
1197 """
1198 Check whether the object passed while querying is of the correct type.
1199 If not, raise a ValueError specifying the wrong object.
1200 """
1201 if hasattr(value, "_meta"):
1202 if not check_rel_lookup_compatibility(value._meta.model, opts, field): 1202 ↛ 1203line 1202 didn't jump to line 1203, because the condition on line 1202 was never true
1203 raise ValueError(
1204 'Cannot query "%s": Must be "%s" instance.'
1205 % (value, opts.object_name)
1206 )
1208 def check_related_objects(self, field, value, opts):
1209 """Check the type of object passed to query relations."""
1210 if field.is_relation:
1211 # Check that the field and the queryset use the same model in a
1212 # query like .filter(author=Author.objects.all()). For example, the
1213 # opts would be Author's (from the author field) and value.model
1214 # would be Author.objects.all() queryset's .model (Author also).
1215 # The field is the related field on the lhs side.
1216 if ( 1216 ↛ 1221line 1216 didn't jump to line 1221
1217 isinstance(value, Query)
1218 and not value.has_select_fields
1219 and not check_rel_lookup_compatibility(value.model, opts, field)
1220 ):
1221 raise ValueError(
1222 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
1223 % (value.model._meta.object_name, opts.object_name)
1224 )
1225 elif hasattr(value, "_meta"):
1226 self.check_query_object_type(value, opts, field)
1227 elif hasattr(value, "__iter__"):
1228 for v in value:
1229 self.check_query_object_type(v, opts, field)
1231 def check_filterable(self, expression):
1232 """Raise an error if expression cannot be used in a WHERE clause."""
1233 if hasattr(expression, "resolve_expression") and not getattr( 1233 ↛ 1236line 1233 didn't jump to line 1236, because the condition on line 1233 was never true
1234 expression, "filterable", True
1235 ):
1236 raise NotSupportedError(
1237 expression.__class__.__name__ + " is disallowed in the filter "
1238 "clause."
1239 )
1240 if hasattr(expression, "get_source_expressions"):
1241 for expr in expression.get_source_expressions(): 1241 ↛ 1242line 1241 didn't jump to line 1242, because the loop on line 1241 never started
1242 self.check_filterable(expr)
    def build_lookup(self, lookups, lhs, rhs):
        """
        Try to extract transforms and lookup from given lhs.

        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
        The lookups is a list of names to extract using get_lookup()
        and get_transform().
        """
        # __exact is the default lookup if one isn't given.
        *transforms, lookup_name = lookups or ["exact"]
        for name in transforms:
            lhs = self.try_transform(lhs, name)
        # First try get_lookup() so that the lookup takes precedence if the lhs
        # supports both transform and lookup for the name.
        lookup_class = lhs.get_lookup(lookup_name)
        if not lookup_class:
            if lhs.field.is_relation:
                raise FieldError(
                    "Related Field got invalid lookup: {}".format(lookup_name)
                )
            # A lookup wasn't found. Try to interpret the name as a transform
            # and do an Exact lookup against it.
            lhs = self.try_transform(lhs, lookup_name)
            lookup_name = "exact"
            lookup_class = lhs.get_lookup(lookup_name)
            if not lookup_class:
                return

        lookup = lookup_class(lhs, rhs)
        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value unless the lookup supports it.
        if lookup.rhs is None and not lookup.can_use_none_as_rhs:
            if lookup_name not in ("exact", "iexact"):
                raise ValueError("Cannot use None as a query value")
            return lhs.get_lookup("isnull")(lhs, True)

        # For Oracle '' is equivalent to null. The check must be done at this
        # stage because join promotion can't be done in the compiler. Using
        # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
        # A similar thing is done in is_nullable(), too.
        if (
            lookup_name == "exact"
            and lookup.rhs == ""
            and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
        ):
            return lhs.get_lookup("isnull")(lhs, True)

        return lookup
1294 def try_transform(self, lhs, name):
1295 """
1296 Helper method for build_lookup(). Try to fetch and initialize
1297 a transform for name parameter from lhs.
1298 """
1299 transform_class = lhs.get_transform(name)
1300 if transform_class: 1300 ↛ 1303line 1300 didn't jump to line 1303, because the condition on line 1300 was never false
1301 return transform_class(lhs)
1302 else:
1303 output_field = lhs.output_field.__class__
1304 suggested_lookups = difflib.get_close_matches(
1305 name, output_field.get_lookups()
1306 )
1307 if suggested_lookups:
1308 suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
1309 else:
1310 suggestion = "."
1311 raise FieldError(
1312 "Unsupported lookup '%s' for %s or join on the field not "
1313 "permitted%s" % (name, output_field.__name__, suggestion)
1314 )
    def build_filter(
        self,
        filter_expr,
        branch_negated=False,
        current_negated=False,
        can_reuse=None,
        allow_joins=True,
        split_subq=True,
        reuse_with_filtered_relation=False,
        check_filterable=True,
    ):
        """
        Build a WhereNode for a single filter clause but don't add it
        to this Query. Query.add_q() will then add this filter to the where
        Node.

        The 'branch_negated' tells us if the current branch contains any
        negations. This will be used to determine if subqueries are needed.

        The 'current_negated' is used to determine if the current filter is
        negated or not and this will be used to determine if IS NULL filtering
        is needed.

        The difference between current_negated and branch_negated is that
        branch_negated is set on first negation, but current_negated is
        flipped for each negation.

        Note that add_filter will not do any negating itself, that is done
        upper in the code by add_q().

        The 'can_reuse' is a set of reusable joins for multijoins.

        If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
        will be reused.

        The method will create a filter clause that can be added to the current
        query. However, if the filter isn't added to the query then the caller
        is responsible for unreffing the joins used.

        Returns a (WhereNode, needed_inner_joins) pair.
        """
        if isinstance(filter_expr, dict):
            raise FieldError("Cannot parse keyword query as dict")
        if isinstance(filter_expr, Q):
            # Q objects recurse through _add_q().
            return self._add_q(
                filter_expr,
                branch_negated=branch_negated,
                current_negated=current_negated,
                used_aliases=can_reuse,
                allow_joins=allow_joins,
                split_subq=split_subq,
                check_filterable=check_filterable,
            )
        if hasattr(filter_expr, "resolve_expression"):
            if not getattr(filter_expr, "conditional", False):
                raise TypeError("Cannot filter against a non-conditional expression.")
            condition = filter_expr.resolve_expression(self, allow_joins=allow_joins)
            if not isinstance(condition, Lookup):
                condition = self.build_lookup(["exact"], condition, True)
            return WhereNode([condition], connector=AND), []
        arg, value = filter_expr
        if not arg:
            raise FieldError("Cannot parse keyword query %r" % arg)
        lookups, parts, reffed_expression = self.solve_lookup_type(arg)

        if check_filterable:
            self.check_filterable(reffed_expression)

        if not allow_joins and len(parts) > 1:
            raise FieldError("Joined field references are not permitted in this query")

        pre_joins = self.alias_refcount.copy()
        value = self.resolve_lookup_value(value, can_reuse, allow_joins)
        # Joins whose refcount grew while resolving the value were created by
        # it; record them so they can vote in join promotion.
        used_joins = {
            k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
        }

        if check_filterable:
            self.check_filterable(value)

        if reffed_expression:
            condition = self.build_lookup(lookups, reffed_expression, value)
            return WhereNode([condition], connector=AND), []

        opts = self.get_meta()
        alias = self.get_initial_alias()
        allow_many = not branch_negated or not split_subq

        try:
            join_info = self.setup_joins(
                parts,
                opts,
                alias,
                can_reuse=can_reuse,
                allow_many=allow_many,
                reuse_with_filtered_relation=reuse_with_filtered_relation,
            )

            # Prevent iterator from being consumed by check_related_objects()
            if isinstance(value, Iterator):
                value = list(value)
            self.check_related_objects(join_info.final_field, value, join_info.opts)

            # split_exclude() needs to know which joins were generated for the
            # lookup parts
            self._lookup_joins = join_info.joins
        except MultiJoin as e:
            return self.split_exclude(filter_expr, can_reuse, e.names_with_path)

        # Update used_joins before trimming since they are reused to determine
        # which joins could be later promoted to INNER.
        used_joins.update(join_info.joins)
        targets, alias, join_list = self.trim_joins(
            join_info.targets, join_info.joins, join_info.path
        )
        if can_reuse is not None:
            can_reuse.update(join_list)

        if join_info.final_field.is_relation:
            # No support for transforms for relational fields
            num_lookups = len(lookups)
            if num_lookups > 1:
                raise FieldError(
                    "Related Field got invalid lookup: {}".format(lookups[0])
                )
            if len(targets) == 1:
                col = self._get_col(targets[0], join_info.final_field, alias)
            else:
                col = MultiColSource(
                    alias, targets, join_info.targets, join_info.final_field
                )
        else:
            col = self._get_col(targets[0], join_info.final_field, alias)

        condition = self.build_lookup(lookups, col, value)
        lookup_type = condition.lookup_name
        clause = WhereNode([condition], connector=AND)

        # An affirmative "IS NULL" needs an outer join to produce rows for
        # missing related objects.
        require_outer = (
            lookup_type == "isnull" and condition.rhs is True and not current_negated
        )
        if (
            current_negated
            and (lookup_type != "isnull" or condition.rhs is False)
            and condition.rhs is not None
        ):
            require_outer = True
            if lookup_type != "isnull":
                # The condition added here will be SQL like this:
                # NOT (col IS NOT NULL), where the first NOT is added in
                # upper layers of code. The reason for addition is that if col
                # is null, then col != someval will result in SQL "unknown"
                # which isn't the same as in Python. The Python None handling
                # is wanted, and it can be gotten by
                # (col IS NULL OR col != someval)
                # <=>
                # NOT (col IS NOT NULL AND col = someval).
                if (
                    self.is_nullable(targets[0])
                    or self.alias_map[join_list[-1]].join_type == LOUTER
                ):
                    lookup_class = targets[0].get_lookup("isnull")
                    col = self._get_col(targets[0], join_info.targets[0], alias)
                    clause.add(lookup_class(col, False), AND)
                # If someval is a nullable column, someval IS NOT NULL is
                # added.
                if isinstance(value, Col) and self.is_nullable(value.target):
                    lookup_class = value.target.get_lookup("isnull")
                    clause.add(lookup_class(value, False), AND)
        return clause, used_joins if not require_outer else ()
1485 def add_filter(self, filter_lhs, filter_rhs):
1486 self.add_q(Q((filter_lhs, filter_rhs)))
1488 def add_q(self, q_object):
1489 """
1490 A preprocessor for the internal _add_q(). Responsible for doing final
1491 join promotion.
1492 """
1493 # For join promotion this case is doing an AND for the added q_object
1494 # and existing conditions. So, any existing inner join forces the join
1495 # type to remain inner. Existing outer joins can however be demoted.
1496 # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
1497 # rel_a doesn't produce any rows, then the whole condition must fail.
1498 # So, demotion is OK.
1499 existing_inner = {
1500 a for a in self.alias_map if self.alias_map[a].join_type == INNER
1501 }
1502 clause, _ = self._add_q(q_object, self.used_aliases)
1503 if clause:
1504 self.where.add(clause, AND)
1505 self.demote_joins(existing_inner)
1507 def build_where(self, filter_expr):
1508 return self.build_filter(filter_expr, allow_joins=False)[0]
1510 def clear_where(self):
1511 self.where = WhereNode()
    def _add_q(
        self,
        q_object,
        used_aliases,
        branch_negated=False,
        current_negated=False,
        allow_joins=True,
        split_subq=True,
        check_filterable=True,
    ):
        """Add a Q-object to the current filter."""
        connector = q_object.connector
        # current_negated flips on every NOT level; branch_negated latches on
        # the first NOT seen anywhere up the branch.
        current_negated = current_negated ^ q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = WhereNode(connector=connector, negated=q_object.negated)
        # Collect per-child join votes to decide INNER vs LOUTER afterwards.
        joinpromoter = JoinPromoter(
            q_object.connector, len(q_object.children), current_negated
        )
        for child in q_object.children:
            child_clause, needed_inner = self.build_filter(
                child,
                can_reuse=used_aliases,
                branch_negated=branch_negated,
                current_negated=current_negated,
                allow_joins=allow_joins,
                split_subq=split_subq,
                check_filterable=check_filterable,
            )
            joinpromoter.add_votes(needed_inner)
            if child_clause:
                target_clause.add(child_clause, connector)
        needed_inner = joinpromoter.update_join_types(self)
        return target_clause, needed_inner
    def build_filtered_relation_q(
        self, q_object, reuse, branch_negated=False, current_negated=False
    ):
        """
        Add a FilteredRelation object to the current filter.

        Like _add_q() but restricted to joins in 'reuse' and without
        subquery splitting, as required by FilteredRelation conditions.
        """
        connector = q_object.connector
        current_negated ^= q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = WhereNode(connector=connector, negated=q_object.negated)
        for child in q_object.children:
            if isinstance(child, Node):
                # Nested Q object: recurse.
                child_clause = self.build_filtered_relation_q(
                    child,
                    reuse=reuse,
                    branch_negated=branch_negated,
                    current_negated=current_negated,
                )
            else:
                child_clause, _ = self.build_filter(
                    child,
                    can_reuse=reuse,
                    branch_negated=branch_negated,
                    current_negated=current_negated,
                    allow_joins=True,
                    split_subq=False,
                    reuse_with_filtered_relation=True,
                )
            target_clause.add(child_clause, connector)
        return target_clause
    def add_filtered_relation(self, filtered_relation, alias):
        """
        Register 'filtered_relation' under 'alias' after validating that its
        condition only references the relation itself (no lookups in the
        relation name, no relations outside or deeper than relation_name).
        """
        filtered_relation.alias = alias
        lookups = dict(get_children_from_q(filtered_relation.condition))
        relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
            filtered_relation.relation_name
        )
        if relation_lookup_parts:
            raise ValueError(
                "FilteredRelation's relation_name cannot contain lookups "
                "(got %r)." % filtered_relation.relation_name
            )
        for lookup in chain(lookups):
            lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
            # Trailing lookup/transform names are not field references; drop
            # them before comparing the field path to relation_name.
            shift = 2 if not lookup_parts else 1
            lookup_field_path = lookup_field_parts[:-shift]
            for idx, lookup_field_part in enumerate(lookup_field_path):
                if len(relation_field_parts) > idx:
                    if relation_field_parts[idx] != lookup_field_part:
                        raise ValueError(
                            "FilteredRelation's condition doesn't support "
                            "relations outside the %r (got %r)."
                            % (filtered_relation.relation_name, lookup)
                        )
                else:
                    raise ValueError(
                        "FilteredRelation's condition doesn't support nested "
                        "relations deeper than the relation_name (got %r for "
                        "%r)." % (lookup, filtered_relation.relation_name)
                    )
        self._filtered_relations[filtered_relation.alias] = filtered_relation
    def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
        """
        Walk the list of names and turns them into PathInfo tuples. A single
        name in 'names' can generate multiple PathInfos (m2m, for example).

        'names' is the path of names to travel, 'opts' is the model Options we
        start the name resolving from, 'allow_many' is as for setup_joins().
        If fail_on_missing is set to True, then a name that can't be resolved
        will generate a FieldError.

        Return a list of PathInfo tuples. In addition return the final field
        (the last used join field) and target (which is a field guaranteed to
        contain the same value as the final field). Finally, return those names
        that weren't found (which are likely transforms and the final lookup).
        """
        path, names_with_path = [], []
        for pos, name in enumerate(names):
            cur_names_with_path = (name, [])
            # "pk" is an alias for the model's primary key field.
            if name == "pk":
                name = opts.pk.name

            field = None
            filtered_relation = None
            try:
                field = opts.get_field(name)
            except FieldDoesNotExist:
                # Not a model field: try annotations, then (for the first
                # name only) registered filtered relations.
                if name in self.annotation_select:
                    field = self.annotation_select[name].output_field
                elif name in self._filtered_relations and pos == 0:
                    filtered_relation = self._filtered_relations[name]
                    if LOOKUP_SEP in filtered_relation.relation_name:
                        parts = filtered_relation.relation_name.split(LOOKUP_SEP)
                        filtered_relation_path, field, _, _ = self.names_to_path(
                            parts,
                            opts,
                            allow_many,
                            fail_on_missing,
                        )
                        path.extend(filtered_relation_path[:-1])
                    else:
                        field = opts.get_field(filtered_relation.relation_name)
            if field is not None:
                # Fields that contain one-to-many relations with a generic
                # model (like a GenericForeignKey) cannot generate reverse
                # relations and therefore cannot be used for reverse querying.
                if field.is_relation and not field.related_model:
                    raise FieldError(
                        "Field %r does not generate an automatic reverse "
                        "relation and therefore cannot be used for reverse "
                        "querying. If it is a GenericForeignKey, consider "
                        "adding a GenericRelation." % name
                    )
                try:
                    model = field.model._meta.concrete_model
                except AttributeError:
                    # QuerySet.annotate() may introduce fields that aren't
                    # attached to a model.
                    model = None
            else:
                # We didn't find the current field, so move position back
                # one step.
                pos -= 1
                if pos == -1 or fail_on_missing:
                    available = sorted(
                        [
                            *get_field_names_from_opts(opts),
                            *self.annotation_select,
                            *self._filtered_relations,
                        ]
                    )
                    raise FieldError(
                        "Cannot resolve keyword '%s' into field. "
                        "Choices are: %s" % (name, ", ".join(available))
                    )
                break
            # Check if we need any joins for concrete inheritance cases (the
            # field lives in parent, but we are currently in one of its
            # children)
            if model is not opts.model:
                path_to_parent = opts.get_path_to_parent(model)
                if path_to_parent:
                    path.extend(path_to_parent)
                    cur_names_with_path[1].extend(path_to_parent)
                    opts = path_to_parent[-1].to_opts
            if hasattr(field, "get_path_info"):
                # Relational field: follow its PathInfos to the next model.
                pathinfos = field.get_path_info(filtered_relation)
                if not allow_many:
                    for inner_pos, p in enumerate(pathinfos):
                        if p.m2m:
                            cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1])
                            names_with_path.append(cur_names_with_path)
                            raise MultiJoin(pos + 1, names_with_path)
                last = pathinfos[-1]
                path.extend(pathinfos)
                final_field = last.join_field
                opts = last.to_opts
                targets = last.target_fields
                cur_names_with_path[1].extend(pathinfos)
                names_with_path.append(cur_names_with_path)
            else:
                # Local non-relational field.
                final_field = field
                targets = (field,)
                if fail_on_missing and pos + 1 != len(names):
                    raise FieldError(
                        "Cannot resolve keyword %r into field. Join on '%s'"
                        " not permitted." % (names[pos + 1], name)
                    )
                break
        return path, final_field, targets, names[pos + 1 :]
    def setup_joins(
        self,
        names,
        opts,
        alias,
        can_reuse=None,
        allow_many=True,
        reuse_with_filtered_relation=False,
    ):
        """
        Compute the necessary table joins for the passage through the fields
        given in 'names'. 'opts' is the Options class for the current model
        (which gives the table we are starting from), 'alias' is the alias for
        the table to start the joining from.

        The 'can_reuse' defines the reverse foreign key joins we can reuse. It
        can be None in which case all joins are reusable or a set of aliases
        that can be reused. Note that non-reverse foreign keys are always
        reusable when using setup_joins().

        The 'reuse_with_filtered_relation' can be used to force 'can_reuse'
        parameter and force the relation on the given connections.

        If 'allow_many' is False, then any reverse foreign key seen will
        generate a MultiJoin exception.

        Return the final field involved in the joins, the target field (used
        for any 'where' constraint), the final 'opts' value, the joins, the
        field path traveled to generate the joins, and a transform function
        that takes a field and alias and is equivalent to `field.get_col(alias)`
        in the simple case but wraps field transforms if they were included in
        names.

        The target field is the field containing the concrete value. Final
        field can be something different, for example foreign key pointing to
        that value. Final field is needed for example in some value
        conversions (convert 'obj' in fk__id=obj to pk val using the foreign
        key field for example).
        """
        joins = [alias]
        # The transform can't be applied yet, as joins must be trimmed later.
        # To avoid making every caller of this method look up transforms
        # directly, compute transforms here and create a partial that converts
        # fields to the appropriate wrapped version.

        def final_transformer(field, alias):
            if not self.alias_cols:
                alias = None
            return field.get_col(alias)

        # Try resolving all the names as fields first. If there's an error,
        # treat trailing names as lookups until a field can be resolved.
        last_field_exception = None
        for pivot in range(len(names), 0, -1):
            try:
                path, final_field, targets, rest = self.names_to_path(
                    names[:pivot],
                    opts,
                    allow_many,
                    fail_on_missing=True,
                )
            except FieldError as exc:
                if pivot == 1:
                    # The first item cannot be a lookup, so it's safe
                    # to raise the field error here.
                    raise
                else:
                    last_field_exception = exc
            else:
                # The transforms are the remaining items that couldn't be
                # resolved into fields.
                transforms = names[pivot:]
                break
        for name in transforms:

            # Each transform wraps the previous transformer (name/previous are
            # bound as keyword-only defaults via functools.partial below).
            def transform(field, alias, *, name, previous):
                try:
                    wrapped = previous(field, alias)
                    return self.try_transform(wrapped, name)
                except FieldError:
                    # FieldError is raised if the transform doesn't exist.
                    if isinstance(final_field, Field) and last_field_exception:
                        raise last_field_exception
                    else:
                        raise

            final_transformer = functools.partial(
                transform, name=name, previous=final_transformer
            )
        # Then, add the path to the query's joins. Note that we can't trim
        # joins at this stage - we will need the information about join type
        # of the trimmed joins.
        for join in path:
            if join.filtered_relation:
                filtered_relation = join.filtered_relation.clone()
                table_alias = filtered_relation.alias
            else:
                filtered_relation = None
                table_alias = None
            opts = join.to_opts
            if join.direct:
                nullable = self.is_nullable(join.join_field)
            else:
                nullable = True
            connection = Join(
                opts.db_table,
                alias,
                table_alias,
                INNER,
                join.join_field,
                nullable,
                filtered_relation=filtered_relation,
            )
            # Only m2m joins (or forced filtered-relation joins) may reuse
            # existing aliases from can_reuse.
            reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
            alias = self.join(
                connection,
                reuse=reuse,
                reuse_with_filtered_relation=reuse_with_filtered_relation,
            )
            joins.append(alias)
            if filtered_relation:
                filtered_relation.path = joins[:]
        return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
1842 def trim_joins(self, targets, joins, path):
1843 """
1844 The 'target' parameter is the final field being joined to, 'joins'
1845 is the full list of join aliases. The 'path' contain the PathInfos
1846 used to create the joins.
1848 Return the final target field and table alias and the new active
1849 joins.
1851 Always trim any direct join if the target column is already in the
1852 previous table. Can't trim reverse joins as it's unknown if there's
1853 anything on the other side of the join.
1854 """
1855 joins = joins[:]
1856 for pos, info in enumerate(reversed(path)):
1857 if len(joins) == 1 or not info.direct:
1858 break
1859 if info.filtered_relation: 1859 ↛ 1860line 1859 didn't jump to line 1860, because the condition on line 1859 was never true
1860 break
1861 join_targets = {t.column for t in info.join_field.foreign_related_fields}
1862 cur_targets = {t.column for t in targets}
1863 if not cur_targets.issubset(join_targets):
1864 break
1865 targets_dict = {
1866 r[1].column: r[0]
1867 for r in info.join_field.related_fields
1868 if r[1].column in cur_targets
1869 }
1870 targets = tuple(targets_dict[t.column] for t in targets)
1871 self.unref_alias(joins.pop())
1872 return targets, joins[-1], joins
1874 @classmethod
1875 def _gen_cols(cls, exprs, include_external=False):
1876 for expr in exprs:
1877 if isinstance(expr, Col):
1878 yield expr
1879 elif include_external and callable(
1880 getattr(expr, "get_external_cols", None)
1881 ):
1882 yield from expr.get_external_cols()
1883 elif hasattr(expr, "get_source_expressions"):
1884 yield from cls._gen_cols(
1885 expr.get_source_expressions(),
1886 include_external=include_external,
1887 )
1889 @classmethod
1890 def _gen_col_aliases(cls, exprs):
1891 yield from (expr.alias for expr in cls._gen_cols(exprs))
    def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
        """
        Resolve 'name' into an expression: an existing annotation (as-is, or
        as a Ref into the subquery when summarizing), an annotation with
        trailing transforms applied, or a column reached by joining along the
        lookup path. Raise FieldError when joins are required but disallowed.
        """
        annotation = self.annotations.get(name)
        if annotation is not None:
            if not allow_joins:
                # Reject annotations whose columns live on joined tables.
                for alias in self._gen_col_aliases([annotation]):
                    if isinstance(self.alias_map[alias], Join):
                        raise FieldError(
                            "Joined field references are not permitted in this query"
                        )
            if summarize:
                # Summarize currently means we are doing an aggregate() query
                # which is executed as a wrapped subquery if any of the
                # aggregate() elements reference an existing annotation. In
                # that case we need to return a Ref to the subquery's annotation.
                if name not in self.annotation_select:
                    raise FieldError(
                        "Cannot aggregate over the '%s' alias. Use annotate() "
                        "to promote it." % name
                    )
                return Ref(name, self.annotation_select[name])
            else:
                return annotation
        else:
            field_list = name.split(LOOKUP_SEP)
            annotation = self.annotations.get(field_list[0])
            if annotation is not None:
                # Annotation followed by transforms (e.g. "myannot__lower").
                for transform in field_list[1:]:
                    annotation = self.try_transform(annotation, transform)
                return annotation
            join_info = self.setup_joins(
                field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse
            )
            targets, final_alias, join_list = self.trim_joins(
                join_info.targets, join_info.joins, join_info.path
            )
            if not allow_joins and len(join_list) > 1:
                raise FieldError(
                    "Joined field references are not permitted in this query"
                )
            if len(targets) > 1:
                raise FieldError(
                    "Referencing multicolumn fields with F() objects isn't supported"
                )
            # Verify that the last lookup in name is a field or a transform:
            # transform_function() raises FieldError if not.
            transform = join_info.transform_function(targets[0], final_alias)
            if reuse is not None:
                reuse.update(join_list)
            return transform
    def split_exclude(self, filter_expr, can_reuse, names_with_path):
        """
        When doing an exclude against any kind of N-to-many relation, we need
        to use a subquery. This method constructs the nested query, given the
        original exclude filter (filter_expr) and the portion up to the first
        N-to-many relation field.

        For example, if the origin filter is ~Q(child__name='foo'), filter_expr
        is ('child__name', 'foo') and can_reuse is a set of joins usable for
        filters in the original query.

        We will turn this into equivalent of:
            WHERE NOT EXISTS(
                SELECT 1
                FROM child
                WHERE name = 'foo' AND child.parent_id = parent.id
                LIMIT 1
            )
        """
        # Generate the inner query.
        query = Query(self.model)
        query._filtered_relations = self._filtered_relations
        filter_lhs, filter_rhs = filter_expr
        if isinstance(filter_rhs, OuterRef):
            # Re-wrap so the reference points one query level further out.
            filter_rhs = OuterRef(filter_rhs)
        elif isinstance(filter_rhs, F):
            # An F() in the outer query becomes an OuterRef in the subquery.
            filter_rhs = OuterRef(filter_rhs.name)
        query.add_filter(filter_lhs, filter_rhs)
        query.clear_ordering(force=True)
        # Try to have as simple as possible subquery -> trim leading joins from
        # the subquery.
        trimmed_prefix, contains_louter = query.trim_start(names_with_path)

        col = query.select[0]
        select_field = col.target
        alias = col.alias
        if alias in can_reuse:
            pk = select_field.model._meta.pk
            # Need to add a restriction so that outer query's filters are in effect for
            # the subquery, too.
            query.bump_prefix(self)
            lookup_class = select_field.get_lookup("exact")
            # Note that the query.select[0].alias is different from alias
            # due to bump_prefix above.
            lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))
            query.where.add(lookup, AND)
            query.external_aliases[alias] = True

        lookup_class = select_field.get_lookup("exact")
        lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
        query.where.add(lookup, AND)
        condition, needed_inner = self.build_filter(Exists(query))

        if contains_louter:
            or_null_condition, _ = self.build_filter(
                ("%s__isnull" % trimmed_prefix, True),
                current_negated=True,
                branch_negated=True,
                can_reuse=can_reuse,
            )
            condition.add(or_null_condition, OR)
            # Note that the end result will be:
            # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
            # This might look crazy but due to how IN works, this seems to be
            # correct. If the IS NOT NULL check is removed then outercol NOT
            # IN will return UNKNOWN. If the IS NULL check is removed, then if
            # outercol IS NULL we will not match the row.
        return condition, needed_inner
2012 def set_empty(self):
2013 self.where.add(NothingNode(), AND)
2014 for query in self.combined_queries: 2014 ↛ 2015line 2014 didn't jump to line 2015, because the loop on line 2014 never started
2015 query.set_empty()
2017 def is_empty(self):
2018 return any(isinstance(c, NothingNode) for c in self.where.children)
2020 def set_limits(self, low=None, high=None):
2021 """
2022 Adjust the limits on the rows retrieved. Use low/high to set these,
2023 as it makes it more Pythonic to read and write. When the SQL query is
2024 created, convert them to the appropriate offset and limit values.
2026 Apply any limits passed in here to the existing constraints. Add low
2027 to the current low value and clamp both to any existing high value.
2028 """
2029 if high is not None: 2029 ↛ 2034line 2029 didn't jump to line 2034, because the condition on line 2029 was never false
2030 if self.high_mark is not None: 2030 ↛ 2031line 2030 didn't jump to line 2031, because the condition on line 2030 was never true
2031 self.high_mark = min(self.high_mark, self.low_mark + high)
2032 else:
2033 self.high_mark = self.low_mark + high
2034 if low is not None:
2035 if self.high_mark is not None: 2035 ↛ 2038line 2035 didn't jump to line 2038, because the condition on line 2035 was never false
2036 self.low_mark = min(self.high_mark, self.low_mark + low)
2037 else:
2038 self.low_mark = self.low_mark + low
2040 if self.low_mark == self.high_mark: 2040 ↛ 2041line 2040 didn't jump to line 2041, because the condition on line 2040 was never true
2041 self.set_empty()
2043 def clear_limits(self):
2044 """Clear any existing limits."""
2045 self.low_mark, self.high_mark = 0, None
2047 @property
2048 def is_sliced(self):
2049 return self.low_mark != 0 or self.high_mark is not None
2051 def has_limit_one(self):
2052 return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
2054 def can_filter(self):
2055 """
2056 Return True if adding filters to this instance is still possible.
2058 Typically, this means no limits or offsets have been put on the results.
2059 """
2060 return not self.is_sliced
2062 def clear_select_clause(self):
2063 """Remove all fields from SELECT clause."""
2064 self.select = ()
2065 self.default_cols = False
2066 self.select_related = False
2067 self.set_extra_mask(())
2068 self.set_annotation_mask(())
2070 def clear_select_fields(self):
2071 """
2072 Clear the list of fields to select (but not extra_select columns).
2073 Some queryset types completely replace any existing list of select
2074 columns.
2075 """
2076 self.select = ()
2077 self.values_select = ()
2079 def add_select_col(self, col, name):
2080 self.select += (col,)
2081 self.values_select += (name,)
2083 def set_select(self, cols):
2084 self.default_cols = False
2085 self.select = tuple(cols)
2087 def add_distinct_fields(self, *field_names):
2088 """
2089 Add and resolve the given fields to the query's "distinct on" clause.
2090 """
2091 self.distinct_fields = field_names
2092 self.distinct = True
    def add_fields(self, field_names, allow_m2m=True):
        """
        Add the given (model) fields to the select set. Add the field names in
        the order specified.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()

        try:
            cols = []
            for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there is no existing joins, use outer join.
                join_info = self.setup_joins(
                    name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
                )
                targets, final_alias, joins = self.trim_joins(
                    join_info.targets,
                    join_info.joins,
                    join_info.path,
                )
                for target in targets:
                    cols.append(join_info.transform_function(target, final_alias))
            if cols:
                self.set_select(cols)
        except MultiJoin:
            # NOTE: the except blocks rely on 'name' leaking from the for
            # loop above - it's the field name that failed to resolve.
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            if LOOKUP_SEP in name:
                # For lookups spanning over relationships, show the error
                # from the model on which the lookup failed.
                raise
            elif name in self.annotations:
                raise FieldError(
                    "Cannot select the '%s' alias. Use annotate() to promote "
                    "it." % name
                )
            else:
                names = sorted(
                    [
                        *get_field_names_from_opts(opts),
                        *self.extra,
                        *self.annotation_select,
                        *self._filtered_relations,
                    ]
                )
                raise FieldError(
                    "Cannot resolve keyword %r into field. "
                    "Choices are: %s" % (name, ", ".join(names))
                )
    def add_ordering(self, *ordering):
        """
        Add items from the 'ordering' sequence to the query's "order by"
        clause. These items are either field names (not column names) --
        possibly with a direction prefix ('-' or '?') -- or OrderBy
        expressions.

        If 'ordering' is empty, clear all ordering from the query.
        """
        errors = []
        for item in ordering:
            if isinstance(item, str):
                # Random ordering needs no further validation.
                if item == "?":
                    continue
                if item.startswith("-"):
                    item = item[1:]
                # Annotations and extra-select aliases are valid as-is.
                if item in self.annotations:
                    continue
                if self.extra and item in self.extra:
                    continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError will be raise if it's not.
                self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
            elif not hasattr(item, "resolve_expression"):
                errors.append(item)
            if getattr(item, "contains_aggregate", False):
                raise FieldError(
                    "Using an aggregate in order_by() without also including "
                    "it in annotate() is not allowed: %s" % item
                )
        if errors:
            raise FieldError("Invalid order_by arguments: %s" % errors)
        if ordering:
            self.order_by += ordering
        else:
            # An empty call removes even the model's default ordering.
            self.default_ordering = False
2182 def clear_ordering(self, force=False, clear_default=True):
2183 """
2184 Remove any ordering settings if the current query allows it without
2185 side effects, set 'force' to True to clear the ordering regardless.
2186 If 'clear_default' is True, there will be no ordering in the resulting
2187 query (not even the model's default).
2188 """
2189 if not force and ( 2189 ↛ 2192line 2189 didn't jump to line 2192, because the condition on line 2189 was never true
2190 self.is_sliced or self.distinct_fields or self.select_for_update
2191 ):
2192 return
2193 self.order_by = ()
2194 self.extra_order_by = ()
2195 if clear_default:
2196 self.default_ordering = False
2198 def set_group_by(self, allow_aliases=True):
2199 """
2200 Expand the GROUP BY clause required by the query.
2202 This will usually be the set of all non-aggregate fields in the
2203 return data. If the database backend supports grouping by the
2204 primary key, and the query would be equivalent, the optimization
2205 will be made automatically.
2206 """
2207 # Column names from JOINs to check collisions with aliases.
2208 if allow_aliases:
2209 column_names = set()
2210 seen_models = set()
2211 for join in list(self.alias_map.values())[1:]: # Skip base table.
2212 model = join.join_field.related_model
2213 if model not in seen_models:
2214 column_names.update(
2215 {field.column for field in model._meta.local_concrete_fields}
2216 )
2217 seen_models.add(model)
2219 group_by = list(self.select)
2220 if self.annotation_select:
2221 for alias, annotation in self.annotation_select.items():
2222 if not allow_aliases or alias in column_names:
2223 alias = None
2224 group_by_cols = annotation.get_group_by_cols(alias=alias)
2225 group_by.extend(group_by_cols)
2226 self.group_by = tuple(group_by)
2228 def add_select_related(self, fields):
2229 """
2230 Set up the select_related data structure so that we only select
2231 certain related models (as opposed to all models, when
2232 self.select_related=True).
2233 """
2234 if isinstance(self.select_related, bool):
2235 field_dict = {}
2236 else:
2237 field_dict = self.select_related
2238 for field in fields:
2239 d = field_dict
2240 for part in field.split(LOOKUP_SEP):
2241 d = d.setdefault(part, {})
2242 self.select_related = field_dict
    def add_extra(self, select, select_params, where, params, tables, order_by):
        """
        Add data to the various extra_* attributes for user-created additions
        to the query.
        """
        if select:
            # We need to pair any placeholder markers in the 'select'
            # dictionary with their parameters in 'select_params' so that
            # subsequent updates to the select dictionary also adjust the
            # parameters appropriately.
            select_pairs = {}
            if select_params:
                param_iter = iter(select_params)
            else:
                param_iter = iter([])
            for name, entry in select.items():
                self.check_alias(name)
                entry = str(entry)
                entry_params = []
                pos = entry.find("%s")
                while pos != -1:
                    # A "%%s" is an escaped literal, not a placeholder: only
                    # consume a parameter for an unescaped "%s".
                    if pos == 0 or entry[pos - 1] != "%":
                        entry_params.append(next(param_iter))
                    pos = entry.find("%s", pos + 2)
                select_pairs[name] = (entry, entry_params)
            self.extra.update(select_pairs)
        if where or params:
            self.where.add(ExtraWhere(where, params), AND)
        if tables:
            self.extra_tables += tuple(tables)
        if order_by:
            self.extra_order_by = order_by
2277 def clear_deferred_loading(self):
2278 """Remove any fields from the deferred loading set."""
2279 self.deferred_loading = (frozenset(), True)
2281 def add_deferred_loading(self, field_names):
2282 """
2283 Add the given list of model field names to the set of fields to
2284 exclude from loading from the database when automatic column selection
2285 is done. Add the new field names to any existing field names that
2286 are deferred (or removed from any existing field names that are marked
2287 as the only ones for immediate loading).
2288 """
2289 # Fields on related models are stored in the literal double-underscore
2290 # format, so that we can use a set datastructure. We do the foo__bar
2291 # splitting and handling when computing the SQL column names (as part of
2292 # get_columns()).
2293 existing, defer = self.deferred_loading
2294 if defer:
2295 # Add to existing deferred names.
2296 self.deferred_loading = existing.union(field_names), True
2297 else:
2298 # Remove names from the set of any existing "immediate load" names.
2299 if new_existing := existing.difference(field_names):
2300 self.deferred_loading = new_existing, False
2301 else:
2302 self.clear_deferred_loading()
2303 if new_only := set(field_names).difference(existing):
2304 self.deferred_loading = new_only, True
2306 def add_immediate_loading(self, field_names):
2307 """
2308 Add the given list of model field names to the set of fields to
2309 retrieve when the SQL is executed ("immediate loading" fields). The
2310 field names replace any existing immediate loading field names. If
2311 there are field names already specified for deferred loading, remove
2312 those names from the new field_names before storing the new names
2313 for immediate loading. (That is, immediate loading overrides any
2314 existing immediate values, but respects existing deferrals.)
2315 """
2316 existing, defer = self.deferred_loading
2317 field_names = set(field_names)
2318 if "pk" in field_names: 2318 ↛ 2319line 2318 didn't jump to line 2319, because the condition on line 2318 was never true
2319 field_names.remove("pk")
2320 field_names.add(self.get_meta().pk.name)
2322 if defer: 2322 ↛ 2328line 2322 didn't jump to line 2328, because the condition on line 2322 was never false
2323 # Remove any existing deferred names from the current set before
2324 # setting the new names.
2325 self.deferred_loading = field_names.difference(existing), False
2326 else:
2327 # Replace any existing "immediate load" field names.
2328 self.deferred_loading = frozenset(field_names), False
2330 def get_loaded_field_names(self):
2331 """
2332 If any fields are marked to be deferred, return a dictionary mapping
2333 models to a set of names in those fields that will be loaded. If a
2334 model is not in the returned dictionary, none of its fields are
2335 deferred.
2337 If no fields are marked for deferral, return an empty dictionary.
2338 """
2339 # We cache this because we call this function multiple times
2340 # (compiler.fill_related_selections, query.iterator)
2341 try:
2342 return self._loaded_field_names_cache
2343 except AttributeError:
2344 collection = {}
2345 self.deferred_to_data(collection, self.get_loaded_field_names_cb)
2346 self._loaded_field_names_cache = collection
2347 return collection
2349 def get_loaded_field_names_cb(self, target, model, fields):
2350 """Callback used by get_deferred_field_names()."""
2351 target[model] = {f.attname for f in fields}
2353 def set_annotation_mask(self, names):
2354 """Set the mask of annotations that will be returned by the SELECT."""
2355 if names is None: 2355 ↛ 2356line 2355 didn't jump to line 2356, because the condition on line 2355 was never true
2356 self.annotation_select_mask = None
2357 else:
2358 self.annotation_select_mask = set(names)
2359 self._annotation_select_cache = None
def append_annotation_mask(self, names):
    # A None mask already selects every annotation; nothing to extend then.
    mask = self.annotation_select_mask
    if mask is not None:
        self.set_annotation_mask(mask.union(names))
def set_extra_mask(self, names):
    """
    Set the mask of extra select items that will be returned by SELECT.
    Don't remove them from the Query since they might be used later.
    """
    # None means "unmasked": every extra select item is returned.
    self.extra_select_mask = set(names) if names is not None else None
    # Invalidate the memoized selection; extra_select rebuilds it lazily.
    self._extra_select_cache = None
def set_values(self, fields):
    """
    Restrict the SELECT clause to the given names, as used by values()
    and values_list().

    ``fields`` may mix concrete field names, extra-select aliases, and
    annotation aliases; an empty/falsy ``fields`` selects every concrete
    field on the model. Resets select_related, deferred loading, and any
    previously selected columns before applying the new selection.
    """
    self.select_related = False
    self.clear_deferred_loading()
    self.clear_select_fields()

    if fields:
        field_names = []
        extra_names = []
        annotation_names = []
        if not self.extra and not self.annotations:
            # Shortcut - if there are no extra or annotations, then
            # the values() clause must be just field names.
            field_names = list(fields)
        else:
            # Columns are no longer implicit; classify each requested name
            # into extra select, annotation, or plain model field.
            self.default_cols = False
            for f in fields:
                if f in self.extra_select:
                    extra_names.append(f)
                elif f in self.annotation_select:
                    annotation_names.append(f)
                else:
                    field_names.append(f)
            # Mask the extra/annotation selections down to what was asked for.
            self.set_extra_mask(extra_names)
            self.set_annotation_mask(annotation_names)
        selected = frozenset(field_names + extra_names + annotation_names)
    else:
        # No explicit fields: select all concrete fields of the model.
        field_names = [f.attname for f in self.model._meta.concrete_fields]
        selected = frozenset(field_names)
    # Selected annotations must be known before setting the GROUP BY
    # clause.
    if self.group_by is True:
        # group_by is still the "implicit" sentinel: materialize it from the
        # concrete fields before the SELECT list is replaced.
        self.add_fields(
            (f.attname for f in self.model._meta.concrete_fields), False
        )
        # Disable GROUP BY aliases to avoid orphaning references to the
        # SELECT clause which is about to be cleared.
        self.set_group_by(allow_aliases=False)
        self.clear_select_fields()
    elif self.group_by:
        # Resolve GROUP BY annotation references if they are not part of
        # the selected fields anymore.
        group_by = []
        for expr in self.group_by:
            if isinstance(expr, Ref) and expr.refs not in selected:
                expr = self.annotations[expr.refs]
            group_by.append(expr)
        self.group_by = tuple(group_by)

    self.values_select = tuple(field_names)
    self.add_fields(field_names, True)
@property
def annotation_select(self):
    """
    Return the dictionary of aggregate columns that are not masked and
    should be used in the SELECT clause. Cache this result for performance.
    """
    if self._annotation_select_cache is not None:
        return self._annotation_select_cache
    if not self.annotations:
        return {}
    if self.annotation_select_mask is None:
        # No mask: every annotation is selected; nothing to cache.
        return self.annotations
    mask = self.annotation_select_mask
    self._annotation_select_cache = {
        name: annotation
        for name, annotation in self.annotations.items()
        if name in mask
    }
    return self._annotation_select_cache
@property
def extra_select(self):
    """
    Return the dictionary of extra select items that are not masked, to be
    used in the SELECT clause. Cache the masked subset for performance.
    """
    if self._extra_select_cache is not None:
        return self._extra_select_cache
    if not self.extra:
        return {}
    if self.extra_select_mask is None:
        # No mask: every extra item is selected; nothing to cache.
        return self.extra
    mask = self.extra_select_mask
    self._extra_select_cache = {
        name: item for name, item in self.extra.items() if name in mask
    }
    return self._extra_select_cache
def trim_start(self, names_with_path):
    """
    Trim joins from the start of the join path. The candidates for trim
    are the PathInfos in names_with_path structure that are m2m joins.

    Also set the select column so the start matches the join.

    This method is meant to be used for generating the subquery joins &
    cols in split_exclude().

    Return a lookup usable for doing outerq.filter(lookup=self) and a
    boolean indicating if the joins in the prefix contain a LEFT OUTER join.
    """
    all_paths = []
    for _, paths in names_with_path:
        all_paths.extend(paths)
    contains_louter = False
    # Trim and operate only on tables that were generated for
    # the lookup part of the query. That is, avoid trimming
    # joins generated for F() expressions.
    lookup_tables = [
        t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
    ]
    # Walk the path prefix up to (not including) the first m2m join,
    # unreferencing each leading alias so it can be dropped.
    for trimmed_paths, path in enumerate(all_paths):
        if path.m2m:
            break
        if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
            contains_louter = True
        alias = lookup_tables[trimmed_paths]
        self.unref_alias(alias)
    # The path.join_field is a Rel, lets get the other side's field
    # NOTE(review): relies on `path`/`trimmed_paths` leaking from the loop;
    # assumes all_paths is non-empty — confirm callers guarantee this.
    join_field = path.join_field.field
    # Build the filter prefix: the lookup names whose paths fit entirely
    # within the trimmed portion, plus the related field's name.
    paths_in_prefix = trimmed_paths
    trimmed_prefix = []
    for name, path in names_with_path:
        if paths_in_prefix - len(path) < 0:
            break
        trimmed_prefix.append(name)
        paths_in_prefix -= len(path)
    trimmed_prefix.append(join_field.foreign_related_fields[0].name)
    trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
    # Lets still see if we can trim the first join from the inner query
    # (that is, self). We can't do this for:
    # - LEFT JOINs because we would miss those rows that have nothing on
    #   the outer side,
    # - INNER JOINs from filtered relations because we would miss their
    #   filters.
    first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
    if first_join.join_type != LOUTER and not first_join.filtered_relation:
        select_fields = [r[0] for r in join_field.related_fields]
        select_alias = lookup_tables[trimmed_paths + 1]
        self.unref_alias(lookup_tables[trimmed_paths])
        # Keep any extra join restriction (e.g. from generic relations)
        # as a WHERE condition since the join itself is being removed.
        extra_restriction = join_field.get_extra_restriction(
            None, lookup_tables[trimmed_paths + 1]
        )
        if extra_restriction:
            self.where.add(extra_restriction, AND)
    else:
        # TODO: It might be possible to trim more joins from the start of the
        # inner query if it happens to have a longer join chain containing the
        # values in select_fields. Lets punt this one for now.
        select_fields = [r[1] for r in join_field.related_fields]
        select_alias = lookup_tables[trimmed_paths]
        # The found starting point is likely a Join instead of a BaseTable reference.
        # But the first entry in the query's FROM clause must not be a JOIN.
        for table in self.alias_map:
            if self.alias_refcount[table] > 0:
                self.alias_map[table] = BaseTable(
                    self.alias_map[table].table_name, table
                )
                break
    self.set_select([f.get_col(select_alias) for f in select_fields])
    return trimmed_prefix, contains_louter
def is_nullable(self, field):
    """
    Check if the given field should be treated as nullable.

    Some backends treat '' as null and Django treats such fields as
    nullable for those backends. In such situations field.null can be
    False even if we should treat the field as nullable.
    """
    # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
    # (nor should it have) knowledge of which connection is going to be
    # used. The proper fix would be to defer all decisions where
    # is_nullable() is needed to the compiler stage, but that is not easy
    # to do currently.
    features = connections[DEFAULT_DB_ALIAS].features
    treats_empty_as_null = (
        features.interprets_empty_strings_as_nulls and field.empty_strings_allowed
    )
    return treats_empty_as_null or field.null
def get_order_dir(field, default="ASC"):
    """
    Return the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    same_dir, opposite_dir = ORDER_DIR[default]
    if field[0] == "-":
        # Strip the leading '-' and sort the opposite way.
        return field[1:], opposite_dir
    return field, same_dir
def add_to_dict(data, key, value):
    """
    Add "value" to the set of values for "key", whether or not "key" already
    exists.
    """
    try:
        data[key].add(value)
    except KeyError:
        # First value for this key: start a fresh set.
        data[key] = {value}
def is_reverse_o2o(field):
    """
    Check if the given field is reverse-o2o. The field is expected to be some
    sort of relation field or related object.
    """
    if not field.is_relation:
        return False
    # A reverse one-to-one is a non-concrete one-to-one relation.
    return field.one_to_one and not field.concrete
class JoinPromoter:
    """
    A class to abstract away join promotion problems for complex filter
    conditions.
    """

    def __init__(self, connector, num_children, negated):
        self.connector = connector
        self.negated = negated
        if negated:
            # De Morgan: NOT (a AND b) behaves like (NOT a) OR (NOT b) for
            # join promotion purposes, so flip the connector.
            self.effective_connector = OR if connector == AND else AND
        else:
            self.effective_connector = self.connector
        self.num_children = num_children
        # Maps of table alias to how many times it is seen as required for
        # inner and/or outer joins.
        self.votes = Counter()

    def add_votes(self, votes):
        """
        Add single vote per item to self.votes. Parameter can be any
        iterable.
        """
        self.votes.update(votes)

    def update_join_types(self, query):
        """
        Change join types so that the generated query is as efficient as
        possible, but still correct. So, change as many joins as possible
        to INNER, but don't make OUTER joins INNER if that could remove
        results from the query.
        """
        promote = set()
        demote = set()
        for alias, vote_count in self.votes.items():
            # OR case: an INNER JOIN not required by every child could drop
            # valid rows. With rel_a__col=1 | rel_b__col=2, a row matching
            # only the rel_b condition disappears if an INNER join on rel_a
            # finds nothing, so promote such joins to LOUTER (a later pass
            # may demote them again).
            if self.effective_connector == "OR" and vote_count < self.num_children:
                promote.add(alias)
            # AND case: any child needing the join implies a matching row
            # must exist for the condition to hold (NULL=anything is always
            # false), so INNER is safe. OR case: if *every* child voted for
            # the join — e.g. rel_a__col__icontains=Alex |
            # rel_a__col__icontains=Russell — a missing row fails the whole
            # condition and the join can likewise be demoted to INNER.
            if self.effective_connector == "AND" or (
                self.effective_connector == "OR" and vote_count == self.num_children
            ):
                demote.add(alias)
            # Combined clauses such as (rel_a__col=1|rel_b__col=2) &
            # rel_a__col__gte=0 first promote rel_a and rel_b via the OR
            # pass, then the AND pass demotes rel_a again. That is correct:
            # if rel_a yields no rows, rel_a__col__gte=0 cannot be true, so
            # the whole clause is false and INNER is safe. The same holds
            # with the clauses swapped, or with the __gte clause replaced by
            # rel_a__col=1|rel_a__col=2.
        query.promote_joins(promote)
        query.demote_joins(demote)
        return demote