Coverage for /var/srv/projects/api.amasfac.comuna18.com/tmp/venv/lib/python3.9/site-packages/django/db/models/query.py: 38%
1141 statements
coverage.py v6.4.4, created at 2023-07-17 14:22 -0600
1"""
2The main QuerySet implementation. This provides the public API for the ORM.
3"""
5import copy
6import operator
7import warnings
8from itertools import chain
10import django
11from django.conf import settings
12from django.core import exceptions
13from django.db import (
14 DJANGO_VERSION_PICKLE_KEY,
15 IntegrityError,
16 NotSupportedError,
17 connections,
18 router,
19 transaction,
20)
21from django.db.models import AutoField, DateField, DateTimeField, sql
22from django.db.models.constants import LOOKUP_SEP
23from django.db.models.deletion import Collector
24from django.db.models.expressions import Case, Expression, F, Ref, Value, When
25from django.db.models.functions import Cast, Trunc
26from django.db.models.query_utils import FilteredRelation, Q
27from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
28from django.db.models.utils import create_namedtuple_class, resolve_callables
29from django.utils import timezone
30from django.utils.functional import cached_property, partition
32# The maximum number of results to fetch in a get() query.
33MAX_GET_RESULTS = 21
35# The maximum number of items to display in a QuerySet.__repr__
36REPR_OUTPUT_SIZE = 20
39class BaseIterable:
40 def __init__(
41 self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
42 ):
43 self.queryset = queryset
44 self.chunked_fetch = chunked_fetch
45 self.chunk_size = chunk_size
48class ModelIterable(BaseIterable):
49 """Iterable that yields a model instance for each row."""
51 def __iter__(self):
52 queryset = self.queryset
53 db = queryset.db
54 compiler = queryset.query.get_compiler(using=db)
55 # Execute the query. This will also fill compiler.select, klass_info,
56 # and annotations.
57 results = compiler.execute_sql(
58 chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
59 )
60 select, klass_info, annotation_col_map = (
61 compiler.select,
62 compiler.klass_info,
63 compiler.annotation_col_map,
64 )
65 model_cls = klass_info["model"]
66 select_fields = klass_info["select_fields"]
67 model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
68 init_list = [
69 f[0].target.attname for f in select[model_fields_start:model_fields_end]
70 ]
71 related_populators = get_related_populators(klass_info, select, db)
72 known_related_objects = [
73 (
74 field,
75 related_objs,
76 operator.attrgetter(
77 *[
78 field.attname
79 if from_field == "self"
80 else queryset.model._meta.get_field(from_field).attname
81 for from_field in field.from_fields
82 ]
83 ),
84 )
85 for field, related_objs in queryset._known_related_objects.items()
86 ]
87 for row in compiler.results_iter(results):
88 obj = model_cls.from_db(
89 db, init_list, row[model_fields_start:model_fields_end]
90 )
91 for rel_populator in related_populators:  # 91 ↛ 92: loop never started
92 rel_populator.populate(row, obj)
93 if annotation_col_map:  # 93 ↛ 94: condition never true
94 for attr_name, col_pos in annotation_col_map.items():
95 setattr(obj, attr_name, row[col_pos])
97 # Add the known related objects to the model.
98 for field, rel_objs, rel_getter in known_related_objects:
99 # Avoid overwriting objects loaded by, e.g., select_related().
100 if field.is_cached(obj):  # 100 ↛ 101: condition never true
101 continue
102 rel_obj_id = rel_getter(obj)
103 try:
104 rel_obj = rel_objs[rel_obj_id]
105 except KeyError:
106 pass # May happen in qs1 | qs2 scenarios.
107 else:
108 setattr(obj, field.name, rel_obj)
110 yield obj
113class ValuesIterable(BaseIterable):
114 """
115 Iterable returned by QuerySet.values() that yields a dict for each row.
116 """
118 def __iter__(self):
119 queryset = self.queryset
120 query = queryset.query
121 compiler = query.get_compiler(queryset.db)
123 # extra(select=...) cols are always at the start of the row.
124 names = [
125 *query.extra_select,
126 *query.values_select,
127 *query.annotation_select,
128 ]
129 indexes = range(len(names))
130 for row in compiler.results_iter(
131 chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
132 ):
133 yield {names[i]: row[i] for i in indexes}
136class ValuesListIterable(BaseIterable):
137 """
138 Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
139 for each row.
140 """
142 def __iter__(self):
143 queryset = self.queryset
144 query = queryset.query
145 compiler = query.get_compiler(queryset.db)
147 if queryset._fields:  # 147 ↛ 168: condition never false
148 # extra(select=...) cols are always at the start of the row.
149 names = [
150 *query.extra_select,
151 *query.values_select,
152 *query.annotation_select,
153 ]
154 fields = [
155 *queryset._fields,
156 *(f for f in query.annotation_select if f not in queryset._fields),
157 ]
158 if fields != names:  # 158 ↛ 160: condition never true
159 # Reorder according to fields.
160 index_map = {name: idx for idx, name in enumerate(names)}
161 rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
162 return map(
163 rowfactory,
164 compiler.results_iter(
165 chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
166 ),
167 )
168 return compiler.results_iter(
169 tuple_expected=True,
170 chunked_fetch=self.chunked_fetch,
171 chunk_size=self.chunk_size,
172 )
175class NamedValuesListIterable(ValuesListIterable):
176 """
177 Iterable returned by QuerySet.values_list(named=True) that yields a
178 namedtuple for each row.
179 """
181 def __iter__(self):
182 queryset = self.queryset
183 if queryset._fields:
184 names = queryset._fields
185 else:
186 query = queryset.query
187 names = [
188 *query.extra_select,
189 *query.values_select,
190 *query.annotation_select,
191 ]
192 tuple_class = create_namedtuple_class(*names)
193 new = tuple.__new__
194 for row in super().__iter__():
195 yield new(tuple_class, row)
198class FlatValuesListIterable(BaseIterable):
199 """
200 Iterable returned by QuerySet.values_list(flat=True) that yields single
201 values.
202 """
204 def __iter__(self):
205 queryset = self.queryset
206 compiler = queryset.query.get_compiler(queryset.db)
207 for row in compiler.results_iter(
208 chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
209 ):
210 yield row[0]
213class QuerySet:
214 """Represent a lazy database lookup for a set of objects."""
216 def __init__(self, model=None, query=None, using=None, hints=None):
217 self.model = model
218 self._db = using
219 self._hints = hints or {}
220 self._query = query or sql.Query(self.model)
221 self._result_cache = None
222 self._sticky_filter = False
223 self._for_write = False
224 self._prefetch_related_lookups = ()
225 self._prefetch_done = False
226 self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
227 self._iterable_class = ModelIterable
228 self._fields = None
229 self._defer_next_filter = False
230 self._deferred_filter = None
232 @property
233 def query(self):
234 if self._deferred_filter:
235 negate, args, kwargs = self._deferred_filter
236 self._filter_or_exclude_inplace(negate, args, kwargs)
237 self._deferred_filter = None
238 return self._query
240 @query.setter
241 def query(self, value):
242 if value.values_select:
243 self._iterable_class = ValuesIterable
244 self._query = value
246 def as_manager(cls):
247 # Address the circular dependency between `QuerySet` and `Manager`.
248 from django.db.models.manager import Manager
250 manager = Manager.from_queryset(cls)()
251 manager._built_with_as_manager = True
252 return manager
254 as_manager.queryset_only = True
255 as_manager = classmethod(as_manager)
257 ########################
258 # PYTHON MAGIC METHODS #
259 ########################
261 def __deepcopy__(self, memo):
262 """Don't populate the QuerySet's cache."""
263 obj = self.__class__()
264 for k, v in self.__dict__.items():
265 if k == "_result_cache":
266 obj.__dict__[k] = None
267 else:
268 obj.__dict__[k] = copy.deepcopy(v, memo)
269 return obj
271 def __getstate__(self):
272 # Force the cache to be fully populated.
273 self._fetch_all()
274 return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__}
276 def __setstate__(self, state):
277 pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
278 if pickled_version:
279 if pickled_version != django.__version__:
280 warnings.warn(
281 "Pickled queryset instance's Django version %s does not "
282 "match the current version %s."
283 % (pickled_version, django.__version__),
284 RuntimeWarning,
285 stacklevel=2,
286 )
287 else:
288 warnings.warn(
289 "Pickled queryset instance's Django version is not specified.",
290 RuntimeWarning,
291 stacklevel=2,
292 )
293 self.__dict__.update(state)
295 def __repr__(self):
296 data = list(self[: REPR_OUTPUT_SIZE + 1])
297 if len(data) > REPR_OUTPUT_SIZE:
298 data[-1] = "...(remaining elements truncated)..."
299 return "<%s %r>" % (self.__class__.__name__, data)
301 def __len__(self):
302 self._fetch_all()
303 return len(self._result_cache)
305 def __iter__(self):
306 """
307 The queryset iterator protocol uses three nested iterators in the
308 default case:
309 1. sql.compiler.execute_sql()
310 - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
311 using cursor.fetchmany(). This part is responsible for
312 doing some column masking, and returning the rows in chunks.
313 2. sql.compiler.results_iter()
314 - Returns one row at a time. At this point the rows are still just
315 tuples. In some cases the return values are converted to
316 Python values at this location.
317 3. self.iterator()
318 - Responsible for turning the rows into model objects.
319 """
320 self._fetch_all()
321 return iter(self._result_cache)
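    # Usage sketch (illustrative; ``Book`` is a hypothetical model): iterating
    # a QuerySet fills _result_cache once, and later len()/bool()/iteration
    # reuse the cache instead of re-querying.
    #
    #     qs = Book.objects.filter(published=True)
    #     titles = [b.title for b in qs]  # one SELECT; cache is filled
    #     n = len(qs)                     # no new query; uses the cache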
323 def __bool__(self):
324 self._fetch_all()
325 return bool(self._result_cache)
327 def __getitem__(self, k):
328 """Retrieve an item or slice from the set of results."""
329 if not isinstance(k, (int, slice)):  # 329 ↛ 330: condition never true
330 raise TypeError(
331 "QuerySet indices must be integers or slices, not %s."
332 % type(k).__name__
333 )
334 if (isinstance(k, int) and k < 0) or (  # 334 ↛ 341: condition never true
335 isinstance(k, slice)
336 and (
337 (k.start is not None and k.start < 0)
338 or (k.stop is not None and k.stop < 0)
339 )
340 ):
341 raise ValueError("Negative indexing is not supported.")
343 if self._result_cache is not None:  # 343 ↛ 344: condition never true
344 return self._result_cache[k]
346 if isinstance(k, slice):
347 qs = self._chain()
348 if k.start is not None:
349 start = int(k.start)
350 else:
351 start = None
352 if k.stop is not None:  # 352 ↛ 355: condition never false
353 stop = int(k.stop)
354 else:
355 stop = None
356 qs.query.set_limits(start, stop)
357 return list(qs)[:: k.step] if k.step else qs
359 qs = self._chain()
360 qs.query.set_limits(k, k + 1)
361 qs._fetch_all()
362 return qs._result_cache[0]
364 def __class_getitem__(cls, *args, **kwargs):
365 return cls
367 def __and__(self, other):
368 self._merge_sanity_check(other)
369 if isinstance(other, EmptyQuerySet):
370 return other
371 if isinstance(self, EmptyQuerySet):
372 return self
373 combined = self._chain()
374 combined._merge_known_related_objects(other)
375 combined.query.combine(other.query, sql.AND)
376 return combined
378 def __or__(self, other):
379 self._merge_sanity_check(other)
380 if isinstance(self, EmptyQuerySet):
381 return other
382 if isinstance(other, EmptyQuerySet):
383 return self
384 query = (
385 self
386 if self.query.can_filter()
387 else self.model._base_manager.filter(pk__in=self.values("pk"))
388 )
389 combined = query._chain()
390 combined._merge_known_related_objects(other)
391 if not other.query.can_filter():
392 other = other.model._base_manager.filter(pk__in=other.values("pk"))
393 combined.query.combine(other.query, sql.OR)
394 return combined
396 ####################################
397 # METHODS THAT DO DATABASE QUERIES #
398 ####################################
400 def _iterator(self, use_chunked_fetch, chunk_size):
401 yield from self._iterable_class(
402 self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size
403 )
405 def iterator(self, chunk_size=2000):
406 """
407 An iterator over the results from applying this QuerySet to the
408 database.
409 """
410 if chunk_size <= 0:
411 raise ValueError("Chunk size must be strictly positive.")
412 use_chunked_fetch = not connections[self.db].settings_dict.get(
413 "DISABLE_SERVER_SIDE_CURSORS"
414 )
415 return self._iterator(use_chunked_fetch, chunk_size)
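    # Usage sketch (illustrative; ``LogEntry`` and ``process`` are
    # hypothetical). iterator() streams rows without filling _result_cache,
    # which suits large result sets; chunk_size must be strictly positive.
    #
    #     for entry in LogEntry.objects.all().iterator(chunk_size=500):
    #         process(entry)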
417 def aggregate(self, *args, **kwargs):
418 """
419 Return a dictionary containing the calculations (aggregation)
420 over the current queryset.
422 If args is present, the expression is passed as a kwarg using
423 the Aggregate object's default alias.
424 """
425 if self.query.distinct_fields:  # 425 ↛ 426: condition never true
426 raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
427 self._validate_values_are_expressions(
428 (*args, *kwargs.values()), method_name="aggregate"
429 )
430 for arg in args:
431 # The default_alias property raises TypeError if default_alias
432 # can't be set automatically or AttributeError if it isn't an
433 # attribute.
434 try:
435 arg.default_alias
436 except (AttributeError, TypeError):
437 raise TypeError("Complex aggregates require an alias")
438 kwargs[arg.default_alias] = arg
440 query = self.query.chain()
441 for (alias, aggregate_expr) in kwargs.items():
442 query.add_annotation(aggregate_expr, alias, is_summary=True)
443 annotation = query.annotations[alias]
444 if not annotation.contains_aggregate:  # 444 ↛ 445: condition never true
445 raise TypeError("%s is not an aggregate expression" % alias)
446 for expr in annotation.get_source_expressions():
447 if (  # 447 ↛ 452: didn't jump to line 452
448 expr.contains_aggregate
449 and isinstance(expr, Ref)
450 and expr.refs in kwargs
451 ):
452 name = expr.refs
453 raise exceptions.FieldError(
454 "Cannot compute %s('%s'): '%s' is an aggregate"
455 % (annotation.name, name, name)
456 )
457 return query.get_aggregation(self.db, kwargs)
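    # Usage sketch (illustrative; ``Book`` with a ``price`` field is
    # hypothetical). A positional aggregate is keyed by its default alias;
    # a keyword argument chooses the alias explicitly:
    #
    #     from django.db.models import Avg, Max
    #     Book.objects.aggregate(Avg("price"))      # {"price__avg": ...}
    #     Book.objects.aggregate(top=Max("price"))  # {"top": ...}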
459 def count(self):
460 """
461 Perform a SELECT COUNT() and return the number of records as an
462 integer.
464 If the QuerySet is already fully cached, return the length of the
465 cached results set to avoid multiple SELECT COUNT(*) calls.
466 """
467 if self._result_cache is not None:  # 467 ↛ 468: condition never true
468 return len(self._result_cache)
470 return self.query.get_count(using=self.db)
472 def get(self, *args, **kwargs):
473 """
474 Perform the query and return a single object matching the given
475 keyword arguments.
476 """
477 if self.query.combinator and (args or kwargs):  # 477 ↛ 478: condition never true
478 raise NotSupportedError(
479 "Calling QuerySet.get(...) with filters after %s() is not "
480 "supported." % self.query.combinator
481 )
482 clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
483 if self.query.can_filter() and not self.query.distinct_fields:  # 483 ↛ 485: condition never false
484 clone = clone.order_by()
485 limit = None
486 if (  # 486 ↛ 492: didn't jump to line 492
487 not clone.query.select_for_update
488 or connections[clone.db].features.supports_select_for_update_with_limit
489 ):
490 limit = MAX_GET_RESULTS
491 clone.query.set_limits(high=limit)
492 num = len(clone)
493 if num == 1:
494 return clone._result_cache[0]
495 if not num:  # 495 ↛ 499: condition never false
496 raise self.model.DoesNotExist(
497 "%s matching query does not exist." % self.model._meta.object_name
498 )
499 raise self.model.MultipleObjectsReturned(
500 "get() returned more than one %s -- it returned %s!"
501 % (
502 self.model._meta.object_name,
503 num if not limit or num < limit else "more than %s" % (limit - 1),
504 )
505 )
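    # Usage sketch (illustrative; ``Book`` is a hypothetical model):
    #
    #     try:
    #         book = Book.objects.get(pk=1)
    #     except Book.DoesNotExist:
    #         book = None
    #     except Book.MultipleObjectsReturned:
    #         ...  # more than one row matched the filters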
507 def create(self, **kwargs):
508 """
509 Create a new object with the given kwargs, saving it to the database
510 and returning the created object.
511 """
512 obj = self.model(**kwargs)
513 self._for_write = True
514 obj.save(force_insert=True, using=self.db)
515 return obj
517 def _prepare_for_bulk_create(self, objs):
518 for obj in objs:
519 if obj.pk is None:  # 519 ↛ 522: condition never false
520 # Populate new PK values.
521 obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
522 obj._prepare_related_fields_for_save(operation_name="bulk_create")
524 def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
525 """
526 Insert each of the instances into the database. Do *not* call
527 save() on each of the instances, do not send any pre/post_save
528 signals, and do not set the primary key attribute if it is an
529 autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
530 Multi-table models are not supported.
531 """
532 # When you bulk insert you don't get the primary keys back (if it's an
533 # autoincrement, except if can_return_rows_from_bulk_insert=True), so
534 # you can't insert into the child tables which reference it. There
535 # are two workarounds:
536 # 1) This could be implemented if you didn't have an autoincrement pk
537 # 2) You could do it by doing O(n) normal inserts into the parent
538 # tables to get the primary keys back and then doing a single bulk
539 # insert into the childmost table.
540 # We currently set the primary keys on the objects when using
541 # PostgreSQL via the RETURNING ID clause. It should be possible for
542 # Oracle as well, but the semantics for extracting the primary keys are
543 # trickier, so it's not done yet.
544 if batch_size is not None and batch_size <= 0:  # 544 ↛ 545: condition never true
545 raise ValueError("Batch size must be a positive integer.")
546 # Check that the parents share the same concrete model with our
547 # model to detect the inheritance pattern ConcreteGrandParent ->
548 # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
549 # would not identify that case as involving multiple tables.
550 for parent in self.model._meta.get_parent_list():  # 550 ↛ 551: loop never started
551 if parent._meta.concrete_model is not self.model._meta.concrete_model:
552 raise ValueError("Can't bulk create a multi-table inherited model")
553 if not objs:
554 return objs
555 self._for_write = True
556 connection = connections[self.db]
557 opts = self.model._meta
558 fields = opts.concrete_fields
559 objs = list(objs)
560 self._prepare_for_bulk_create(objs)
561 with transaction.atomic(using=self.db, savepoint=False):
562 objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
563 if objs_with_pk:  # 563 ↛ 564: condition never true
564 returned_columns = self._batched_insert(
565 objs_with_pk,
566 fields,
567 batch_size,
568 ignore_conflicts=ignore_conflicts,
569 )
570 for obj_with_pk, results in zip(objs_with_pk, returned_columns):
571 for result, field in zip(results, opts.db_returning_fields):
572 if field != opts.pk:
573 setattr(obj_with_pk, field.attname, result)
574 for obj_with_pk in objs_with_pk:
575 obj_with_pk._state.adding = False
576 obj_with_pk._state.db = self.db
577 if objs_without_pk:  # 577 ↛ 596: condition never false
578 fields = [f for f in fields if not isinstance(f, AutoField)]
579 returned_columns = self._batched_insert(
580 objs_without_pk,
581 fields,
582 batch_size,
583 ignore_conflicts=ignore_conflicts,
584 )
585 if (
586 connection.features.can_return_rows_from_bulk_insert
587 and not ignore_conflicts
588 ):
589 assert len(returned_columns) == len(objs_without_pk)
590 for obj_without_pk, results in zip(objs_without_pk, returned_columns):
591 for result, field in zip(results, opts.db_returning_fields):
592 setattr(obj_without_pk, field.attname, result)
593 obj_without_pk._state.adding = False
594 obj_without_pk._state.db = self.db
596 return objs
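    # Usage sketch (illustrative; ``Book`` is a hypothetical model). One
    # INSERT per batch; save() is not called and no pre/post_save signals
    # are sent:
    #
    #     Book.objects.bulk_create(
    #         [Book(title="A"), Book(title="B")], batch_size=100
    #     )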
598 def bulk_update(self, objs, fields, batch_size=None):
599 """
600 Update the given fields in each of the given objects in the database.
601 """
602 if batch_size is not None and batch_size <= 0:
603 raise ValueError("Batch size must be a positive integer.")
604 if not fields:
605 raise ValueError("Field names must be given to bulk_update().")
606 objs = tuple(objs)
607 if any(obj.pk is None for obj in objs):
608 raise ValueError("All bulk_update() objects must have a primary key set.")
609 fields = [self.model._meta.get_field(name) for name in fields]
610 if any(not f.concrete or f.many_to_many for f in fields):
611 raise ValueError("bulk_update() can only be used with concrete fields.")
612 if any(f.primary_key for f in fields):
613 raise ValueError("bulk_update() cannot be used with primary key fields.")
614 if not objs:
615 return 0
616 # PK is used twice in the resulting update query, once in the filter
617 # and once in the WHEN. Each field will also have one CAST.
618 max_batch_size = connections[self.db].ops.bulk_batch_size(
619 ["pk", "pk"] + fields, objs
620 )
621 batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
622 requires_casting = connections[self.db].features.requires_casted_case_in_updates
623 batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))
624 updates = []
625 for batch_objs in batches:
626 update_kwargs = {}
627 for field in fields:
628 when_statements = []
629 for obj in batch_objs:
630 attr = getattr(obj, field.attname)
631 if not isinstance(attr, Expression):
632 attr = Value(attr, output_field=field)
633 when_statements.append(When(pk=obj.pk, then=attr))
634 case_statement = Case(*when_statements, output_field=field)
635 if requires_casting:
636 case_statement = Cast(case_statement, output_field=field)
637 update_kwargs[field.attname] = case_statement
638 updates.append(([obj.pk for obj in batch_objs], update_kwargs))
639 rows_updated = 0
640 with transaction.atomic(using=self.db, savepoint=False):
641 for pks, update_kwargs in updates:
642 rows_updated += self.filter(pk__in=pks).update(**update_kwargs)
643 return rows_updated
645 bulk_update.alters_data = True
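    # Usage sketch (illustrative; ``Book`` with a ``price`` field is
    # hypothetical). Objects must already have primary keys; each batch
    # becomes one UPDATE with CASE/WHEN per field:
    #
    #     books = list(Book.objects.all())
    #     for b in books:
    #         b.price += 1
    #     Book.objects.bulk_update(books, ["price"], batch_size=100)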
647 def get_or_create(self, defaults=None, **kwargs):
648 """
649 Look up an object with the given kwargs, creating one if necessary.
650 Return a tuple of (object, created), where created is a boolean
651 specifying whether an object was created.
652 """
653 # The get() needs to be targeted at the write database in order
654 # to avoid potential transaction consistency problems.
655 self._for_write = True
656 try:
657 return self.get(**kwargs), False
658 except self.model.DoesNotExist:
659 params = self._extract_model_params(defaults, **kwargs)
660 # Try to create an object using passed params.
661 try:
662 with transaction.atomic(using=self.db):
663 params = dict(resolve_callables(params))
664 return self.create(**params), True
665 except IntegrityError:
666 try:
667 return self.get(**kwargs), False
668 except self.model.DoesNotExist:
669 pass
670 raise
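    # Usage sketch (illustrative; ``Author`` is a hypothetical model). The
    # keyword arguments look up the row; ``defaults`` only apply on create:
    #
    #     author, created = Author.objects.get_or_create(
    #         name="Ada", defaults={"active": True}
    #     )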
672 def update_or_create(self, defaults=None, **kwargs):
673 """
674 Look up an object with the given kwargs, updating one with defaults
675 if it exists, otherwise create a new one.
676 Return a tuple (object, created), where created is a boolean
677 specifying whether an object was created.
678 """
679 defaults = defaults or {}
680 self._for_write = True
681 with transaction.atomic(using=self.db):
682 # Lock the row so that a concurrent update is blocked until
683 # update_or_create() has performed its save.
684 obj, created = self.select_for_update().get_or_create(defaults, **kwargs)
685 if created:
686 return obj, created
687 for k, v in resolve_callables(defaults):
688 setattr(obj, k, v)
689 obj.save(using=self.db)
690 return obj, False
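    # Usage sketch (illustrative; ``Author`` is a hypothetical model). The
    # row is locked with select_for_update() inside a transaction before
    # ``defaults`` are applied and saved:
    #
    #     author, created = Author.objects.update_or_create(
    #         name="Ada", defaults={"active": False}
    #     )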
692 def _extract_model_params(self, defaults, **kwargs):
693 """
694 Prepare `params` for creating a model instance based on the given
695 kwargs; for use by get_or_create().
696 """
697 defaults = defaults or {}
698 params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
699 params.update(defaults)
700 property_names = self.model._meta._property_names
701 invalid_params = []
702 for param in params:
703 try:
704 self.model._meta.get_field(param)
705 except exceptions.FieldDoesNotExist:
706 # It's okay to use a model's property if it has a setter.
707 if not (param in property_names and getattr(self.model, param).fset):
708 invalid_params.append(param)
709 if invalid_params:  # 709 ↛ 710: condition never true
710 raise exceptions.FieldError(
711 "Invalid field name(s) for model %s: '%s'."
712 % (
713 self.model._meta.object_name,
714 "', '".join(sorted(invalid_params)),
715 )
716 )
717 return params
719 def _earliest(self, *fields):
720 """
721 Return the earliest object according to fields (if given) or by the
722 model's Meta.get_latest_by.
723 """
724 if fields:
725 order_by = fields
726 else:
727 order_by = getattr(self.model._meta, "get_latest_by")
728 if order_by and not isinstance(order_by, (tuple, list)):
729 order_by = (order_by,)
730 if order_by is None:
731 raise ValueError(
732 "earliest() and latest() require either fields as positional "
733 "arguments or 'get_latest_by' in the model's Meta."
734 )
735 obj = self._chain()
736 obj.query.set_limits(high=1)
737 obj.query.clear_ordering(force=True)
738 obj.query.add_ordering(*order_by)
739 return obj.get()
741 def earliest(self, *fields):
742 if self.query.is_sliced:
743 raise TypeError("Cannot change a query once a slice has been taken.")
744 return self._earliest(*fields)
746 def latest(self, *fields):
747 if self.query.is_sliced:
748 raise TypeError("Cannot change a query once a slice has been taken.")
749 return self.reverse()._earliest(*fields)
751 def first(self):
752 """Return the first object of a query or None if no match is found."""
753 for obj in (self if self.ordered else self.order_by("pk"))[:1]:
754 return obj
756 def last(self):
757 """Return the last object of a query or None if no match is found."""
758 for obj in (self.reverse() if self.ordered else self.order_by("-pk"))[:1]:  # 758 ↛ exit: loop didn't complete
759 return obj
761 def in_bulk(self, id_list=None, *, field_name="pk"):
762 """
763 Return a dictionary mapping each of the given IDs to the object with
764 that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
765 """
766 if self.query.is_sliced:
767 raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().")
768 opts = self.model._meta
769 unique_fields = [
770 constraint.fields[0]
771 for constraint in opts.total_unique_constraints
772 if len(constraint.fields) == 1
773 ]
774 if (
775 field_name != "pk"
776 and not opts.get_field(field_name).unique
777 and field_name not in unique_fields
778 and self.query.distinct_fields != (field_name,)
779 ):
780 raise ValueError(
781 "in_bulk()'s field_name must be a unique field but %r isn't."
782 % field_name
783 )
784 if id_list is not None:
785 if not id_list:
786 return {}
787 filter_key = "{}__in".format(field_name)
788 batch_size = connections[self.db].features.max_query_params
789 id_list = tuple(id_list)
790 # If the database has a limit on the number of query parameters
791 # (e.g. SQLite), retrieve objects in batches if necessary.
792 if batch_size and batch_size < len(id_list):
793 qs = ()
794 for offset in range(0, len(id_list), batch_size):
795 batch = id_list[offset : offset + batch_size]
796 qs += tuple(self.filter(**{filter_key: batch}).order_by())
797 else:
798 qs = self.filter(**{filter_key: id_list}).order_by()
799 else:
800 qs = self._chain()
801 return {getattr(obj, field_name): obj for obj in qs}
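    # Usage sketch (illustrative; ``Book`` with a unique ``slug`` field is
    # hypothetical):
    #
    #     Book.objects.in_bulk([1, 2, 3])    # {1: <Book>, 2: <Book>, 3: <Book>}
    #     Book.objects.in_bulk(["a"], field_name="slug")  # field must be unique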
803 def delete(self):
804 """Delete the records in the current QuerySet."""
805 self._not_support_combined_queries("delete")
806 if self.query.is_sliced:
807 raise TypeError("Cannot use 'limit' or 'offset' with delete().")
808 if self.query.distinct or self.query.distinct_fields:
809 raise TypeError("Cannot call delete() after .distinct().")
810 if self._fields is not None:
811 raise TypeError("Cannot call delete() after .values() or .values_list()")
813 del_query = self._chain()
815 # The delete is actually 2 queries - one to find related objects,
816 # and one to delete. Make sure that the discovery of related
817 # objects is performed on the same database as the deletion.
818 del_query._for_write = True
820 # Disable non-supported fields.
821 del_query.query.select_for_update = False
822 del_query.query.select_related = False
823 del_query.query.clear_ordering(force=True)
825 collector = Collector(using=del_query.db)
826 collector.collect(del_query)
827 deleted, _rows_count = collector.delete()
829 # Clear the result cache, in case this QuerySet gets reused.
830 self._result_cache = None
831 return deleted, _rows_count
833 delete.alters_data = True
834 delete.queryset_only = True
836 def _raw_delete(self, using):
837 """
838 Delete objects found from the given queryset in a single direct SQL
839 query. No signals are sent and there is no protection for cascades.
840 """
841 query = self.query.clone()
842 query.__class__ = sql.DeleteQuery
843 cursor = query.get_compiler(using).execute_sql(CURSOR)
844 if cursor:  # 844 ↛ 847: condition never false
845 with cursor:
846 return cursor.rowcount
847 return 0
849 _raw_delete.alters_data = True
851 def update(self, **kwargs):
852 """
853 Update all elements in the current QuerySet, setting all the given
854 fields to the appropriate values.
855 """
856 self._not_support_combined_queries("update")
857 if self.query.is_sliced:
858 raise TypeError("Cannot update a query once a slice has been taken.")
859 self._for_write = True
860 query = self.query.chain(sql.UpdateQuery)
861 query.add_update_values(kwargs)
862 # Clear any annotations so that they won't be present in subqueries.
863 query.annotations = {}
864 with transaction.mark_for_rollback_on_error(using=self.db):
865 rows = query.get_compiler(self.db).execute_sql(CURSOR)
866 self._result_cache = None
867 return rows
869 update.alters_data = True
871 def _update(self, values):
872 """
873 A version of update() that accepts field objects instead of field names.
874 Used primarily for model saving and not intended for use by general
875 code (it requires too much poking around at model internals to be
876 useful at that level).
877 """
878 if self.query.is_sliced:  # 878 ↛ 879: condition never true
879 raise TypeError("Cannot update a query once a slice has been taken.")
880 query = self.query.chain(sql.UpdateQuery)
881 query.add_update_fields(values)
882 # Clear any annotations so that they won't be present in subqueries.
883 query.annotations = {}
884 self._result_cache = None
885 return query.get_compiler(self.db).execute_sql(CURSOR)
887 _update.alters_data = True
888 _update.queryset_only = False
890 def exists(self):
891 if self._result_cache is None:  # 891 ↛ 893: condition never false
892 return self.query.has_results(using=self.db)
893 return bool(self._result_cache)
895 def contains(self, obj):
896 """Return True if the queryset contains an object."""
897 self._not_support_combined_queries("contains")
898 if self._fields is not None:
899 raise TypeError(
900 "Cannot call QuerySet.contains() after .values() or .values_list()."
901 )
902 try:
903 if obj._meta.concrete_model != self.model._meta.concrete_model:
904 return False
905 except AttributeError:
906 raise TypeError("'obj' must be a model instance.")
907 if obj.pk is None:
908 raise ValueError("QuerySet.contains() cannot be used on unsaved objects.")
909 if self._result_cache is not None:
910 return obj in self._result_cache
911 return self.filter(pk=obj.pk).exists()
913 def _prefetch_related_objects(self):
914 # This method can only be called once the result cache has been filled.
915 prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
916 self._prefetch_done = True
918 def explain(self, *, format=None, **options):
919 return self.query.explain(using=self.db, format=format, **options)
921 ##################################################
922 # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
923 ##################################################
925 def raw(self, raw_query, params=(), translations=None, using=None):
926 if using is None:
927 using = self.db
928 qs = RawQuerySet(
929 raw_query,
930 model=self.model,
931 params=params,
932 translations=translations,
933 using=using,
934 )
935 qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
936 return qs
938 def _values(self, *fields, **expressions):
939 clone = self._chain()
940 if expressions:  # 940 ↛ 941: condition never true
941 clone = clone.annotate(**expressions)
942 clone._fields = fields
943 clone.query.set_values(fields)
944 return clone
946 def values(self, *fields, **expressions):
947 fields += tuple(expressions)
948 clone = self._values(*fields, **expressions)
949 clone._iterable_class = ValuesIterable
950 return clone
952 def values_list(self, *fields, flat=False, named=False):
953 if flat and named:  # 953 ↛ 954: condition never true
954 raise TypeError("'flat' and 'named' can't be used together.")
955 if flat and len(fields) > 1:  # 955 ↛ 956: condition never true
956 raise TypeError(
957 "'flat' is not valid when values_list is called with more than one "
958 "field."
959 )
961 field_names = {f for f in fields if not hasattr(f, "resolve_expression")}
962 _fields = []
963 expressions = {}
964 counter = 1
965 for field in fields:
966 if hasattr(field, "resolve_expression"):  # 966 ↛ 967: condition never true
967 field_id_prefix = getattr(
968 field, "default_alias", field.__class__.__name__.lower()
969 )
970 while True:
971 field_id = field_id_prefix + str(counter)
972 counter += 1
973 if field_id not in field_names:
974 break
975 expressions[field_id] = field
976 _fields.append(field_id)
977 else:
978 _fields.append(field)
980 clone = self._values(*_fields, **expressions)
981 clone._iterable_class = (
982 NamedValuesListIterable
983 if named
984 else FlatValuesListIterable
985 if flat
986 else ValuesListIterable
987 )
988 return clone
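    # Usage sketch (illustrative; ``Book`` is a hypothetical model):
    #
    #     Book.objects.values_list("id", "title")       # tuples: (1, "A"), ...
    #     Book.objects.values_list("title", flat=True)  # single values: "A", ...
    #     Book.objects.values_list("id", "title", named=True)  # namedtuples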
990 def dates(self, field_name, kind, order="ASC"):
991 """
992 Return a list of date objects representing all available dates for
993 the given field_name, scoped to 'kind'.
994 """
995 if kind not in ("year", "month", "week", "day"):
996 raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
997 if order not in ("ASC", "DESC"):
998 raise ValueError("'order' must be either 'ASC' or 'DESC'.")
999 return (
1000 self.annotate(
1001 datefield=Trunc(field_name, kind, output_field=DateField()),
1002 plain_field=F(field_name),
1003 )
1004 .values_list("datefield", flat=True)
1005 .distinct()
1006 .filter(plain_field__isnull=False)
1007 .order_by(("-" if order == "DESC" else "") + "datefield")
1008 )
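    # Usage sketch (illustrative; ``Book`` with a ``pub_date`` field is
    # hypothetical). Yields distinct date objects truncated to the kind:
    #
    #     Book.objects.dates("pub_date", "month", order="DESC")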
1010 # RemovedInDjango50Warning: when the deprecation ends, remove is_dst
1011 # argument.
1012 def datetimes(
1013 self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED
1014 ):
1015 """
1016 Return a list of datetime objects representing all available
1017 datetimes for the given field_name, scoped to 'kind'.
1018 """
1019 if kind not in ("year", "month", "week", "day", "hour", "minute", "second"):
1020 raise ValueError(
1021 "'kind' must be one of 'year', 'month', 'week', 'day', "
1022 "'hour', 'minute', or 'second'."
1023 )
1024 if order not in ("ASC", "DESC"):
1025 raise ValueError("'order' must be either 'ASC' or 'DESC'.")
1026 if settings.USE_TZ:
1027 if tzinfo is None:
1028 tzinfo = timezone.get_current_timezone()
1029 else:
1030 tzinfo = None
1031 return (
1032 self.annotate(
1033 datetimefield=Trunc(
1034 field_name,
1035 kind,
1036 output_field=DateTimeField(),
1037 tzinfo=tzinfo,
1038 is_dst=is_dst,
1039 ),
1040 plain_field=F(field_name),
1041 )
1042 .values_list("datetimefield", flat=True)
1043 .distinct()
1044 .filter(plain_field__isnull=False)
1045 .order_by(("-" if order == "DESC" else "") + "datetimefield")
1046 )
1048 def none(self):
1049 """Return an empty QuerySet."""
1050 clone = self._chain()
1051 clone.query.set_empty()
1052 return clone
1054 ##################################################################
1055 # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
1056 ##################################################################
1058 def all(self):
1059 """
1060 Return a new QuerySet that is a copy of the current one. This allows a
1061 QuerySet to proxy for a model manager in some cases.
1062 """
1063 return self._chain()
1065 def filter(self, *args, **kwargs):
1066 """
1067 Return a new QuerySet instance with the args ANDed to the existing
1068 set.
1069 """
1070 self._not_support_combined_queries("filter")
1071 return self._filter_or_exclude(False, args, kwargs)
1073 def exclude(self, *args, **kwargs):
1074 """
1075 Return a new QuerySet instance with NOT (args) ANDed to the existing
1076 set.
1077 """
1078 self._not_support_combined_queries("exclude")
1079 return self._filter_or_exclude(True, args, kwargs)
1081 def _filter_or_exclude(self, negate, args, kwargs):
1082 if (args or kwargs) and self.query.is_sliced:  # 1082 ↛ 1083: condition never true
1083 raise TypeError("Cannot filter a query once a slice has been taken.")
1084 clone = self._chain()
1085 if self._defer_next_filter:
1086 self._defer_next_filter = False
1087 clone._deferred_filter = negate, args, kwargs
1088 else:
1089 clone._filter_or_exclude_inplace(negate, args, kwargs)
1090 return clone
1092 def _filter_or_exclude_inplace(self, negate, args, kwargs):
1093 if negate:
1094 self._query.add_q(~Q(*args, **kwargs))
1095 else:
1096 self._query.add_q(Q(*args, **kwargs))
1098 def complex_filter(self, filter_obj):
1099 """
1100 Return a new QuerySet instance with filter_obj added to the filters.
1102 filter_obj can be a Q object or a dictionary of keyword lookup
1103 arguments.
1105 This exists to support framework features such as 'limit_choices_to',
1106 and usually it will be more natural to use other methods.
1107 """
1108 if isinstance(filter_obj, Q):  # 1108 ↛ 1109: condition never true
1109 clone = self._chain()
1110 clone.query.add_q(filter_obj)
1111 return clone
1112 else:
1113 return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
1115 def _combinator_query(self, combinator, *other_qs, all=False):
1116 # Clone the query to inherit the select list and everything
1117 clone = self._chain()
1118 # Clear limits and ordering so they can be reapplied
1119 clone.query.clear_ordering(force=True)
1120 clone.query.clear_limits()
1121 clone.query.combined_queries = (self.query,) + tuple(
1122 qs.query for qs in other_qs
1123 )
1124 clone.query.combinator = combinator
1125 clone.query.combinator_all = all
1126 return clone
1128 def union(self, *other_qs, all=False):
1129 # If the query is an EmptyQuerySet, combine all nonempty querysets.
1130 if isinstance(self, EmptyQuerySet):
1131 qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
1132 if not qs:
1133 return self
1134 if len(qs) == 1:
1135 return qs[0]
1136 return qs[0]._combinator_query("union", *qs[1:], all=all)
1137 return self._combinator_query("union", *other_qs, all=all)
1139 def intersection(self, *other_qs):
1140 # If any query is an EmptyQuerySet, return it.
1141 if isinstance(self, EmptyQuerySet):
1142 return self
1143 for other in other_qs:
1144 if isinstance(other, EmptyQuerySet):
1145 return other
1146 return self._combinator_query("intersection", *other_qs)
1148 def difference(self, *other_qs):
1149 # If the query is an EmptyQuerySet, return it.
1150 if isinstance(self, EmptyQuerySet):
1151 return self
1152 return self._combinator_query("difference", *other_qs)
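    # Usage sketch for the combinators above (illustrative; ``Book`` is a
    # hypothetical model). Ordering and limits are cleared when combining,
    # so apply them to the combined queryset:
    #
    #     cheap = Book.objects.filter(price__lt=10)
    #     recent = Book.objects.filter(year__gte=2020)
    #     cheap.union(recent, all=True).order_by("title")[:10]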
1154 def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
1155 """
1156 Return a new QuerySet instance that will select objects with a
1157 FOR UPDATE lock.
1158 """
1159 if nowait and skip_locked:
1160 raise ValueError("The nowait option cannot be used with skip_locked.")
1161 obj = self._chain()
1162 obj._for_write = True
1163 obj.query.select_for_update = True
1164 obj.query.select_for_update_nowait = nowait
1165 obj.query.select_for_update_skip_locked = skip_locked
1166 obj.query.select_for_update_of = of
1167 obj.query.select_for_no_key_update = no_key
1168 return obj
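    # Usage sketch (illustrative; ``Book`` is a hypothetical model). Row
    # locks only hold inside a transaction:
    #
    #     from django.db import transaction
    #     with transaction.atomic():
    #         book = Book.objects.select_for_update(skip_locked=True).get(pk=1)
    #         ...  # mutate and save while the row is locked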
1170 def select_related(self, *fields):
1171 """
1172 Return a new QuerySet instance that will select related objects.
1174 If fields are specified, they must be ForeignKey fields and only those
1175 related objects are included in the selection.
1177 If select_related(None) is called, clear the list.
1178 """
1179 self._not_support_combined_queries("select_related")
1180 if self._fields is not None:
1181 raise TypeError(
1182 "Cannot call select_related() after .values() or .values_list()"
1183 )
1185 obj = self._chain()
1186 if fields == (None,):
1187 obj.query.select_related = False
1188 elif fields:
1189 obj.query.add_select_related(fields)
1190 else:
1191 obj.query.select_related = True
1192 return obj
1194 def prefetch_related(self, *lookups):
1195 """
1196 Return a new QuerySet instance that will prefetch the specified
1197 Many-To-One and Many-To-Many related objects when the QuerySet is
1198 evaluated.
1200 When prefetch_related() is called more than once, append to the list of
1201 prefetch lookups. If prefetch_related(None) is called, clear the list.
1202 """
1203 self._not_support_combined_queries("prefetch_related")
1204 clone = self._chain()
1205 if lookups == (None,):
1206 clone._prefetch_related_lookups = ()
1207 else:
1208 for lookup in lookups:
1209 if isinstance(lookup, Prefetch):
1210 lookup = lookup.prefetch_to
1211 lookup = lookup.split(LOOKUP_SEP, 1)[0]
1212 if lookup in self.query._filtered_relations:
1213 raise ValueError(
1214 "prefetch_related() is not supported with FilteredRelation."
1215 )
1216 clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
1217 return clone
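    # Usage sketch (illustrative; ``Author`` with a reverse ``books``
    # relation is hypothetical). Accessing the relation after the prefetch
    # is served from cache instead of issuing per-object queries:
    #
    #     for author in Author.objects.prefetch_related("books"):
    #         author.books.all()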
1219 def annotate(self, *args, **kwargs):
1220 """
1221 Return a query set in which the returned objects have been annotated
1222 with extra data or aggregations.
1223 """
1224 self._not_support_combined_queries("annotate")
1225 return self._annotate(args, kwargs, select=True)
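    # Usage sketch (illustrative; ``Author`` with a reverse ``books``
    # relation is hypothetical):
    #
    #     from django.db.models import Count
    #     Author.objects.annotate(n=Count("books")).filter(n__gt=1)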
1227 def alias(self, *args, **kwargs):
1228 """
1229 Return a query set with added aliases for extra data or aggregations.
1230 """
1231 self._not_support_combined_queries("alias")
1232 return self._annotate(args, kwargs, select=False)
1234 def _annotate(self, args, kwargs, select=True):
1235 self._validate_values_are_expressions(
1236 args + tuple(kwargs.values()), method_name="annotate"
1237 )
1238 annotations = {}
1239 for arg in args:
1240 # The default_alias property may raise a TypeError.
1241 try:
1242 if arg.default_alias in kwargs:
1243 raise ValueError(
1244 "The named annotation '%s' conflicts with the "
1245 "default name for another annotation." % arg.default_alias
1246 )
1247 except TypeError:
1248 raise TypeError("Complex annotations require an alias")
1249 annotations[arg.default_alias] = arg
1250 annotations.update(kwargs)
1252 clone = self._chain()
1253 names = self._fields
1254 if names is None:
1255 names = set(
1256 chain.from_iterable(
1257 (field.name, field.attname)
1258 if hasattr(field, "attname")
1259 else (field.name,)
1260 for field in self.model._meta.get_fields()
1261 )
1262 )
1264 for alias, annotation in annotations.items():
1265 if alias in names:
1266 raise ValueError(
1267 "The annotation '%s' conflicts with a field on "
1268 "the model." % alias
1269 )
1270 if isinstance(annotation, FilteredRelation):
1271 clone.query.add_filtered_relation(annotation, alias)
1272 else:
1273 clone.query.add_annotation(
1274 annotation,
1275 alias,
1276 is_summary=False,
1277 select=select,
1278 )
1279 for alias, annotation in clone.query.annotations.items():
1280 if alias in annotations and annotation.contains_aggregate:
1281 if clone._fields is None:
1282 clone.query.group_by = True
1283 else:
1284 clone.query.set_group_by()
1285 break
1287 return clone
1289 def order_by(self, *field_names):
1290 """Return a new QuerySet instance with the ordering changed."""
1291 if self.query.is_sliced:  # 1291 ↛ 1292: condition never true
1292 raise TypeError("Cannot reorder a query once a slice has been taken.")
1293 obj = self._chain()
1294 obj.query.clear_ordering(force=True, clear_default=False)
1295 obj.query.add_ordering(*field_names)
1296 return obj
1298 def distinct(self, *field_names):
1299 """
1300 Return a new QuerySet instance that will select only distinct results.
1301 """
1302 self._not_support_combined_queries("distinct")
1303 if self.query.is_sliced:
1304 raise TypeError(
1305 "Cannot create distinct fields once a slice has been taken."
1306 )
1307 obj = self._chain()
1308 obj.query.add_distinct_fields(*field_names)
1309 return obj
1311 def extra(
1312 self,
1313 select=None,
1314 where=None,
1315 params=None,
1316 tables=None,
1317 order_by=None,
1318 select_params=None,
1319 ):
1320 """Add extra SQL fragments to the query."""
1321 self._not_support_combined_queries("extra")
1322 if self.query.is_sliced:
1323 raise TypeError("Cannot change a query once a slice has been taken.")
1324 clone = self._chain()
1325 clone.query.add_extra(select, select_params, where, params, tables, order_by)
1326 return clone
1328 def reverse(self):
1329 """Reverse the ordering of the QuerySet."""
1330 if self.query.is_sliced:  # 1330 ↛ 1331: condition never true
1331 raise TypeError("Cannot reverse a query once a slice has been taken.")
1332 clone = self._chain()
1333 clone.query.standard_ordering = not clone.query.standard_ordering
1334 return clone
1336 def defer(self, *fields):
1337 """
1338 Defer the loading of data for certain fields until they are accessed.
1339 Add the set of deferred fields to any existing set of deferred fields.
1340 The only exception to this is if None is passed in as the only
1341 parameter, in which case all deferrals are removed.
1342 """
1343 self._not_support_combined_queries("defer")
1344 if self._fields is not None:
1345 raise TypeError("Cannot call defer() after .values() or .values_list()")
1346 clone = self._chain()
1347 if fields == (None,):
1348 clone.query.clear_deferred_loading()
1349 else:
1350 clone.query.add_deferred_loading(fields)
1351 return clone
1353 def only(self, *fields):
1354 """
1355 Essentially, the opposite of defer(). Only the fields passed into this
1356 method and that are not already specified as deferred are loaded
1357 immediately when the queryset is evaluated.
1358 """
1359 self._not_support_combined_queries("only")
1360 if self._fields is not None:  # 1360 ↛ 1361: condition never true
1361 raise TypeError("Cannot call only() after .values() or .values_list()")
1362 if fields == (None,):  # 1362 ↛ 1365: condition never true
1363 # Can only pass None to defer(), not only(), as the rest option.
1364 # That won't stop people trying to do this, so let's be explicit.
1365 raise TypeError("Cannot pass None as an argument to only().")
1366 for field in fields:
1367 field = field.split(LOOKUP_SEP, 1)[0]
1368 if field in self.query._filtered_relations:  # 1368 ↛ 1369: condition never true
1369 raise ValueError("only() is not supported with FilteredRelation.")
1370 clone = self._chain()
1371 clone.query.add_immediate_loading(fields)
1372 return clone
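    # Usage sketch for defer()/only() (illustrative; ``Book`` with a
    # ``body`` field is hypothetical). Deferred fields load lazily, with
    # an extra query on first access:
    #
    #     Book.objects.defer("body")        # everything except body
    #     Book.objects.only("id", "title")  # just these fields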
1374 def using(self, alias):
1375 """Select which database this QuerySet should execute against."""
1376 clone = self._chain()
1377 clone._db = alias
1378 return clone
1380 ###################################
1381 # PUBLIC INTROSPECTION ATTRIBUTES #
1382 ###################################
1384 @property
1385 def ordered(self):
1386 """
1387 Return True if the QuerySet is ordered -- i.e. has an order_by()
1388 clause or a default ordering on the model (or is empty).
1389 """
1390 if isinstance(self, EmptyQuerySet):  # 1390 ↛ 1391: condition never true
1391 return True
1392 if self.query.extra_order_by or self.query.order_by:
1393 return True
1394 elif (
1395 self.query.default_ordering
1396 and self.query.get_meta().ordering
1397 and
1398 # A default ordering doesn't affect GROUP BY queries.
1399 not self.query.group_by
1400 ):
1401 return True
1402 else:
1403 return False
1405 @property
1406 def db(self):
1407 """Return the database used if this query is executed now."""
1408 if self._for_write:
1409 return self._db or router.db_for_write(self.model, **self._hints)
1410 return self._db or router.db_for_read(self.model, **self._hints)
1412 ###################
1413 # PRIVATE METHODS #
1414 ###################
1416 def _insert(
1417 self,
1418 objs,
1419 fields,
1420 returning_fields=None,
1421 raw=False,
1422 using=None,
1423 ignore_conflicts=False,
1424 ):
1425 """
1426 Insert a new record for the given model. This provides an interface to
1427 the InsertQuery class and is how Model.save() is implemented.
1428 """
1429 self._for_write = True
1430 if using is None:  # 1430 ↛ 1431: condition never true
1431 using = self.db
1432 query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts)
1433 query.insert_values(fields, objs, raw=raw)
1434 return query.get_compiler(using=using).execute_sql(returning_fields)
1436 _insert.alters_data = True
1437 _insert.queryset_only = False
1439 def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False):
1440 """
1441 Helper method for bulk_create() to insert objs one batch at a time.
1442 """
1443 if (  # 1443 ↛ 1447: didn't jump to line 1447
1444 ignore_conflicts
1445 and not connections[self.db].features.supports_ignore_conflicts
1446 ):
1447 raise NotSupportedError(
1448 "This database backend does not support ignoring conflicts."
1449 )
1450 ops = connections[self.db].ops
1451 max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
1452 batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
1453 inserted_rows = []
1454 bulk_return = connections[self.db].features.can_return_rows_from_bulk_insert
1455 for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]:
1456 if bulk_return and not ignore_conflicts:
1457 inserted_rows.extend(
1458 self._insert(
1459 item,
1460 fields=fields,
1461 using=self.db,
1462 returning_fields=self.model._meta.db_returning_fields,
1463 ignore_conflicts=ignore_conflicts,
1464 )
1465 )
1466 else:
1467 self._insert(
1468 item,
1469 fields=fields,
1470 using=self.db,
1471 ignore_conflicts=ignore_conflicts,
1472 )
1473 return inserted_rows
1475 def _chain(self):
1476 """
1477 Return a copy of the current QuerySet that's ready for another
1478 operation.
1479 """
1480 obj = self._clone()
1481 if obj._sticky_filter:
1482 obj.query.filter_is_sticky = True
1483 obj._sticky_filter = False
1484 return obj
1486 def _clone(self):
1487 """
1488 Return a copy of the current QuerySet. A lightweight alternative
1489 to deepcopy().
1490 """
1491 c = self.__class__(
1492 model=self.model,
1493 query=self.query.chain(),
1494 using=self._db,
1495 hints=self._hints,
1496 )
1497 c._sticky_filter = self._sticky_filter
1498 c._for_write = self._for_write
1499 c._prefetch_related_lookups = self._prefetch_related_lookups[:]
1500 c._known_related_objects = self._known_related_objects
1501 c._iterable_class = self._iterable_class
1502 c._fields = self._fields
1503 return c
1505 def _fetch_all(self):
1506 if self._result_cache is None:
1507 self._result_cache = list(self._iterable_class(self))
1508 if self._prefetch_related_lookups and not self._prefetch_done:  # 1508 ↛ 1509: condition never true
1509 self._prefetch_related_objects()
1511 def _next_is_sticky(self):
1512 """
1513 Indicate that the next filter call and the one following that should
1514 be treated as a single filter. This is only important when it comes to
1515 determining when to reuse tables for many-to-many filters. Required so
1516 that we can filter naturally on the results of related managers.
1518 This doesn't return a clone of the current QuerySet (it returns
1519 "self"). The method is only used internally and should be immediately
1520 followed by a filter() that does create a clone.
1521 """
1522 self._sticky_filter = True
1523 return self
1525 def _merge_sanity_check(self, other):
1526 """Check that two QuerySet classes may be merged."""
1527 if self._fields is not None and (
1528 set(self.query.values_select) != set(other.query.values_select)
1529 or set(self.query.extra_select) != set(other.query.extra_select)
1530 or set(self.query.annotation_select) != set(other.query.annotation_select)
1531 ):
1532 raise TypeError(
1533 "Merging '%s' classes must involve the same values in each case."
1534 % self.__class__.__name__
1535 )
1537 def _merge_known_related_objects(self, other):
1538 """
1539 Keep track of all known related objects from either QuerySet instance.
1540 """
1541 for field, objects in other._known_related_objects.items():
1542 self._known_related_objects.setdefault(field, {}).update(objects)
1544 def resolve_expression(self, *args, **kwargs):
1545 if self._fields and len(self._fields) > 1:  # 1545 ↛ 1548: condition never true
1546 # values() queryset can only be used as nested queries
1547 # if they are set up to select only a single field.
1548 raise TypeError("Cannot use multi-field values as a filter value.")
1549 query = self.query.resolve_expression(*args, **kwargs)
1550 query._db = self._db
1551 return query
1553 resolve_expression.queryset_only = True
1555 def _add_hints(self, **hints):
1556 """
1557 Update hinting information for use by routers. Add new key/values or
1558 overwrite existing key/values.
1559 """
1560 self._hints.update(hints)
1562 def _has_filters(self):
1563 """
1564 Check if this QuerySet has any filtering going on. This isn't
1565 equivalent to checking if all objects are present in results, for
1566 example, qs[1:]._has_filters() -> False.
1567 """
1568 return self.query.has_filters()
1570 @staticmethod
1571 def _validate_values_are_expressions(values, method_name):
1572 invalid_args = sorted(
1573 str(arg) for arg in values if not hasattr(arg, "resolve_expression")
1574 )
1575 if invalid_args:  # 1575 ↛ 1576: condition never true
1576 raise TypeError(
1577 "QuerySet.%s() received non-expression(s): %s."
1578 % (
1579 method_name,
1580 ", ".join(invalid_args),
1581 )
1582 )
1584 def _not_support_combined_queries(self, operation_name):
1585 if self.query.combinator:  # 1585 ↛ 1586: condition never true
1586 raise NotSupportedError(
1587 "Calling QuerySet.%s() after %s() is not supported."
1588 % (operation_name, self.query.combinator)
1589 )
1592class InstanceCheckMeta(type):
1593 def __instancecheck__(self, instance):
1594 return isinstance(instance, QuerySet) and instance.query.is_empty()
1597class EmptyQuerySet(metaclass=InstanceCheckMeta):
1598 """
1599 Marker class for checking whether a queryset is empty, as created by .none():
1600 isinstance(qs.none(), EmptyQuerySet) -> True
1601 """
1603 def __init__(self, *args, **kwargs):
1604 raise TypeError("EmptyQuerySet can't be instantiated")
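# Editor's note: a sketch of the marker-class check described in the
# docstring above, assuming a `Book` model:
#
#     >>> from django.db.models.query import EmptyQuerySet
#     >>> isinstance(Book.objects.none(), EmptyQuerySet)
#     True
#     >>> isinstance(Book.objects.all(), EmptyQuerySet)
#     False
#     >>> EmptyQuerySet()
#     TypeError: EmptyQuerySet can't be instantiated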
1607class RawQuerySet:
1608 """
1609 Provide an iterator which converts the results of raw SQL queries into
1610 annotated model instances.
1611 """
1613 def __init__(
1614 self,
1615 raw_query,
1616 model=None,
1617 query=None,
1618 params=(),
1619 translations=None,
1620 using=None,
1621 hints=None,
1622 ):
1623 self.raw_query = raw_query
1624 self.model = model
1625 self._db = using
1626 self._hints = hints or {}
1627 self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
1628 self.params = params
1629 self.translations = translations or {}
1630 self._result_cache = None
1631 self._prefetch_related_lookups = ()
1632 self._prefetch_done = False
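# Editor's note: RawQuerySet instances are normally obtained through
# Manager.raw() rather than constructed directly. A sketch, assuming a
# `Book` model stored in a table named "myapp_book":
#
#     >>> for book in Book.objects.raw(
#     ...     "SELECT * FROM myapp_book WHERE id = %s", [1]
#     ... ):
#     ...     print(book.title)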
1634 def resolve_model_init_order(self):
1635 """Resolve the init field names and value positions."""
1636 converter = connections[self.db].introspection.identifier_converter
1637 model_init_fields = [
1638 f for f in self.model._meta.fields if converter(f.column) in self.columns
1639 ]
1640 annotation_fields = [
1641 (column, pos)
1642 for pos, column in enumerate(self.columns)
1643 if column not in self.model_fields
1644 ]
1645 model_init_order = [
1646 self.columns.index(converter(f.column)) for f in model_init_fields
1647 ]
1648 model_init_names = [f.attname for f in model_init_fields]
1649 return model_init_names, model_init_order, annotation_fields
1651 def prefetch_related(self, *lookups):
1652 """Same as QuerySet.prefetch_related()"""
1653 clone = self._clone()
1654 if lookups == (None,):
1655 clone._prefetch_related_lookups = ()
1656 else:
1657 clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
1658 return clone
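# Editor's note: a sketch of prefetching on a raw queryset, assuming `Book`
# has a many-to-many field `authors`. As in QuerySet.prefetch_related(),
# passing None clears previously added lookups:
#
#     >>> qs = Book.objects.raw("SELECT * FROM myapp_book")
#     >>> qs = qs.prefetch_related("authors")
#     >>> qs = qs.prefetch_related(None)    # back to no prefetching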
1660 def _prefetch_related_objects(self):
1661 prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
1662 self._prefetch_done = True
1664 def _clone(self):
1665 """Same as QuerySet._clone()"""
1666 c = self.__class__(
1667 self.raw_query,
1668 model=self.model,
1669 query=self.query,
1670 params=self.params,
1671 translations=self.translations,
1672 using=self._db,
1673 hints=self._hints,
1674 )
1675 c._prefetch_related_lookups = self._prefetch_related_lookups[:]
1676 return c
1678 def _fetch_all(self):
1679 if self._result_cache is None:
1680 self._result_cache = list(self.iterator())
1681 if self._prefetch_related_lookups and not self._prefetch_done:
1682 self._prefetch_related_objects()
1684 def __len__(self):
1685 self._fetch_all()
1686 return len(self._result_cache)
1688 def __bool__(self):
1689 self._fetch_all()
1690 return bool(self._result_cache)
1692 def __iter__(self):
1693 self._fetch_all()
1694 return iter(self._result_cache)
1696 def iterator(self):
1697 # Cache some things for performance reasons outside the loop.
1698 db = self.db
1699 compiler = connections[db].ops.compiler("SQLCompiler")(
1700 self.query, connections[db], db
1701 )
1703 query = iter(self.query)
1705 try:
1706 (
1707 model_init_names,
1708 model_init_pos,
1709 annotation_fields,
1710 ) = self.resolve_model_init_order()
1711 if self.model._meta.pk.attname not in model_init_names:
1712 raise exceptions.FieldDoesNotExist(
1713 "Raw query must include the primary key"
1714 )
1715 model_cls = self.model
1716 fields = [self.model_fields.get(c) for c in self.columns]
1717 converters = compiler.get_converters(
1718 [f.get_col(f.model._meta.db_table) if f else None for f in fields]
1719 )
1720 if converters:
1721 query = compiler.apply_converters(query, converters)
1722 for values in query:
1723 # Associate fields with values
1724 model_init_values = [values[pos] for pos in model_init_pos]
1725 instance = model_cls.from_db(db, model_init_names, model_init_values)
1726 if annotation_fields:
1727 for column, pos in annotation_fields:
1728 setattr(instance, column, values[pos])
1729 yield instance
1730 finally:
1731 # Done iterating the Query. If it has its own cursor, close it.
1732 if hasattr(self.query, "cursor") and self.query.cursor:
1733 self.query.cursor.close()
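# Editor's note: a sketch of the annotation_fields branch above, assuming a
# `Book` model. Columns in the raw SQL that don't map to model fields are
# set as plain attributes on each yielded instance; omitting the primary key
# column raises FieldDoesNotExist:
#
#     >>> sql = "SELECT id, title, id * 2 AS double_id FROM myapp_book"
#     >>> book = list(Book.objects.raw(sql))[0]
#     >>> book.double_id    # annotation column, not a model field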
1735 def __repr__(self):
1736 return "<%s: %s>" % (self.__class__.__name__, self.query)
1738 def __getitem__(self, k):
1739 return list(self)[k]
1741 @property
1742 def db(self):
1743 """Return the database used if this query is executed now."""
1744 return self._db or router.db_for_read(self.model, **self._hints)
1746 def using(self, alias):
1747 """Select the database this RawQuerySet should execute against."""
1748 return RawQuerySet(
1749 self.raw_query,
1750 model=self.model,
1751 query=self.query.chain(using=alias),
1752 params=self.params,
1753 translations=self.translations,
1754 using=alias,
1755 )
1757 @cached_property
1758 def columns(self):
1759 """
1760 A list of model field names in the order they'll appear in the
1761 query results.
1762 """
1763 columns = self.query.get_columns()
1764 # Adjust any column names which don't match field names
1765 for (query_name, model_name) in self.translations.items():
1766 # Ignore translations for nonexistent column names
1767 try:
1768 index = columns.index(query_name)
1769 except ValueError:
1770 pass
1771 else:
1772 columns[index] = model_name
1773 return columns
1775 @cached_property
1776 def model_fields(self):
1777 """A dict mapping column names to model field names."""
1778 converter = connections[self.db].introspection.identifier_converter
1779 model_fields = {}
1780 for field in self.model._meta.fields:
1781 name, column = field.get_attname_column()
1782 model_fields[converter(column)] = field
1783 return model_fields
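# Editor's note: a sketch of the `translations` mapping used by columns and
# model_fields above, assuming a `Book` model. It maps column names in the
# raw SQL to model field names:
#
#     >>> sql = "SELECT id AS book_id, title AS book_title FROM myapp_book"
#     >>> books = Book.objects.raw(
#     ...     sql, translations={"book_id": "id", "book_title": "title"}
#     ... )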
1786class Prefetch:
1787 def __init__(self, lookup, queryset=None, to_attr=None):
1788 # `prefetch_through` is the path we traverse to perform the prefetch.
1789 self.prefetch_through = lookup
1790 # `prefetch_to` is the path to the attribute that stores the result.
1791 self.prefetch_to = lookup
1792 if queryset is not None and (
1793 isinstance(queryset, RawQuerySet)
1794 or (
1795 hasattr(queryset, "_iterable_class")
1796 and not issubclass(queryset._iterable_class, ModelIterable)
1797 )
1798 ):
1799 raise ValueError(
1800 "Prefetch querysets cannot use raw(), values(), and values_list()."
1801 )
1802 if to_attr:
1803 self.prefetch_to = LOOKUP_SEP.join(
1804 lookup.split(LOOKUP_SEP)[:-1] + [to_attr]
1805 )
1807 self.queryset = queryset
1808 self.to_attr = to_attr
1810 def __getstate__(self):
1811 obj_dict = self.__dict__.copy()
1812 if self.queryset is not None:
1813 queryset = self.queryset._chain()
1814 # Prevent the QuerySet from being evaluated
1815 queryset._result_cache = []
1816 queryset._prefetch_done = True
1817 obj_dict["queryset"] = queryset
1818 return obj_dict
1820 def add_prefix(self, prefix):
1821 self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
1822 self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
1824 def get_current_prefetch_to(self, level):
1825 return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1])
1827 def get_current_to_attr(self, level):
1828 parts = self.prefetch_to.split(LOOKUP_SEP)
1829 to_attr = parts[level]
1830 as_attr = self.to_attr and level == len(parts) - 1
1831 return to_attr, as_attr
1833 def get_current_queryset(self, level):
1834 if self.get_current_prefetch_to(level) == self.prefetch_to:
1835 return self.queryset
1836 return None
1838 def __eq__(self, other):
1839 if not isinstance(other, Prefetch):
1840 return NotImplemented
1841 return self.prefetch_to == other.prefetch_to
1843 def __hash__(self):
1844 return hash((self.__class__, self.prefetch_to))
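# Editor's note: a sketch of Prefetch usage, assuming `Book` with an
# `authors` many-to-many to an `Author` model that has `name` and `active`
# fields. A custom queryset controls the prefetch query; to_attr stores the
# results as a list on a separate attribute:
#
#     >>> from django.db.models import Prefetch
#     >>> Book.objects.prefetch_related(
#     ...     Prefetch("authors", queryset=Author.objects.order_by("name")),
#     ...     Prefetch(
#     ...         "authors",
#     ...         queryset=Author.objects.filter(active=True),
#     ...         to_attr="active_authors",
#     ...     ),
#     ... )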
1847def normalize_prefetch_lookups(lookups, prefix=None):
1848 """Normalize lookups into Prefetch objects."""
1849 ret = []
1850 for lookup in lookups:
1851 if not isinstance(lookup, Prefetch):
1852 lookup = Prefetch(lookup)
1853 if prefix:
1854 lookup.add_prefix(prefix)
1855 ret.append(lookup)
1856 return ret
1859def prefetch_related_objects(model_instances, *related_lookups):
1860 """
1861 Populate prefetched object caches for a list of model instances based on
1862 the lookups/Prefetch instances given.
1863 """
1864 if not model_instances:
1865 return # nothing to do
1867 # We need to be able to dynamically add to the list of prefetch_related
1868 # lookups that we follow (see below), so we need some bookkeeping to
1869 # ensure we don't do duplicate work.
1870 done_queries = {} # dictionary of things like 'foo__bar': [results]
1872 auto_lookups = set() # we add to this as we go through.
1873 followed_descriptors = set() # recursion protection
1875 all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
1876 while all_lookups:
1877 lookup = all_lookups.pop()
1878 if lookup.prefetch_to in done_queries:
1879 if lookup.queryset is not None:
1880 raise ValueError(
1881 "'%s' lookup was already seen with a different queryset. "
1882 "You may need to adjust the ordering of your lookups."
1883 % lookup.prefetch_to
1884 )
1886 continue
1888 # At the top level, the list of objects to decorate is the result cache
1889 # from the primary QuerySet. It won't be for deeper levels.
1890 obj_list = model_instances
1892 through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
1893 for level, through_attr in enumerate(through_attrs):
1894 # Prepare main instances
1895 if not obj_list:
1896 break
1898 prefetch_to = lookup.get_current_prefetch_to(level)
1899 if prefetch_to in done_queries:
1900 # Skip any prefetching, and any object preparation
1901 obj_list = done_queries[prefetch_to]
1902 continue
1904 # Prepare objects:
1905 good_objects = True
1906 for obj in obj_list:
1907 # Since prefetching can re-use instances, it is possible to have
1908 # the same instance multiple times in obj_list, so obj might
1909 # already be prepared.
1910 if not hasattr(obj, "_prefetched_objects_cache"):
1911 try:
1912 obj._prefetched_objects_cache = {}
1913 except (AttributeError, TypeError):
1914 # Must be an immutable object from
1915 # values_list(flat=True), for example (TypeError) or
1916 # a QuerySet subclass that isn't returning Model
1917 # instances (AttributeError), either in Django or a 3rd
1918 # party. prefetch_related() doesn't make sense, so quit.
1919 good_objects = False
1920 break
1921 if not good_objects:
1922 break
1924 # Descend down tree
1926 # We assume that objects retrieved are homogeneous (which is the premise
1927 # of prefetch_related), so what applies to the first object applies to all.
1928 first_obj = obj_list[0]
1929 to_attr = lookup.get_current_to_attr(level)[0]
1930 prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(
1931 first_obj, through_attr, to_attr
1932 )
1934 if not attr_found:
1935 raise AttributeError(
1936 "Cannot find '%s' on %s object, '%s' is an invalid "
1937 "parameter to prefetch_related()"
1938 % (
1939 through_attr,
1940 first_obj.__class__.__name__,
1941 lookup.prefetch_through,
1942 )
1943 )
1945 if level == len(through_attrs) - 1 and prefetcher is None:
1946 # Last one, this *must* resolve to something that supports
1947 # prefetching, otherwise there is no point adding it and the
1948 # developer asking for it has made a mistake.
1949 raise ValueError(
1950 "'%s' does not resolve to an item that supports "
1951 "prefetching - this is an invalid parameter to "
1952 "prefetch_related()." % lookup.prefetch_through
1953 )
1955 obj_to_fetch = None
1956 if prefetcher is not None:
1957 obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)]
1959 if obj_to_fetch:
1960 obj_list, additional_lookups = prefetch_one_level(
1961 obj_to_fetch,
1962 prefetcher,
1963 lookup,
1964 level,
1965 )
1966 # We need to ensure we don't keep adding lookups from the
1967 # same relationships to stop infinite recursion. So, if we
1968 # are already on an automatically added lookup, don't add
1969 # the new lookups from relationships we've seen already.
1970 if not (
1971 prefetch_to in done_queries
1972 and lookup in auto_lookups
1973 and descriptor in followed_descriptors
1974 ):
1975 done_queries[prefetch_to] = obj_list
1976 new_lookups = normalize_prefetch_lookups(
1977 reversed(additional_lookups), prefetch_to
1978 )
1979 auto_lookups.update(new_lookups)
1980 all_lookups.extend(new_lookups)
1981 followed_descriptors.add(descriptor)
1982 else:
1983 # Either a singly related object that has already been fetched
1984 # (e.g. via select_related), or hopefully some other property
1985 # that doesn't support prefetching but needs to be traversed.
1987 # We replace the current list of parent objects with the list
1988 # of related objects, filtering out empty or missing values so
1989 # that we can continue with nullable or reverse relations.
1990 new_obj_list = []
1991 for obj in obj_list:
1992 if through_attr in getattr(obj, "_prefetched_objects_cache", ()):
1993 # If related objects have been prefetched, use the
1994 # cache rather than the object's through_attr.
1995 new_obj = list(obj._prefetched_objects_cache.get(through_attr))
1996 else:
1997 try:
1998 new_obj = getattr(obj, through_attr)
1999 except exceptions.ObjectDoesNotExist:
2000 continue
2001 if new_obj is None:
2002 continue
2003 # We special-case `list` rather than something more generic
2004 # like `Iterable` because we don't want to accidentally match
2005 # user models that define __iter__.
2006 if isinstance(new_obj, list):
2007 new_obj_list.extend(new_obj)
2008 else:
2009 new_obj_list.append(new_obj)
2010 obj_list = new_obj_list
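# Editor's note: a sketch of calling this function directly on instances that
# were already fetched, assuming a `Book` model with an `authors` relation.
# This is the same machinery QuerySet.prefetch_related() uses internally:
#
#     >>> from django.db.models import prefetch_related_objects
#     >>> books = list(Book.objects.all())
#     >>> prefetch_related_objects(books, "authors")
#     >>> books[0].authors.all()    # served from the prefetch cache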
2013def get_prefetcher(instance, through_attr, to_attr):
2014 """
2015 For the attribute 'through_attr' on the given instance, find
2016 an object that has a get_prefetch_queryset().
2017 Return a 4-tuple containing:
2018 (the object with get_prefetch_queryset (or None),
2019 the descriptor object representing this relationship (or None),
2020 a boolean that is False if the attribute was not found at all,
2021 a function that takes an instance and returns a boolean that is True if
2022 the attribute has already been fetched for that instance)
2023 """
2025 def has_to_attr_attribute(instance):
2026 return hasattr(instance, to_attr)
2028 prefetcher = None
2029 is_fetched = has_to_attr_attribute
2031 # For singly related objects, we have to avoid getting the attribute
2032 # from the object, as this will trigger the query. So we first try
2033 # on the class, in order to get the descriptor object.
2034 rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
2035 if rel_obj_descriptor is None:
2036 attr_found = hasattr(instance, through_attr)
2037 else:
2038 attr_found = True
2039 if rel_obj_descriptor:
2040 # singly related object, descriptor object has the
2041 # get_prefetch_queryset() method.
2042 if hasattr(rel_obj_descriptor, "get_prefetch_queryset"):
2043 prefetcher = rel_obj_descriptor
2044 is_fetched = rel_obj_descriptor.is_cached
2045 else:
2046 # descriptor doesn't support prefetching, so we go ahead and get
2047 # the attribute on the instance rather than the class to
2048 # support many related managers
2049 rel_obj = getattr(instance, through_attr)
2050 if hasattr(rel_obj, "get_prefetch_queryset"):
2051 prefetcher = rel_obj
2052 if through_attr != to_attr:
2053 # Special case cached_property instances because hasattr
2054 # triggers attribute computation and assignment.
2055 if isinstance(
2056 getattr(instance.__class__, to_attr, None), cached_property
2057 ):
2059 def has_cached_property(instance):
2060 return to_attr in instance.__dict__
2062 is_fetched = has_cached_property
2063 else:
2065 def in_prefetched_cache(instance):
2066 return through_attr in instance._prefetched_objects_cache
2068 is_fetched = in_prefetched_cache
2069 return prefetcher, rel_obj_descriptor, attr_found, is_fetched
2072def prefetch_one_level(instances, prefetcher, lookup, level):
2073 """
2074 Helper function for prefetch_related_objects().
2076 Run prefetches on all instances using the prefetcher object,
2077 assigning results to relevant caches in instance.
2079 Return the prefetched objects along with any additional prefetches that
2080 must be done due to prefetch_related lookups found from default managers.
2081 """
2082 # prefetcher must have a method get_prefetch_queryset() which takes a list
2083 # of instances, and returns a tuple:
2085 # (queryset of instances of self.model that are related to passed in instances,
2086 # callable that gets value to be matched for returned instances,
2087 # callable that gets value to be matched for passed in instances,
2088 # boolean that is True for singly related objects,
2089 # cache or field name to assign to,
2090 # boolean that is True when the previous argument is a cache name vs a field name).
2092 # The 'values to be matched' must be hashable as they will be used
2093 # in a dictionary.
2095 (
2096 rel_qs,
2097 rel_obj_attr,
2098 instance_attr,
2099 single,
2100 cache_name,
2101 is_descriptor,
2102 ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))
2103 # We have to handle the possibility that the QuerySet we just got back
2104 # contains some prefetch_related lookups. We don't want to trigger the
2105 # prefetch_related functionality by evaluating the query. Rather, we need
2106 # to merge in the prefetch_related lookups.
2107 # Copy the lookups in case it is a Prefetch object which could be reused
2108 # later (happens in nested prefetch_related).
2109 additional_lookups = [
2110 copy.copy(additional_lookup)
2111 for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ())
2112 ]
2113 if additional_lookups:
2114 # Don't need to clone because the manager should have given us a fresh
2115 # instance, so we access an internal instead of using public interface
2116 # for performance reasons.
2117 rel_qs._prefetch_related_lookups = ()
2119 all_related_objects = list(rel_qs)
2121 rel_obj_cache = {}
2122 for rel_obj in all_related_objects:
2123 rel_attr_val = rel_obj_attr(rel_obj)
2124 rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
2126 to_attr, as_attr = lookup.get_current_to_attr(level)
2127 # Make sure `to_attr` does not conflict with a field.
2128 if as_attr and instances:
2129 # We assume that objects retrieved are homogeneous (which is the premise
2130 # of prefetch_related), so what applies to the first object applies to all.
2131 model = instances[0].__class__
2132 try:
2133 model._meta.get_field(to_attr)
2134 except exceptions.FieldDoesNotExist:
2135 pass
2136 else:
2137 msg = "to_attr={} conflicts with a field on the {} model."
2138 raise ValueError(msg.format(to_attr, model.__name__))
2140 # Whether or not we're prefetching the last part of the lookup.
2141 leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
2143 for obj in instances:
2144 instance_attr_val = instance_attr(obj)
2145 vals = rel_obj_cache.get(instance_attr_val, [])
2147 if single:
2148 val = vals[0] if vals else None
2149 if as_attr:
2150 # A to_attr has been given for the prefetch.
2151 setattr(obj, to_attr, val)
2152 elif is_descriptor:
2153 # cache_name points to a field name in obj.
2154 # This field is a descriptor for a related object.
2155 setattr(obj, cache_name, val)
2156 else:
2157 # No to_attr has been given for this prefetch operation and the
2158 # cache_name does not point to a descriptor. Store the value of
2159 # the field in the object's field cache.
2160 obj._state.fields_cache[cache_name] = val
2161 else:
2162 if as_attr:
2163 setattr(obj, to_attr, vals)
2164 else:
2165 manager = getattr(obj, to_attr)
2166 if leaf and lookup.queryset is not None:
2167 qs = manager._apply_rel_filters(lookup.queryset)
2168 else:
2169 qs = manager.get_queryset()
2170 qs._result_cache = vals
2171 # We don't want the individual qs doing prefetch_related now,
2172 # since we have merged this into the current work.
2173 qs._prefetch_done = True
2174 obj._prefetched_objects_cache[cache_name] = qs
2175 return all_related_objects, additional_lookups
2178class RelatedPopulator:
2179 """
2180 RelatedPopulator is used for select_related() object instantiation.
2182 The idea is that each select_related() model will be populated by a
2183 different RelatedPopulator instance. The RelatedPopulator instances get
2184 klass_info and select (computed in SQLCompiler) plus the used db as
2185 input for initialization. That data is used to compute which columns
2186 to use, how to instantiate the model, and how to populate the links
2187 between the objects.
2189 The actual creation of the objects is done in the populate() method. This
2190 method gets row and from_obj as input and populates the select_related()
2191 model instance.
2192 """
2194 def __init__(self, klass_info, select, db):
2195 self.db = db
2196 # Pre-compute needed attributes. The attributes are:
2197 # - model_cls: the possibly deferred model class to instantiate
2198 # - either:
2199 # - cols_start, cols_end: usually the columns in the row are
2200 # in the same order model_cls.__init__ expects them, so we
2201 # can instantiate by model_cls(*row[cols_start:cols_end])
2202 # - reorder_for_init: When select_related descends to a child
2203 # class, then we want to reuse the already selected parent
2204 # data. However, in this case the parent data isn't necessarily
2205 # in the same order that Model.__init__ expects it to be, so
2206 # we have to reorder the parent data. The reorder_for_init
2207 # attribute contains a function used to reorder the field data
2208 # in the order __init__ expects it.
2209 # - pk_idx: the index of the primary key field in the reordered
2210 # model data. Used to check if a related object exists at all.
2211 # - init_list: the field attnames fetched from the database. For
2212 # deferred models this isn't the same as all attnames of the
2213 # model's fields.
2214 # - related_populators: a list of RelatedPopulator instances if
2215 # select_related() descends to related models from this model.
2216 # - local_setter, remote_setter: Methods to set cached values on
2217 # the object being populated and on the remote object. Usually
2218 # these are Field.set_cached_value() methods.
2219 select_fields = klass_info["select_fields"]
2220 from_parent = klass_info["from_parent"]
2221 if not from_parent:
2222 self.cols_start = select_fields[0]
2223 self.cols_end = select_fields[-1] + 1
2224 self.init_list = [
2225 f[0].target.attname for f in select[self.cols_start : self.cols_end]
2226 ]
2227 self.reorder_for_init = None
2228 else:
2229 attname_indexes = {
2230 select[idx][0].target.attname: idx for idx in select_fields
2231 }
2232 model_init_attnames = (
2233 f.attname for f in klass_info["model"]._meta.concrete_fields
2234 )
2235 self.init_list = [
2236 attname for attname in model_init_attnames if attname in attname_indexes
2237 ]
2238 self.reorder_for_init = operator.itemgetter(
2239 *[attname_indexes[attname] for attname in self.init_list]
2240 )
2242 self.model_cls = klass_info["model"]
2243 self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
2244 self.related_populators = get_related_populators(klass_info, select, self.db)
2245 self.local_setter = klass_info["local_setter"]
2246 self.remote_setter = klass_info["remote_setter"]
2248 def populate(self, row, from_obj):
2249 if self.reorder_for_init:
2250 obj_data = self.reorder_for_init(row)
2251 else:
2252 obj_data = row[self.cols_start : self.cols_end]
2253 if obj_data[self.pk_idx] is None:
2254 obj = None
2255 else:
2256 obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
2257 for rel_iter in self.related_populators:
2258 rel_iter.populate(row, obj)
2259 self.local_setter(from_obj, obj)
2260 if obj is not None:
2261 self.remote_setter(obj, from_obj)
2264def get_related_populators(klass_info, select, db):
2265 iterators = []
2266 related_klass_infos = klass_info.get("related_klass_infos", [])
2267 for rel_klass_info in related_klass_infos:  2267 ↛ 2268 (the loop on line 2267 never started)
2268 rel_cls = RelatedPopulator(rel_klass_info, select, db)
2269 iterators.append(rel_cls)
2270 return iterators
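# Editor's note: RelatedPopulator and get_related_populators() are the
# machinery behind select_related(). A sketch, assuming `Book` has a
# ForeignKey `publisher`; the related instance is built from the same joined
# row, so accessing it triggers no extra query:
#
#     >>> book = Book.objects.select_related("publisher").get(pk=1)
#     >>> book.publisher.name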