diff --git a/django/apps/registry.py b/django/apps/registry.py index 92de6075fc..4b727e157a 100644 --- a/django/apps/registry.py +++ b/django/apps/registry.py @@ -288,9 +288,9 @@ class Apps: referred model is not swappable, return None. This method is decorated with @functools.cache because it's performance - critical when it comes to migrations. Since the swappable settings don't - change after Django has loaded the settings, there is no reason to get - the respective settings attribute over and over again. + critical when it comes to migrations. Since the swappable settings + don't change after Django has loaded the settings, there is no reason + to get the respective settings attribute over and over again. """ to_string = to_string.lower() for model in self.get_models(include_swapped=True): @@ -378,8 +378,9 @@ class Apps: # the relation tree and the fields cache. self.get_models.cache_clear() if self.ready: - # Circumvent self.get_models() to prevent that the cache is refilled. - # This particularly prevents that an empty value is cached while cloning. + # Circumvent self.get_models() to prevent that the cache is + # refilled. This particularly prevents that an empty value is + # cached while cloning. for app_config in self.app_configs.values(): for model in app_config.get_models(include_auto_created=True): model._meta._expire_cache() diff --git a/django/conf/__init__.py b/django/conf/__init__.py index 6b5f044e34..c7ae36aba0 100644 --- a/django/conf/__init__.py +++ b/django/conf/__init__.py @@ -77,7 +77,8 @@ class LazySettings(LazyObject): val = getattr(_wrapped, name) # Special case some settings which require further modification. - # This is done here for performance reasons so the modified value is cached. + # This is done here for performance reasons so the modified value is + # cached. 
if name in {"MEDIA_URL", "STATIC_URL"} and val is not None: val = self._add_script_prefix(val) elif name == "SECRET_KEY" and not val: @@ -149,7 +150,8 @@ class LazySettings(LazyObject): class Settings: def __init__(self, settings_module): - # update this dict from global settings (but only for ALL_CAPS settings) + # update this dict from global settings (but only for ALL_CAPS + # settings) for setting in dir(global_settings): if setting.isupper(): setattr(self, setting, getattr(global_settings, setting)) diff --git a/django/conf/global_settings.py b/django/conf/global_settings.py index 25ac49becf..ffbe5d3980 100644 --- a/django/conf/global_settings.py +++ b/django/conf/global_settings.py @@ -317,9 +317,9 @@ DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000 # before a SuspiciousOperation (TooManyFilesSent) is raised. DATA_UPLOAD_MAX_NUMBER_FILES = 100 -# Directory in which upload streamed files will be temporarily saved. A value of -# `None` will make Django use the operating system's default temporary directory -# (i.e. "/tmp" on *nix systems). +# Directory in which upload streamed files will be temporarily saved. A value +# of `None` will make Django use the operating system's default temporary +# directory (i.e. "/tmp" on *nix systems). FILE_UPLOAD_TEMP_DIR = None # The numeric mode to set newly-uploaded files to. The value should be a mode @@ -327,9 +327,9 @@ FILE_UPLOAD_TEMP_DIR = None # https://docs.python.org/library/os.html#files-and-directories. FILE_UPLOAD_PERMISSIONS = 0o644 -# The numeric mode to assign to newly-created directories, when uploading files. -# The value should be a mode as you'd pass to os.chmod; -# see https://docs.python.org/library/os.html#files-and-directories. +# The numeric mode to assign to newly-created directories, when uploading +# files. The value should be a mode as you'd pass to os.chmod; see +# https://docs.python.org/library/os.html#files-and-directories. 
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None # Python module path where user will place custom format definition. @@ -342,7 +342,8 @@ FORMAT_MODULE_PATH = None # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = "N j, Y" -# Default formatting for datetime objects. See all available format strings here: +# Default formatting for datetime objects. See all available format strings +# here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATETIME_FORMAT = "N j, Y, P" @@ -350,8 +351,8 @@ DATETIME_FORMAT = "N j, Y, P" # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date TIME_FORMAT = "P" -# Default formatting for date objects when only the year and month are relevant. -# See all available format strings here: +# Default formatting for date objects when only the year and month are +# relevant. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date YEAR_MONTH_FORMAT = "F Y" @@ -360,7 +361,8 @@ YEAR_MONTH_FORMAT = "F Y" # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date MONTH_DAY_FORMAT = "F j" -# Default short formatting for date objects. See all available format strings here: +# Default short formatting for date objects. See all available format strings +# here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATE_FORMAT = "m/d/Y" diff --git a/django/conf/locale/__init__.py b/django/conf/locale/__init__.py index 6ac7bd3bdb..04962042b3 100644 --- a/django/conf/locale/__init__.py +++ b/django/conf/locale/__init__.py @@ -1,10 +1,11 @@ """ -LANG_INFO is a dictionary structure to provide meta information about languages. +LANG_INFO is a dictionary structure to provide meta information about +languages. About name_local: capitalize it as if your language name was appearing inside a sentence in your language. 
-The 'fallback' key can be used to specify a special fallback logic which doesn't -follow the traditional 'fr-ca' -> 'fr' fallback logic. +The 'fallback' key can be used to specify a special fallback logic which +doesn't follow the traditional 'fr-ca' -> 'fr' fallback logic. """ LANG_INFO = { diff --git a/django/contrib/admin/actions.py b/django/contrib/admin/actions.py index 865c16aff2..04a906542a 100644 --- a/django/contrib/admin/actions.py +++ b/django/contrib/admin/actions.py @@ -24,7 +24,8 @@ def delete_selected(modeladmin, request, queryset): deletable objects, or, if the user has no permission one of the related childs (foreignkeys), a "permission denied" message. - Next, it deletes all selected objects and redirects back to the change list. + Next, it deletes all selected objects and redirects back to the change + list. """ opts = modeladmin.model._meta app_label = opts.app_label diff --git a/django/contrib/admin/checks.py b/django/contrib/admin/checks.py index 775bb12145..10257a54bf 100644 --- a/django/contrib/admin/checks.py +++ b/django/contrib/admin/checks.py @@ -316,7 +316,8 @@ class BaseModelAdminChecks: def _check_fields(self, obj): """Check that `fields` only refer to existing fields, doesn't contain - duplicates. Check if at most one of `fields` and `fieldsets` is defined. + duplicates. Check if at most one of `fields` and `fieldsets` is + defined. """ if obj.fields is None: diff --git a/django/contrib/admin/options.py b/django/contrib/admin/options.py index c3ccc6c4fe..6c202c8e61 100644 --- a/django/contrib/admin/options.py +++ b/django/contrib/admin/options.py @@ -170,10 +170,10 @@ class BaseModelAdmin(metaclass=forms.MediaDefiningClass): # ForeignKey or ManyToManyFields if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): - # Combine the field kwargs with any options for formfield_overrides. 
- # Make sure the passed in **kwargs override anything in - # formfield_overrides because **kwargs is more specific, and should - # always win. + # Combine the field kwargs with any options for + # formfield_overrides. Make sure the passed in **kwargs override + # anything in formfield_overrides because **kwargs is more + # specific, and should always win. if db_field.__class__ in self.formfield_overrides: kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs} @@ -2182,8 +2182,8 @@ class ModelAdmin(BaseModelAdmin): if obj is None: return self._get_obj_does_not_exist_redirect(request, self.opts, object_id) - # Populate deleted_objects, a data structure of all related objects that - # will also be deleted. + # Populate deleted_objects, a data structure of all related objects + # that will also be deleted. ( deleted_objects, model_count, diff --git a/django/contrib/admin/sites.py b/django/contrib/admin/sites.py index 9c9aa21f57..17af19fd1b 100644 --- a/django/contrib/admin/sites.py +++ b/django/contrib/admin/sites.py @@ -29,11 +29,11 @@ all_sites = WeakSet() class AdminSite: """ - An AdminSite object encapsulates an instance of the Django admin application, ready - to be hooked in to your URLconf. Models are registered with the AdminSite using the - register() method, and the get_urls() method can then be used to access Django view - functions that present a full admin interface for the collection of registered - models. + An AdminSite object encapsulates an instance of the Django admin + application, ready to be hooked in to your URLconf. Models are registered + with the AdminSite using the register() method, and the get_urls() method + can then be used to access Django view functions that present a full admin + interface for the collection of registered models. """ # Text to put at the end of each page's . @@ -136,9 +136,9 @@ class AdminSite: # If we got **options then dynamically construct a subclass of # admin_class with those **options. 
if options: - # For reasons I don't quite understand, without a __module__ - # the created class appears to "live" in the wrong place, - # which causes issues later on. + # For reasons I don't quite understand, without a + # __module__ the created class appears to "live" in the + # wrong place, which causes issues later on. options["__module__"] = __name__ admin_class = type( "%sAdmin" % model.__name__, (admin_class,), options diff --git a/django/contrib/admin/templatetags/admin_list.py b/django/contrib/admin/templatetags/admin_list.py index 1e6f8bf298..43d87c0024 100644 --- a/django/contrib/admin/templatetags/admin_list.py +++ b/django/contrib/admin/templatetags/admin_list.py @@ -256,7 +256,8 @@ def items_for_result(cl, result, form): ): row_classes.append("nowrap") row_class = mark_safe(' class="%s"' % " ".join(row_classes)) - # If list_display_links not defined, add the link tag to the first field + # If list_display_links not defined, add the link tag to the first + # field if link_to_changelist: table_tag = "th" if first else "td" first = False @@ -293,9 +294,9 @@ def items_for_result(cl, result, form): "<{}{}>{}</{}>", table_tag, row_class, link_or_text, table_tag ) else: - # By default the fields come from ModelAdmin.list_editable, but if we pull - # the fields out of the form instead of list_editable custom admins - # can provide fields on a per request basis + # By default the fields come from ModelAdmin.list_editable, but if + # we pull the fields out of the form instead of list_editable + # custom admins can provide fields on a per request basis if ( form and field_name in form.fields diff --git a/django/contrib/admin/tests.py b/django/contrib/admin/tests.py index b95a37b959..bd9c76b5fd 100644 --- a/django/contrib/admin/tests.py +++ b/django/contrib/admin/tests.py @@ -218,9 +218,9 @@ class AdminSeleniumTestCase(SeleniumTestCase, StaticLiveServerTestCase): actual_values.append(option.get_attribute("value")) self.assertEqual(values, actual_values) else: - 
# Prevent the `find_elements(By.CSS_SELECTOR, …)` call from blocking - # if the selector doesn't match any options as we expect it - # to be the case. + # Prevent the `find_elements(By.CSS_SELECTOR, …)` call from + # blocking if the selector doesn't match any options as we expect + # it to be the case. with self.disable_implicit_wait(): self.wait_until( lambda driver: not driver.find_elements( diff --git a/django/contrib/admin/utils.py b/django/contrib/admin/utils.py index eec93fa4be..74bd571e56 100644 --- a/django/contrib/admin/utils.py +++ b/django/contrib/admin/utils.py @@ -73,7 +73,8 @@ def prepare_lookup_value(key, value, separator=","): # if key ends with __in, split parameter into separate values if key.endswith("__in"): value = value.split(separator) - # if key ends with __isnull, special case '' and the string literals 'false' and '0' + # if key ends with __isnull, special case '' and the string literals + # 'false' and '0' elif key.endswith("__isnull"): value = value.lower() not in ("", "false", "0") return value @@ -558,9 +559,10 @@ def construct_change_message(form, formsets, add): Translations are deactivated so that strings are stored untranslated. Translation happens later on LogEntry access. """ - # Evaluating `form.changed_data` prior to disabling translations is required - # to avoid fields affected by localization from being included incorrectly, - # e.g. where date formats differ such as MM/DD/YYYY vs DD/MM/YYYY. + # Evaluating `form.changed_data` prior to disabling translations is + # required to avoid fields affected by localization from being included + # incorrectly, e.g. where date formats differ such as MM/DD/YYYY vs + # DD/MM/YYYY. 
changed_data = form.changed_data with translation_override(None): # Deactivate translations while fetching verbose_name for form diff --git a/django/contrib/admin/views/main.py b/django/contrib/admin/views/main.py index ed6c6f9219..8c9118808e 100644 --- a/django/contrib/admin/views/main.py +++ b/django/contrib/admin/views/main.py @@ -322,7 +322,8 @@ class ChangeList: self.result_count = result_count self.show_full_result_count = self.model_admin.show_full_result_count # Admin actions are shown if there is at least one entry - # or if entries are not counted because show_full_result_count is disabled + # or if entries are not counted because show_full_result_count is + # disabled self.show_admin_actions = not self.show_full_result_count or bool( full_result_count ) @@ -485,8 +486,8 @@ class ChangeList: """ Return a dictionary of ordering field column numbers and asc/desc. """ - # We must cope with more than one column having the same underlying sort - # field, so we base things on column numbers. + # We must cope with more than one column having the same underlying + # sort field, so we base things on column numbers. ordering = self._get_default_ordering() ordering_fields = {} if ORDER_VAR not in self.params: diff --git a/django/contrib/admin/widgets.py b/django/contrib/admin/widgets.py index fc83267e43..a601fc2667 100644 --- a/django/contrib/admin/widgets.py +++ b/django/contrib/admin/widgets.py @@ -215,8 +215,8 @@ class ForeignKeyRawIdWidget(forms.TextInput): class ManyToManyRawIdWidget(ForeignKeyRawIdWidget): """ - A Widget for displaying ManyToMany ids in the "raw_id" interface rather than - in a <select multiple> box. + A Widget for displaying ManyToMany ids in the "raw_id" interface rather + than in a <select multiple> box. 
""" template_name = "admin/widgets/many_to_many_raw_id.html" @@ -275,7 +275,8 @@ class RelatedFieldWidgetWrapper(forms.Widget): if not isinstance(widget, AutocompleteMixin): self.attrs["data-context"] = "available-source" self.can_change_related = not multiple and can_change_related - # XXX: The deletion UX can be confusing when dealing with cascading deletion. + # XXX: The deletion UX can be confusing when dealing with cascading + # deletion. cascade = getattr(rel, "on_delete", None) is CASCADE self.can_delete_related = not multiple and not cascade and can_delete_related self.can_view_related = not multiple and can_view_related diff --git a/django/contrib/admindocs/utils.py b/django/contrib/admindocs/utils.py index 6603f4dcd5..f8e329a79d 100644 --- a/django/contrib/admindocs/utils.py +++ b/django/contrib/admindocs/utils.py @@ -69,8 +69,8 @@ def parse_rst(text, default_reference_context, thing_being_parsed=None): "file_insertion_enabled": False, } thing_being_parsed = thing_being_parsed and "<%s>" % thing_being_parsed - # Wrap ``text`` in some reST that sets the default role to ``cmsreference``, - # then restores it. + # Wrap ``text`` in some reST that sets the default role to + # ``cmsreference``, then restores it. source = """ .. default-role:: cmsreference diff --git a/django/contrib/auth/mixins.py b/django/contrib/auth/mixins.py index 1f2e95ff00..9a59043897 100644 --- a/django/contrib/auth/mixins.py +++ b/django/contrib/auth/mixins.py @@ -33,7 +33,8 @@ class AccessMixin: def get_permission_denied_message(self): """ - Override this method to override the permission_denied_message attribute. + Override this method to override the permission_denied_message + attribute. 
""" return self.permission_denied_message diff --git a/django/contrib/auth/password_validation.py b/django/contrib/auth/password_validation.py index 38e7c5c3a8..690be58700 100644 --- a/django/contrib/auth/password_validation.py +++ b/django/contrib/auth/password_validation.py @@ -227,7 +227,8 @@ class CommonPasswordValidator: The password is rejected if it occurs in a provided list of passwords, which may be gzipped. The list Django ships with contains 20000 common - passwords (unhexed, lowercased and deduplicated), created by Royce Williams: + passwords (unhexed, lowercased and deduplicated), created by Royce + Williams: https://gist.github.com/roycewilliams/226886fd01572964e1431ac8afc999ce The password list must be lowercased to match the comparison in validate(). """ diff --git a/django/contrib/auth/urls.py b/django/contrib/auth/urls.py index 699ba6179a..8365be689b 100644 --- a/django/contrib/auth/urls.py +++ b/django/contrib/auth/urls.py @@ -1,7 +1,7 @@ # The views used below are normally mapped in the AdminSite instance. -# This URLs file is used to provide a reliable view deployment for test purposes. -# It is also provided as a convenience to those who want to deploy these URLs -# elsewhere. +# This URLs file is used to provide a reliable view deployment for test +# purposes. It is also provided as a convenience to those who want to deploy +# these URLs elsewhere. from django.contrib.auth import views from django.urls import path diff --git a/django/contrib/contenttypes/admin.py b/django/contrib/contenttypes/admin.py index 617d6d2e81..f595ce5285 100644 --- a/django/contrib/contenttypes/admin.py +++ b/django/contrib/contenttypes/admin.py @@ -67,8 +67,8 @@ class GenericInlineModelAdminChecks(InlineModelAdminChecks): ) ] - # There's one or more GenericForeignKeys; make sure that one of them - # uses the right ct_field and ct_fk_field. + # There's one or more GenericForeignKeys; make sure that one of + # them uses the right ct_field and ct_fk_field. 
for gfk in gfks: if gfk.ct_field == obj.ct_field and gfk.fk_field == obj.ct_fk_field: return [] diff --git a/django/contrib/contenttypes/fields.py b/django/contrib/contenttypes/fields.py index f28c346576..d85b61933a 100644 --- a/django/contrib/contenttypes/fields.py +++ b/django/contrib/contenttypes/fields.py @@ -199,8 +199,9 @@ class GenericForeignKey(FieldCacheMixin, Field): ct = self.get_content_type(id=ct_id, using=instance._state.db) ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys)) - # For doing the join in Python, we have to match both the FK val and the - # content type, so we use a callable that returns a (fk, class) pair. + # For doing the join in Python, we have to match both the FK val and + # the content type, so we use a callable that returns a (fk, class) + # pair. def gfk_key(obj): ct_id = getattr(obj, ct_attname) if ct_id is None: diff --git a/django/contrib/gis/db/backends/base/features.py b/django/contrib/gis/db/backends/base/features.py index 22c90a1714..d1e2211bdc 100644 --- a/django/contrib/gis/db/backends/base/features.py +++ b/django/contrib/gis/db/backends/base/features.py @@ -8,10 +8,12 @@ from .operations import BaseSpatialOperations class BaseSpatialFeatures: gis_enabled = True - # Does the database contain a SpatialRefSys model to store SRID information? + # Does the database contain a SpatialRefSys model to store SRID + # information? has_spatialrefsys_table = True - # Does the backend support the django.contrib.gis.utils.add_srs_entry() utility? + # Does the backend support the django.contrib.gis.utils.add_srs_entry() + # utility? supports_add_srs_entry = True # Does the backend introspect GeometryField to its subtypes? 
supports_geometry_field_introspection = True diff --git a/django/contrib/gis/db/backends/base/operations.py b/django/contrib/gis/db/backends/base/operations.py index 5c09aaa8bc..b3bf4383ad 100644 --- a/django/contrib/gis/db/backends/base/operations.py +++ b/django/contrib/gis/db/backends/base/operations.py @@ -28,8 +28,8 @@ class BaseSpatialOperations: geom_func_prefix = "" - # Mapping between Django function names and backend names, when names do not - # match; used in spatial_function_name(). + # Mapping between Django function names and backend names, when names do + # not match; used in spatial_function_name(). function_names = {} # Set of known unsupported functions of the backend @@ -79,8 +79,8 @@ class BaseSpatialOperations: # Constructors from_text = False - # Default conversion functions for aggregates; will be overridden if implemented - # for the spatial backend. + # Default conversion functions for aggregates; will be overridden if + # implemented for the spatial backend. def convert_extent(self, box, srid): raise NotImplementedError( "Aggregate extent not implemented for this spatial backend." 
diff --git a/django/contrib/gis/db/backends/mysql/features.py b/django/contrib/gis/db/backends/mysql/features.py index cd99420374..4e46ba40f3 100644 --- a/django/contrib/gis/db/backends/mysql/features.py +++ b/django/contrib/gis/db/backends/mysql/features.py @@ -17,5 +17,6 @@ class DatabaseFeatures(BaseSpatialFeatures, MySQLDatabaseFeatures): @cached_property def supports_geometry_field_unique_index(self): - # Not supported in MySQL since https://dev.mysql.com/worklog/task/?id=11808 + # Not supported in MySQL since + # https://dev.mysql.com/worklog/task/?id=11808 return self.connection.mysql_is_mariadb diff --git a/django/contrib/gis/db/backends/mysql/introspection.py b/django/contrib/gis/db/backends/mysql/introspection.py index 4d6aea78a2..65e7549efc 100644 --- a/django/contrib/gis/db/backends/mysql/introspection.py +++ b/django/contrib/gis/db/backends/mysql/introspection.py @@ -19,9 +19,9 @@ class MySQLIntrospection(DatabaseIntrospection): # column. for column, typ, null, key, default, extra in cursor.fetchall(): if column == description.name: - # Using OGRGeomType to convert from OGC name to Django field. - # MySQL does not support 3D or SRIDs, so the field params - # are empty. + # Using OGRGeomType to convert from OGC name to Django + # field. MySQL does not support 3D or SRIDs, so the field + # params are empty. field_type = OGRGeomType(typ).django field_params = {} break diff --git a/django/contrib/gis/db/backends/oracle/introspection.py b/django/contrib/gis/db/backends/oracle/introspection.py index 8e1a5e7a8c..baaf658b43 100644 --- a/django/contrib/gis/db/backends/oracle/introspection.py +++ b/django/contrib/gis/db/backends/oracle/introspection.py @@ -17,7 +17,8 @@ class OracleIntrospection(DatabaseIntrospection): def get_geometry_type(self, table_name, description): with self.connection.cursor() as cursor: - # Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information. 
+ # Querying USER_SDO_GEOM_METADATA to get the SRID and dimension + # information. try: cursor.execute( 'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" ' @@ -31,8 +32,8 @@ class OracleIntrospection(DatabaseIntrospection): 'corresponding to "%s"."%s"' % (table_name, description.name) ) from exc - # TODO: Research way to find a more specific geometry field type for - # the column's contents. + # TODO: Research way to find a more specific geometry field type + # for the column's contents. field_type = "GeometryField" # Getting the field parameters. diff --git a/django/contrib/gis/db/backends/oracle/operations.py b/django/contrib/gis/db/backends/oracle/operations.py index 77cb8e034c..467fc530b3 100644 --- a/django/contrib/gis/db/backends/oracle/operations.py +++ b/django/contrib/gis/db/backends/oracle/operations.py @@ -227,8 +227,8 @@ class OracleOperations(BaseSpatialOperations, DatabaseOperations): return OracleSpatialRefSys def modify_insert_params(self, placeholder, params): - """Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial - backend due to #10888. + """Drop out insert parameters for NULL placeholder. Needed for Oracle + Spatial backend due to #10888. """ if placeholder == "NULL": return [] diff --git a/django/contrib/gis/db/backends/postgis/introspection.py b/django/contrib/gis/db/backends/postgis/introspection.py index 7c96c2cb04..e3b9957838 100644 --- a/django/contrib/gis/db/backends/postgis/introspection.py +++ b/django/contrib/gis/db/backends/postgis/introspection.py @@ -61,7 +61,8 @@ class PostGISIntrospection(DatabaseIntrospection): # OGRGeomType does not require GDAL and makes it easy to convert # from OGC geom type name to Django field. field_type = OGRGeomType(field_type).django - # Getting any GeometryField keyword arguments that are not the default. + # Getting any GeometryField keyword arguments that are not the + # default. 
field_params = {} if self.postgis_oid_lookup.get(description.type_code) == "geography": field_params["geography"] = True diff --git a/django/contrib/gis/db/backends/postgis/operations.py b/django/contrib/gis/db/backends/postgis/operations.py index 303e039958..df3cc7c7ee 100644 --- a/django/contrib/gis/db/backends/postgis/operations.py +++ b/django/contrib/gis/db/backends/postgis/operations.py @@ -328,7 +328,8 @@ class PostGISOperations(BaseSpatialOperations, DatabaseOperations): def _get_postgis_func(self, func): """ - Helper routine for calling PostGIS functions and returning their result. + Helper routine for calling PostGIS functions and returning their + result. """ # Close out the connection. See #9437. with self.connection.temporary_connection() as cursor: @@ -340,7 +341,9 @@ class PostGISOperations(BaseSpatialOperations, DatabaseOperations): return self._get_postgis_func("postgis_geos_version") def postgis_lib_version(self): - "Return the version number of the PostGIS library used with PostgreSQL." + """ + Return the version number of the PostGIS library used with PostgreSQL. + """ return self._get_postgis_func("postgis_lib_version") def postgis_proj_version(self): diff --git a/django/contrib/gis/db/backends/postgis/schema.py b/django/contrib/gis/db/backends/postgis/schema.py index c74b574c4c..aef62198cb 100644 --- a/django/contrib/gis/db/backends/postgis/schema.py +++ b/django/contrib/gis/db/backends/postgis/schema.py @@ -100,8 +100,9 @@ class PostGISSchemaEditor(DatabaseSchemaEditor): opclasses = None fields = [field] if field.geom_type == "RASTER": - # For raster fields, wrap index creation SQL statement with ST_ConvexHull. - # Indexes on raster columns are based on the convex hull of the raster. + # For raster fields, wrap index creation SQL statement with + # ST_ConvexHull. Indexes on raster columns are based on the convex + # hull of the raster. 
expressions = Func(Col(None, field), template=self.rast_index_template) fields = None elif field.dim > 2 and not field.geography: diff --git a/django/contrib/gis/db/backends/spatialite/introspection.py b/django/contrib/gis/db/backends/spatialite/introspection.py index 8d0003fd53..f2f5146586 100644 --- a/django/contrib/gis/db/backends/spatialite/introspection.py +++ b/django/contrib/gis/db/backends/spatialite/introspection.py @@ -52,7 +52,8 @@ class SpatiaLiteIntrospection(DatabaseIntrospection): ogr_type = ogr_type % 1000 + OGRGeomType.wkb25bit field_type = OGRGeomType(ogr_type).django - # Getting any GeometryField keyword arguments that are not the default. + # Getting any GeometryField keyword arguments that are not the + # default. dim = row[0] srid = row[1] field_params = {} diff --git a/django/contrib/gis/db/backends/spatialite/operations.py b/django/contrib/gis/db/backends/spatialite/operations.py index 3509001426..e9321ee2a3 100644 --- a/django/contrib/gis/db/backends/spatialite/operations.py +++ b/django/contrib/gis/db/backends/spatialite/operations.py @@ -48,9 +48,11 @@ class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations): "relate": SpatialiteNullCheckOperator(func="Relate"), "coveredby": SpatialiteNullCheckOperator(func="CoveredBy"), "covers": SpatialiteNullCheckOperator(func="Covers"), - # Returns true if B's bounding box completely contains A's bounding box. + # Returns true if B's bounding box completely contains A's bounding + # box. "contained": SpatialOperator(func="MbrWithin"), - # Returns true if A's bounding box completely contains B's bounding box. + # Returns true if A's bounding box completely contains B's bounding + # box. "bbcontains": SpatialOperator(func="MbrContains"), # Returns true if A's bounding box overlaps B's bounding box. 
"bboverlaps": SpatialOperator(func="MbrOverlaps"), diff --git a/django/contrib/gis/db/backends/spatialite/schema.py b/django/contrib/gis/db/backends/spatialite/schema.py index fb2c5690de..69b8da2501 100644 --- a/django/contrib/gis/db/backends/spatialite/schema.py +++ b/django/contrib/gis/db/backends/spatialite/schema.py @@ -91,7 +91,8 @@ class SpatialiteSchemaEditor(DatabaseSchemaEditor): def delete_model(self, model, **kwargs): from django.contrib.gis.db.models import GeometryField - # Drop spatial metadata (dropping the table does not automatically remove them) + # Drop spatial metadata (dropping the table does not automatically + # remove them) for field in model._meta.local_fields: if isinstance(field, GeometryField): self.remove_geometry_metadata(model, field) @@ -126,9 +127,9 @@ class SpatialiteSchemaEditor(DatabaseSchemaEditor): # NOTE: If the field is a geometry field, the table is just recreated, # the parent's remove_field can't be used cause it will skip the - # recreation if the field does not have a database type. Geometry fields - # do not have a db type cause they are added and removed via stored - # procedures. + # recreation if the field does not have a database type. Geometry + # fields do not have a db type cause they are added and removed via + # stored procedures. if isinstance(field, GeometryField): self._remake_table(model, delete_field=field) else: diff --git a/django/contrib/gis/db/backends/utils.py b/django/contrib/gis/db/backends/utils.py index ffb7420019..4e2035d577 100644 --- a/django/contrib/gis/db/backends/utils.py +++ b/django/contrib/gis/db/backends/utils.py @@ -6,7 +6,8 @@ backends. class SpatialOperator: """ - Class encapsulating the behavior specific to a GIS operation (used by lookups). + Class encapsulating the behavior specific to a GIS operation (used by + lookups). 
""" sql_template = None diff --git a/django/contrib/gis/db/models/fields.py b/django/contrib/gis/db/models/fields.py index 4542e19040..d1c1a5937e 100644 --- a/django/contrib/gis/db/models/fields.py +++ b/django/contrib/gis/db/models/fields.py @@ -81,8 +81,8 @@ class BaseSpatialField(Field): def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs): """ - The initialization function for base spatial fields. Takes the following - as keyword arguments: + The initialization function for base spatial fields. Takes the + following as keyword arguments: srid: The spatial reference system identifier, an OGC standard. @@ -195,7 +195,8 @@ class BaseSpatialField(Field): if isinstance(obj, GEOSGeometry): pass else: - # Check if input is a candidate for conversion to raster or geometry. + # Check if input is a candidate for conversion to raster or + # geometry. is_candidate = isinstance(obj, (bytes, str)) or hasattr( obj, "__geo_interface__" ) @@ -395,7 +396,8 @@ class RasterField(BaseSpatialField): geography = False def _check_connection(self, connection): - # Make sure raster fields are used only on backends with raster support. + # Make sure raster fields are used only on backends with raster + # support. if ( not connection.features.gis_enabled or not connection.features.supports_raster diff --git a/django/contrib/gis/db/models/functions.py b/django/contrib/gis/db/models/functions.py index cafcd32e3b..9e94d0f77a 100644 --- a/django/contrib/gis/db/models/functions.py +++ b/django/contrib/gis/db/models/functions.py @@ -121,8 +121,8 @@ class GeomOutputGeoFunc(GeoFunc): class SQLiteDecimalToFloatMixin: """ - By default, Decimal values are converted to str by the SQLite backend, which - is not acceptable by the GIS functions expecting numeric values. + By default, Decimal values are converted to str by the SQLite backend, + which is not acceptable by the GIS functions expecting numeric values. 
""" def as_sqlite(self, compiler, connection, **extra_context): @@ -483,7 +483,8 @@ class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc): if self.source_is_geography(): clone.source_expressions.append(Value(self.spheroid)) elif self.geo_field.geodetic(connection): - # Geometry fields with geodetic (lon/lat) coordinates need length_spheroid + # Geometry fields with geodetic (lon/lat) coordinates need + # length_spheroid function = connection.ops.spatial_function_name("LengthSpheroid") clone.source_expressions.append(Value(self.geo_field.spheroid(connection))) else: diff --git a/django/contrib/gis/db/models/lookups.py b/django/contrib/gis/db/models/lookups.py index 49e6c8b606..3d30ffed5c 100644 --- a/django/contrib/gis/db/models/lookups.py +++ b/django/contrib/gis/db/models/lookups.py @@ -70,9 +70,9 @@ class GISLookup(Lookup): return placeholder % rhs, rhs_params def get_rhs_op(self, connection, rhs): - # Unlike BuiltinLookup, the GIS get_rhs_op() implementation should return - # an object (SpatialOperator) with an as_sql() method to allow for more - # complex computations (where the lhs part can be mixed in). + # Unlike BuiltinLookup, the GIS get_rhs_op() implementation should + # return an object (SpatialOperator) with an as_sql() method to allow + # for more complex computations (where the lhs part can be mixed in). return connection.ops.gis_operators[self.lookup_name] def as_sql(self, compiler, connection): @@ -98,8 +98,8 @@ class GISLookup(Lookup): @BaseSpatialField.register_lookup class OverlapsLeftLookup(GISLookup): """ - The overlaps_left operator returns true if A's bounding box overlaps or is to the - left of B's bounding box. + The overlaps_left operator returns true if A's bounding box overlaps or is + to the left of B's bounding box. 
""" lookup_name = "overlaps_left" @@ -108,8 +108,8 @@ class OverlapsLeftLookup(GISLookup): @BaseSpatialField.register_lookup class OverlapsRightLookup(GISLookup): """ - The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the - right of B's bounding box. + The 'overlaps_right' operator returns true if A's bounding box overlaps or + is to the right of B's bounding box. """ lookup_name = "overlaps_right" @@ -118,8 +118,8 @@ class OverlapsRightLookup(GISLookup): @BaseSpatialField.register_lookup class OverlapsBelowLookup(GISLookup): """ - The 'overlaps_below' operator returns true if A's bounding box overlaps or is below - B's bounding box. + The 'overlaps_below' operator returns true if A's bounding box overlaps or + is below B's bounding box. """ lookup_name = "overlaps_below" @@ -128,8 +128,8 @@ class OverlapsBelowLookup(GISLookup): @BaseSpatialField.register_lookup class OverlapsAboveLookup(GISLookup): """ - The 'overlaps_above' operator returns true if A's bounding box overlaps or is above - B's bounding box. + The 'overlaps_above' operator returns true if A's bounding box overlaps or + is above B's bounding box. """ lookup_name = "overlaps_above" @@ -138,8 +138,8 @@ class OverlapsAboveLookup(GISLookup): @BaseSpatialField.register_lookup class LeftLookup(GISLookup): """ - The 'left' operator returns true if A's bounding box is strictly to the left - of B's bounding box. + The 'left' operator returns true if A's bounding box is strictly to the + left of B's bounding box. """ lookup_name = "left" @@ -148,8 +148,8 @@ class LeftLookup(GISLookup): @BaseSpatialField.register_lookup class RightLookup(GISLookup): """ - The 'right' operator returns true if A's bounding box is strictly to the right - of B's bounding box. + The 'right' operator returns true if A's bounding box is strictly to the + right of B's bounding box. 
""" lookup_name = "right" @@ -158,8 +158,8 @@ class RightLookup(GISLookup): @BaseSpatialField.register_lookup class StrictlyBelowLookup(GISLookup): """ - The 'strictly_below' operator returns true if A's bounding box is strictly below B's - bounding box. + The 'strictly_below' operator returns true if A's bounding box is strictly + below B's bounding box. """ lookup_name = "strictly_below" @@ -168,8 +168,8 @@ class StrictlyBelowLookup(GISLookup): @BaseSpatialField.register_lookup class StrictlyAboveLookup(GISLookup): """ - The 'strictly_above' operator returns true if A's bounding box is strictly above B's - bounding box. + The 'strictly_above' operator returns true if A's bounding box is strictly + above B's bounding box. """ lookup_name = "strictly_above" @@ -192,8 +192,8 @@ BaseSpatialField.register_lookup(SameAsLookup, "exact") @BaseSpatialField.register_lookup class BBContainsLookup(GISLookup): """ - The 'bbcontains' operator returns true if A's bounding box completely contains - by B's bounding box. + The 'bbcontains' operator returns true if A's bounding box completely + contains by B's bounding box. """ lookup_name = "bbcontains" @@ -212,8 +212,8 @@ class BBOverlapsLookup(GISLookup): @BaseSpatialField.register_lookup class ContainedLookup(GISLookup): """ - The 'contained' operator returns true if A's bounding box is completely contained - by B's bounding box. + The 'contained' operator returns true if A's bounding box is completely + contained by B's bounding box. """ lookup_name = "contained" diff --git a/django/contrib/gis/gdal/envelope.py b/django/contrib/gis/gdal/envelope.py index 8293aa499d..f33f9b4df6 100644 --- a/django/contrib/gis/gdal/envelope.py +++ b/django/contrib/gis/gdal/envelope.py @@ -39,8 +39,8 @@ class Envelope: def __init__(self, *args): """ - The initialization function may take an OGREnvelope structure, 4-element - tuple or list, or 4 individual arguments. 
+ The initialization function may take an OGREnvelope structure, + 4-element tuple or list, or 4 individual arguments. """ if len(args) == 1: diff --git a/django/contrib/gis/gdal/geometries.py b/django/contrib/gis/gdal/geometries.py index f0e56a3e01..6301cd7146 100644 --- a/django/contrib/gis/gdal/geometries.py +++ b/django/contrib/gis/gdal/geometries.py @@ -30,7 +30,8 @@ Example: >>> print(mpnt.proj) +proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs >>> print(mpnt) - MULTIPOINT (-89.99993037860248 29.99979788655764,-89.99993037860248 29.99979788655764) + MULTIPOINT (-89.99993037860248 29.99979788655764,-89.99993037860248 + 29.99979788655764) The OGRGeomType class is to make it easy to specify an OGR geometry type: >>> from django.contrib.gis.gdal import OGRGeomType @@ -248,7 +249,10 @@ class OGRGeometry(GDALBase): @property def area(self): - "Return the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise." + """ + Return the area for a LinearRing, Polygon, or MultiPolygon; 0 + otherwise. + """ return capi.get_area(self.ptr) @property @@ -411,7 +415,8 @@ class OGRGeometry(GDALBase): else: byteorder = 0 # wkbXDR sz = self.wkb_size - # Creating the unsigned character buffer, and passing it in by reference. + # Creating the unsigned character buffer, and passing it in by + # reference. buf = (c_ubyte * sz)() # For backward compatibility, export old-style 99-402 extended # dimension types when geometry does not have an M dimension. @@ -483,8 +488,8 @@ class OGRGeometry(GDALBase): # #### Topology Methods #### def _topology(self, func, other): - """A generalized function for topology operations, takes a GDAL function and - the other geometry to perform the operation on.""" + """A generalized function for topology operations, takes a GDAL + function and the other geometry to perform the operation on.""" if not isinstance(other, OGRGeometry): raise TypeError( "Must use another OGRGeometry object for topology operations!" 
diff --git a/django/contrib/gis/gdal/raster/band.py b/django/contrib/gis/gdal/raster/band.py index c3ec960643..34ce39633e 100644 --- a/django/contrib/gis/gdal/raster/band.py +++ b/django/contrib/gis/gdal/raster/band.py @@ -71,8 +71,8 @@ class GDALBand(GDALRasterBase): If approximate=True, the statistics may be computed based on overviews or a subset of image tiles. - If refresh=True, the statistics will be computed from the data directly, - and the cache will be updated where applicable. + If refresh=True, the statistics will be computed from the data + directly, and the cache will be updated where applicable. For empty bands (where all pixel values are nodata), all statistics values are returned as None. diff --git a/django/contrib/gis/gdal/raster/source.py b/django/contrib/gis/gdal/raster/source.py index b33eb11c0f..93c5900970 100644 --- a/django/contrib/gis/gdal/raster/source.py +++ b/django/contrib/gis/gdal/raster/source.py @@ -204,7 +204,8 @@ class GDALRaster(GDALRasterBase): if "skew" in ds_input: self.skew.x, self.skew.y = ds_input["skew"] elif isinstance(ds_input, c_void_p): - # Instantiate the object using an existing pointer to a gdal raster. + # Instantiate the object using an existing pointer to a gdal + # raster. self._ptr = ds_input else: raise GDALException( @@ -410,11 +411,12 @@ class GDALRaster(GDALRasterBase): name of the source raster will be used and appended with _copy. + source_driver_name. - In addition, the resampling algorithm can be specified with the "resampling" - input parameter. The default is NearestNeighbor. For a list of all options - consult the GDAL_RESAMPLE_ALGORITHMS constant. + In addition, the resampling algorithm can be specified with the + "resampling" input parameter. The default is NearestNeighbor. For a + list of all options consult the GDAL_RESAMPLE_ALGORITHMS constant. 
""" - # Get the parameters defining the geotransform, srid, and size of the raster + # Get the parameters defining the geotransform, srid, and size of the + # raster ds_input.setdefault("width", self.width) ds_input.setdefault("height", self.height) ds_input.setdefault("srid", self.srs.srid) diff --git a/django/contrib/gis/gdal/srs.py b/django/contrib/gis/gdal/srs.py index bb3176c383..cd77a1bf6c 100644 --- a/django/contrib/gis/gdal/srs.py +++ b/django/contrib/gis/gdal/srs.py @@ -44,9 +44,9 @@ class AxisOrder(IntEnum): class SpatialReference(GDALBase): """ - A wrapper for the OGRSpatialReference object. According to the GDAL web site, - the SpatialReference object "provide[s] services to represent coordinate - systems (projections and datums) and to transform between them." + A wrapper for the OGRSpatialReference object. According to the GDAL web + site, the SpatialReference object "provide[s] services to represent + coordinate systems (projections and datums) and to transform between them." """ destructor = capi.release_srs @@ -150,8 +150,8 @@ class SpatialReference(GDALBase): # #### SpatialReference Methods #### def attr_value(self, target, index=0): """ - The attribute value for the given target node (e.g. 'PROJCS'). The index - keyword specifies an index of the child node to return. + The attribute value for the given target node (e.g. 'PROJCS'). The + index keyword specifies an index of the child node to return. """ if not isinstance(target, str) or not isinstance(index, int): raise TypeError @@ -288,7 +288,9 @@ class SpatialReference(GDALBase): @property def local(self): - "Return True if this SpatialReference is local (root node is LOCAL_CS)." + """ + Return True if this SpatialReference is local (root node is LOCAL_CS). 
+ """ return bool(capi.islocal(self.ptr)) @property diff --git a/django/contrib/gis/geos/collections.py b/django/contrib/gis/geos/collections.py index 41a647f234..8659b660b6 100644 --- a/django/contrib/gis/geos/collections.py +++ b/django/contrib/gis/geos/collections.py @@ -27,8 +27,8 @@ class GeometryCollection(GEOSGeometry): else: init_geoms = args - # Ensuring that only the permitted geometries are allowed in this collection - # this is moved to list mixin super class + # Ensuring that only the permitted geometries are allowed in this + # collection this is moved to list mixin super class self._check_allowed(init_geoms) # Creating the geometry pointer array. @@ -61,14 +61,19 @@ class GeometryCollection(GEOSGeometry): return capi.get_geomn(self.ptr, index) def _get_single_external(self, index): - "Return the Geometry from this Collection at the given index (0-based)." + """ + Return the Geometry from this Collection at the given index (0-based). + """ # Checking the index and returning the corresponding GEOS geometry. return GEOSGeometry( capi.geom_clone(self._get_single_internal(index)), srid=self.srid ) def _set_list(self, length, items): - "Create a new collection, and destroy the contents of the previous pointer." + """ + Create a new collection, and destroy the contents of the previous + pointer. + """ prev_ptr = self.ptr srid = self.srid self.ptr = self._create_collection(length, items) diff --git a/django/contrib/gis/geos/coordseq.py b/django/contrib/gis/geos/coordseq.py index a9ec4d2bf0..e54f3f2714 100644 --- a/django/contrib/gis/geos/coordseq.py +++ b/django/contrib/gis/geos/coordseq.py @@ -180,7 +180,8 @@ class GEOSCoordSeq(GEOSBase): @property def kml(self): "Return the KML representation for the coordinates." - # Getting the substitution string depending on whether the coordinates have + # Getting the substitution string depending on whether the coordinates + # have # a Z dimension. 
if self.hasz: substr = "%s,%s,%s " diff --git a/django/contrib/gis/geos/geometry.py b/django/contrib/gis/geos/geometry.py index cfc2d695ea..48658c4218 100644 --- a/django/contrib/gis/geos/geometry.py +++ b/django/contrib/gis/geos/geometry.py @@ -76,9 +76,10 @@ class GEOSGeometryBase(GEOSBase): def __deepcopy__(self, memodict): """ - The `deepcopy` routine is used by the `Node` class of django.utils.tree; - thus, the protocol routine needs to be implemented to return correct - copies (clones) of these GEOS objects, which use C pointers. + The `deepcopy` routine is used by the `Node` class of + django.utils.tree; thus, the protocol routine needs to be implemented + to return correct copies (clones) of these GEOS objects, which use C + pointers. """ return self.clone() @@ -343,7 +344,8 @@ class GEOSGeometryBase(GEOSBase): def overlaps(self, other): """ Return true if the DE-9IM intersection matrix for the two Geometries - is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves). + is T*T***T** (for two points or two surfaces) 1*T***T** (for two + curves). """ return capi.geos_overlaps(self.ptr, other.ptr) @@ -542,9 +544,9 @@ class GEOSGeometryBase(GEOSBase): """ Return a geometry that represents all points whose distance from this Geometry is less than or equal to distance. Calculations are in the - Spatial Reference System of this Geometry. The optional third parameter sets - the number of segment used to approximate a quarter circle (defaults to 8). - (Text from PostGIS documentation at ch. 6.1.3) + Spatial Reference System of this Geometry. The optional third parameter + sets the number of segment used to approximate a quarter circle + (defaults to 8). (Text from PostGIS documentation at ch. 
6.1.3) """ return self._topology(capi.geos_buffer(self.ptr, width, quadsegs)) @@ -567,9 +569,9 @@ class GEOSGeometryBase(GEOSBase): @property def centroid(self): """ - The centroid is equal to the centroid of the set of component Geometries - of highest dimension (since the lower-dimension geometries contribute zero - "weight" to the centroid). + The centroid is equal to the centroid of the set of component + Geometries of highest dimension (since the lower-dimension geometries + contribute zero "weight" to the centroid). """ return self._topology(capi.geos_centroid(self.ptr)) @@ -594,7 +596,10 @@ class GEOSGeometryBase(GEOSBase): return self._topology(capi.geos_envelope(self.ptr)) def intersection(self, other): - "Return a Geometry representing the points shared by this Geometry and other." + """ + Return a Geometry representing the points shared by this Geometry and + other. + """ return self._topology(capi.geos_intersection(self.ptr, other.ptr)) @property @@ -603,7 +608,9 @@ class GEOSGeometryBase(GEOSBase): return self._topology(capi.geos_pointonsurface(self.ptr)) def relate(self, other): - "Return the DE-9IM intersection matrix for this Geometry and the other." + """ + Return the DE-9IM intersection matrix for this Geometry and the other. + """ return capi.geos_relate(self.ptr, other.ptr).decode() def simplify(self, tolerance=0.0, preserve_topology=False): @@ -636,7 +643,10 @@ class GEOSGeometryBase(GEOSBase): return self._topology(capi.geos_unary_union(self.ptr)) def union(self, other): - "Return a Geometry representing all the points in this Geometry and other." + """ + Return a Geometry representing all the points in this Geometry and + other. 
+ """ return self._topology(capi.geos_union(self.ptr, other.ptr)) # #### Other Routines #### diff --git a/django/contrib/gis/geos/libgeos.py b/django/contrib/gis/geos/libgeos.py index 501c28c6d3..99a4f2f94e 100644 --- a/django/contrib/gis/geos/libgeos.py +++ b/django/contrib/gis/geos/libgeos.py @@ -57,7 +57,8 @@ def load_geos(): ) # Getting the GEOS C library. The C interface (CDLL) is used for # both *NIX and Windows. - # See the GEOS C API source code for more details on the library function calls: + # See the GEOS C API source code for more details on the library function + # calls: # https://libgeos.org/doxygen/geos__c_8h_source.html _lgeos = CDLL(lib_path) # Here we set up the prototypes for the initGEOS_r and finishGEOS_r diff --git a/django/contrib/gis/geos/point.py b/django/contrib/gis/geos/point.py index a1a0a33e00..90f0793e16 100644 --- a/django/contrib/gis/geos/point.py +++ b/django/contrib/gis/geos/point.py @@ -26,7 +26,8 @@ class Point(GEOSGeometry): # Here a tuple or list was passed in under the `x` parameter. coords = x elif isinstance(x, (float, int)) and isinstance(y, (float, int)): - # Here X, Y, and (optionally) Z were passed in individually, as parameters. + # Here X, Y, and (optionally) Z were passed in individually, as + # parameters. 
if isinstance(z, (float, int)): coords = [x, y, z] else: diff --git a/django/contrib/gis/geos/polygon.py b/django/contrib/gis/geos/polygon.py index 554447c73f..c6b96607a9 100644 --- a/django/contrib/gis/geos/polygon.py +++ b/django/contrib/gis/geos/polygon.py @@ -69,8 +69,8 @@ class Polygon(GEOSGeometry): # ### These routines are needed for list-like operation w/ListMixin ### def _create_polygon(self, length, items): # Instantiate LinearRing objects if necessary, but don't clone them yet - # _construct_ring will throw a TypeError if a parameter isn't a valid ring - # If we cloned the pointers here, we wouldn't be able to clean up + # _construct_ring will throw a TypeError if a parameter isn't a valid + # ring If we cloned the pointers here, we wouldn't be able to clean up # in case of error. if not length: return capi.create_empty_polygon() diff --git a/django/contrib/gis/geos/prototypes/errcheck.py b/django/contrib/gis/geos/prototypes/errcheck.py index 5ee43999fa..044bf8bc5c 100644 --- a/django/contrib/gis/geos/prototypes/errcheck.py +++ b/django/contrib/gis/geos/prototypes/errcheck.py @@ -19,7 +19,9 @@ def last_arg_byref(args): def check_dbl(result, func, cargs): - "Check the status code and returns the double value passed in by reference." + """ + Check the status code and returns the double value passed in by reference. + """ # Checking the status code if result != 1: return None diff --git a/django/contrib/gis/measure.py b/django/contrib/gis/measure.py index 707c061a29..71dc130ac4 100644 --- a/django/contrib/gis/measure.py +++ b/django/contrib/gis/measure.py @@ -1,8 +1,8 @@ # Copyright (c) 2007, Robert Coup <robert.coup@onetrackmind.co.nz> # All rights reserved. 
# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. @@ -11,20 +11,21 @@ # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # -# 3. Neither the name of Distance nor the names of its contributors may be used -# to endorse or promote products derived from this software without +# 3. Neither the name of Distance nor the names of its contributors may be +# used to endorse or promote products derived from this software without # specific prior written permission. # -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. # """ Distance and Area objects to allow for sensible and convenient calculation diff --git a/django/contrib/gis/utils/layermapping.py b/django/contrib/gis/utils/layermapping.py index e2bf30200e..a4cd04dc05 100644 --- a/django/contrib/gis/utils/layermapping.py +++ b/django/contrib/gis/utils/layermapping.py @@ -107,10 +107,10 @@ class LayerMapping: using=None, ): """ - A LayerMapping object is initialized using the given Model (not an instance), - a DataSource (or string path to an OGR-supported data file), and a mapping - dictionary. See the module level docstring for more details and keyword - argument usage. + A LayerMapping object is initialized using the given Model (not an + instance), a DataSource (or string path to an OGR-supported data file), + and a mapping dictionary. See the module level docstring for more + details and keyword argument usage. """ # Getting the DataSource and the associated Layer. if isinstance(data, (str, Path)): @@ -227,7 +227,8 @@ class LayerMapping: 'Given mapping field "%s" not in given Model fields.' % field_name ) - # Getting the string name for the Django field class (e.g., 'PointField'). + # Getting the string name for the Django field class (e.g., + # 'PointField'). 
fld_name = model_field.__class__.__name__ if isinstance(model_field, GeometryField): @@ -262,9 +263,9 @@ class LayerMapping: % (fld_name, "(dim=3)" if coord_dim == 3 else "", ltype) ) - # Setting the `geom_field` attribute w/the name of the model field - # that is a Geometry. Also setting the coordinate dimension - # attribute. + # Setting the `geom_field` attribute w/the name of the model + # field that is a Geometry. Also setting the coordinate + # dimension attribute. self.geom_field = field_name self.coord_dim = coord_dim fields_val = model_field @@ -424,7 +425,8 @@ class LayerMapping: digits = dtup[1] d_idx = dtup[2] # index where the decimal is - # Maximum amount of precision, or digits to the left of the decimal. + # Maximum amount of precision, or digits to the left of the + # decimal. max_prec = model_field.max_digits - model_field.decimal_places # Getting the digits to the left of the decimal place for the @@ -446,7 +448,8 @@ class LayerMapping: elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance( model_field, models.IntegerField ): - # Attempt to convert any OFTReal and OFTString value to an OFTInteger. + # Attempt to convert any OFTReal and OFTString value to an + # OFTInteger. try: val = int(ogr_field.value) except ValueError: @@ -533,7 +536,10 @@ class LayerMapping: ) from exc def geometry_field(self): - "Return the GeometryField instance associated with the geographic column." + """ + Return the GeometryField instance associated with the geographic + column. + """ # Use `get_field()` on the model's options so that we # get the correct field instance if there's model inheritance. opts = self.model._meta @@ -542,7 +548,8 @@ class LayerMapping: def make_multi(self, geom_type, model_field): """ Given the OGRGeomType for a geometry and its associated GeometryField, - determine whether the geometry should be turned into a GeometryCollection. + determine whether the geometry should be turned into a + GeometryCollection. 
""" return ( geom_type.num in self.MULTI_TYPES @@ -583,12 +590,13 @@ class LayerMapping: When this keyword is set, status information will be printed giving the number of features processed and successfully saved. By default, progress information will pe printed every 1000 features processed, - however, this default may be overridden by setting this keyword with an - integer for the desired interval. + however, this default may be overridden by setting this keyword with + an integer for the desired interval. stream: Status information will be written to this file handle. Defaults to - using `sys.stdout`, but any object with a `write` method is supported. + using `sys.stdout`, but any object with a `write` method is + supported. silent: By default, non-fatal error notifications are printed to stdout, but @@ -631,8 +639,8 @@ class LayerMapping: # Constructing the model using the keyword args is_update = False if self.unique: - # If we want unique models on a particular field, handle the - # geometry appropriately. + # If we want unique models on a particular field, + # handle the geometry appropriately. try: # Getting the keyword arguments and retrieving # the unique model. @@ -688,8 +696,8 @@ class LayerMapping: "Processed %d features, saved %d ...\n" % (num_feat, num_saved) ) - # Only used for status output purposes -- incremental saving uses the - # values returned here. + # Only used for status output purposes -- incremental saving uses + # the values returned here. return num_saved, num_feat if self.transaction_decorator is not None: @@ -708,8 +716,8 @@ class LayerMapping: n_i = len(indices) for i, end in enumerate(indices): - # Constructing the slice to use for this step; the last slice is - # special (e.g, [100:] instead of [90:100]). + # Constructing the slice to use for this step; the last slice + # is special (e.g, [100:] instead of [90:100]). 
if i + 1 == n_i: step_slice = slice(beg, None) else: diff --git a/django/contrib/gis/utils/ogrinspect.py b/django/contrib/gis/utils/ogrinspect.py index 63353690d9..96f58c9ff1 100644 --- a/django/contrib/gis/utils/ogrinspect.py +++ b/django/contrib/gis/utils/ogrinspect.py @@ -141,8 +141,9 @@ def _ogrinspect( null=False, ): """ - Helper routine for `ogrinspect` that generates GeoDjango models corresponding - to the given data source. See the `ogrinspect` docstring for more details. + Helper routine for `ogrinspect` that generates GeoDjango models + corresponding to the given data source. See the `ogrinspect` docstring for + more details. """ # Getting the DataSource if isinstance(data_source, str): diff --git a/django/contrib/humanize/templatetags/humanize.py b/django/contrib/humanize/templatetags/humanize.py index 7e2e3b5fed..91ffe3acad 100644 --- a/django/contrib/humanize/templatetags/humanize.py +++ b/django/contrib/humanize/templatetags/humanize.py @@ -39,11 +39,14 @@ def ordinal(value): templates = ( # Translators: Ordinal format when value ends with 0, e.g. 80th. pgettext("ordinal 0", "{}th"), - # Translators: Ordinal format when value ends with 1, e.g. 81st, except 11. + # Translators: Ordinal format when value ends with 1, e.g. 81st, + # except 11. pgettext("ordinal 1", "{}st"), - # Translators: Ordinal format when value ends with 2, e.g. 82nd, except 12. + # Translators: Ordinal format when value ends with 2, e.g. 82nd, + # except 12. pgettext("ordinal 2", "{}nd"), - # Translators: Ordinal format when value ends with 3, e.g. 83rd, except 13. + # Translators: Ordinal format when value ends with 3, e.g. 83rd, + # except 13. pgettext("ordinal 3", "{}rd"), # Translators: Ordinal format when value ends with 4, e.g. 84th. 
pgettext("ordinal 4", "{}th"), @@ -212,7 +215,8 @@ def naturaltime(value): class NaturalTimeFormatter: time_strings = { - # Translators: delta will contain a string like '2 months' or '1 month, 2 weeks' + # Translators: delta will contain a string like '2 months' or '1 month, + # 2 weeks' "past-day": gettext_lazy("%(delta)s ago"), # Translators: please keep a non-breaking space (U+00A0) between count # and time unit. @@ -225,25 +229,27 @@ class NaturalTimeFormatter: "past-second": ngettext_lazy("a second ago", "%(count)s seconds ago", "count"), "now": gettext_lazy("now"), # fmt: off - # fmt turned off to avoid black splitting the ngettext_lazy calls to multiple - # lines, as this results in gettext missing the 'Translators:' comments. + # fmt turned off to avoid black splitting the ngettext_lazy calls to + # multiple lines, as this results in gettext missing the 'Translators:' + # comments. "future-second": ngettext_lazy( - # Translators: please keep a non-breaking space (U+00A0) between count - # and time unit. + # Translators: please keep a non-breaking space (U+00A0) between + # count and time unit. "a second from now", "%(count)s seconds from now", "count" ), "future-minute": ngettext_lazy( - # Translators: please keep a non-breaking space (U+00A0) between count - # and time unit. + # Translators: please keep a non-breaking space (U+00A0) between + # count and time unit. "a minute from now", "%(count)s minutes from now", "count", ), "future-hour": ngettext_lazy( - # Translators: please keep a non-breaking space (U+00A0) between count - # and time unit. + # Translators: please keep a non-breaking space (U+00A0) between + # count and time unit. 
"an hour from now", "%(count)s hours from now", "count", ), # fmt: on - # Translators: delta will contain a string like '2 months' or '1 month, 2 weeks' + # Translators: delta will contain a string like '2 months' or '1 month, + # 2 weeks' "future-day": gettext_lazy("%(delta)s from now"), } past_substrings = { diff --git a/django/contrib/postgres/signals.py b/django/contrib/postgres/signals.py index a3816d3d30..00ab421e04 100644 --- a/django/contrib/postgres/signals.py +++ b/django/contrib/postgres/signals.py @@ -58,12 +58,12 @@ else: oids, array_oids = get_hstore_oids(connection.alias) # Don't register handlers when hstore is not available on the database. # - # If someone tries to create an hstore field it will error there. This is - # necessary as someone may be using PSQL without extensions installed but - # be using other features of contrib.postgres. + # If someone tries to create an hstore field it will error there. This + # is necessary as someone may be using PSQL without extensions + # installed but be using other features of contrib.postgres. # - # This is also needed in order to create the connection in order to install - # the hstore extension. + # This is also needed in order to create the connection in order to + # install the hstore extension. if oids: register_hstore( connection.connection, globally=True, oid=oids, array_oid=array_oids @@ -72,7 +72,8 @@ else: oids, citext_oids = get_citext_oids(connection.alias) # Don't register handlers when citext is not available on the database. # - # The same comments in the above call to register_hstore() also apply here. + # The same comments in the above call to register_hstore() also apply + # here. 
if oids: array_type = psycopg2.extensions.new_array_type( citext_oids, "citext[]", psycopg2.STRING diff --git a/django/contrib/sessions/backends/base.py b/django/contrib/sessions/backends/base.py index 69f756a228..e53f1d201a 100644 --- a/django/contrib/sessions/backends/base.py +++ b/django/contrib/sessions/backends/base.py @@ -120,7 +120,9 @@ class SessionBase: del (await self._aget_session())[self.TEST_COOKIE_NAME] def encode(self, session_dict): - "Return the given session dictionary serialized and encoded as a string." + """ + Return the given session dictionary serialized and encoded as a string. + """ return signing.dumps( session_dict, salt=self.key_salt, diff --git a/django/contrib/sitemaps/__init__.py b/django/contrib/sitemaps/__init__.py index dd21d8829d..c0c0ac77a7 100644 --- a/django/contrib/sitemaps/__init__.py +++ b/django/contrib/sitemaps/__init__.py @@ -51,7 +51,8 @@ class Sitemap: def _items(self): if self.i18n: # Create (item, lang_code) tuples for all items and languages. - # This is necessary to paginate with all languages already considered. + # This is necessary to paginate with all languages already + # considered. items = [ (item, lang_code) for item in self.items() @@ -63,7 +64,8 @@ class Sitemap: def _location(self, item, force_lang_code=None): if self.i18n: obj, lang_code = item - # Activate language from item-tuple or forced one before calling location. + # Activate language from item-tuple or forced one before calling + # location. with translation.override(force_lang_code or lang_code): return self._get("location", item) return self._get("location", item) diff --git a/django/contrib/staticfiles/storage.py b/django/contrib/staticfiles/storage.py index 2ec107d55a..b16c77757c 100644 --- a/django/contrib/staticfiles/storage.py +++ b/django/contrib/staticfiles/storage.py @@ -239,15 +239,18 @@ class HashedFilesMixin: return matched if url_path.startswith("/"): - # Otherwise the condition above would have returned prematurely. 
+ # Otherwise the condition above would have returned + # prematurely. assert url_path.startswith(settings.STATIC_URL) target_name = url_path.removeprefix(settings.STATIC_URL) else: - # We're using the posixpath module to mix paths and URLs conveniently. + # We're using the posixpath module to mix paths and URLs + # conveniently. source_name = name if os.sep == "/" else name.replace(os.sep, "/") target_name = posixpath.join(posixpath.dirname(source_name), url_path) - # Determine the hashed name of the target file with the storage backend. + # Determine the hashed name of the target file with the storage + # backend. hashed_url = self._url( self._stored_name, unquote(target_name), @@ -280,8 +283,8 @@ class HashedFilesMixin: 2. adjusting files which contain references to other files so they refer to the cache-busting filenames. - If either of these are performed on a file, then that file is considered - post-processed. + If either of these are performed on a file, then that file is + considered post-processed. """ # don't even dare to process the files if we're in dry run mode if dry_run: @@ -448,7 +451,8 @@ class HashedFilesMixin: # Move on to the next intermediate file. intermediate_name = cache_name # If the cache name can't be determined after the max number of passes, - # the intermediate files on disk may be corrupt; avoid an infinite loop. + # the intermediate files on disk may be corrupt; avoid an infinite + # loop. raise ValueError("The name '%s' could not be hashed with %r." % (name, self)) diff --git a/django/core/cache/backends/base.py b/django/core/cache/backends/base.py index aef9986389..f1b731aee1 100644 --- a/django/core/cache/backends/base.py +++ b/django/core/cache/backends/base.py @@ -283,8 +283,8 @@ class BaseCache: def decr(self, key, delta=1, version=None): """ - Subtract delta from value in the cache. If the key does not exist, raise - a ValueError exception. + Subtract delta from value in the cache. 
If the key does not exist, + raise a ValueError exception. """ return self.incr(key, -delta, version=version) diff --git a/django/core/cache/backends/db.py b/django/core/cache/backends/db.py index b3f4eab7c1..03add8c4a5 100644 --- a/django/core/cache/backends/db.py +++ b/django/core/cache/backends/db.py @@ -136,9 +136,9 @@ class DatabaseCache(BaseDatabaseCache): b64encoded = base64.b64encode(pickled).decode("latin1") try: # Note: typecasting for datetimes is needed by some 3rd party - # database backends. All core backends work without typecasting, - # so be careful about changes here - test suite will NOT pick - # regressions. + # database backends. All core backends work without + # typecasting, so be careful about changes here - test suite + # will NOT pick regressions. with transaction.atomic(using=db): cursor.execute( "SELECT %s, %s FROM %s WHERE %s = %%s" @@ -198,7 +198,8 @@ class DatabaseCache(BaseDatabaseCache): else: return False # touch failed. except DatabaseError: - # To be threadsafe, updates/inserts are allowed to fail silently + # To be threadsafe, updates/inserts are allowed to fail + # silently return False else: return True diff --git a/django/core/checks/messages.py b/django/core/checks/messages.py index db7aa55119..5ba48abe5a 100644 --- a/django/core/checks/messages.py +++ b/django/core/checks/messages.py @@ -29,7 +29,8 @@ class CheckMessage: obj = "?" elif isinstance(self.obj, models.base.ModelBase): # We need to hardcode ModelBase and Field cases because its __str__ - # method doesn't return "applabel.modellabel" and cannot be changed. + # method doesn't return "applabel.modellabel" and cannot be + # changed. 
obj = self.obj._meta.label else: obj = str(self.obj) diff --git a/django/core/checks/security/base.py b/django/core/checks/security/base.py index 9506052196..7d9631df1e 100644 --- a/django/core/checks/security/base.py +++ b/django/core/checks/security/base.py @@ -266,7 +266,8 @@ def check_referrer_policy(app_configs, **kwargs): if _security_middleware(): if settings.SECURE_REFERRER_POLICY is None: return [W022] - # Support a comma-separated string or iterable of values to allow fallback. + # Support a comma-separated string or iterable of values to allow + # fallback. if isinstance(settings.SECURE_REFERRER_POLICY, str): values = {v.strip() for v in settings.SECURE_REFERRER_POLICY.split(",")} else: diff --git a/django/core/files/base.py b/django/core/files/base.py index 9682467afa..b8613ffc55 100644 --- a/django/core/files/base.py +++ b/django/core/files/base.py @@ -67,8 +67,8 @@ class File(FileProxyMixin): Return ``True`` if you can expect multiple chunks. NB: If a particular file representation is in memory, subclasses should - always return ``False`` -- there's no good reason to read from memory in - chunks. + always return ``False`` -- there's no good reason to read from memory + in chunks. """ return self.size > (chunk_size or self.DEFAULT_CHUNK_SIZE) diff --git a/django/core/files/move.py b/django/core/files/move.py index 1605bebc1a..eff11b111b 100644 --- a/django/core/files/move.py +++ b/django/core/files/move.py @@ -19,8 +19,9 @@ def file_move_safe( """ Move a file from one location to another in the safest way possible. - First, try ``os.rename``, which is simple but will break across filesystems. - If that fails, stream manually from one file to another in pure Python. + First, try ``os.rename``, which is simple but will break across + filesystems. If that fails, stream manually from one file to another in + pure Python. If the destination file exists and ``allow_overwrite`` is ``False``, raise ``FileExistsError``. 
diff --git a/django/core/files/storage/base.py b/django/core/files/storage/base.py index 31ecbd209a..612c8cc357 100644 --- a/django/core/files/storage/base.py +++ b/django/core/files/storage/base.py @@ -149,8 +149,8 @@ class Storage: def exists(self, name): """ - Return True if a file referenced by the given name already exists in the - storage system, or False if the name is available for a new file. + Return True if a file referenced by the given name already exists in + the storage system, or False if the name is available for a new file. """ raise NotImplementedError( "subclasses of Storage must provide an exists() method" diff --git a/django/core/files/storage/filesystem.py b/django/core/files/storage/filesystem.py index 428ae61b40..9592bff008 100644 --- a/django/core/files/storage/filesystem.py +++ b/django/core/files/storage/filesystem.py @@ -104,8 +104,9 @@ class FileSystemStorage(Storage, StorageSettingsMixin): # This is a normal uploadedfile that we can stream. else: - # The combination of O_CREAT and O_EXCL makes os.open() raises an - # OSError if the file already exists before it's opened. + # The combination of O_CREAT and O_EXCL makes os.open() + # raises an OSError if the file already exists before it's + # opened. open_flags = ( os.O_WRONLY | os.O_CREAT diff --git a/django/core/files/uploadedfile.py b/django/core/files/uploadedfile.py index efbfcac4c8..1d006ede4f 100644 --- a/django/core/files/uploadedfile.py +++ b/django/core/files/uploadedfile.py @@ -54,7 +54,8 @@ class UploadedFile(File): # Just use the basename of the file -- anything else is dangerous. name = os.path.basename(name) - # File names longer than 255 characters can cause problems on older OSes. + # File names longer than 255 characters can cause problems on older + # OSes. 
if len(name) > 255: name, ext = os.path.splitext(name) ext = ext[:255] @@ -126,7 +127,8 @@ class InMemoryUploadedFile(UploadedFile): class SimpleUploadedFile(InMemoryUploadedFile): """ - A simple representation of a file, which just has content, size, and a name. + A simple representation of a file, which just has content, size, and a + name. """ def __init__(self, name, content, content_type="text/plain"): diff --git a/django/core/files/uploadhandler.py b/django/core/files/uploadhandler.py index ab86f7fede..133c0a597f 100644 --- a/django/core/files/uploadhandler.py +++ b/django/core/files/uploadhandler.py @@ -37,8 +37,8 @@ class StopUpload(UploadFileException): def __init__(self, connection_reset=False): """ If ``connection_reset`` is ``True``, Django knows will halt the upload - without consuming the rest of the upload. This will cause the browser to - show a "connection reset" error. + without consuming the rest of the upload. This will cause the browser + to show a "connection reset" error. """ self.connection_reset = connection_reset @@ -51,7 +51,8 @@ class StopUpload(UploadFileException): class SkipFile(UploadFileException): """ - This exception is raised by an upload handler that wants to skip a given file. + This exception is raised by an upload handler that wants to skip a given + file. """ pass diff --git a/django/core/handlers/asgi.py b/django/core/handlers/asgi.py index 10d18b60eb..beace7597c 100644 --- a/django/core/handlers/asgi.py +++ b/django/core/handlers/asgi.py @@ -331,8 +331,8 @@ class ASGIHandler(base.BaseHandler): ) # Streaming responses need to be pinned to their iterator. if response.streaming: - # - Consume via `__aiter__` and not `streaming_content` directly, to - # allow mapping of a sync iterator. + # - Consume via `__aiter__` and not `streaming_content` directly, + # to allow mapping of a sync iterator. # - Use aclosing() when consuming aiter. 
See # https://github.com/python/cpython/commit/6e8dcdaaa49d4313bf9fab9f9923ca5828fbb10e async with aclosing(aiter(response)) as content: @@ -342,8 +342,9 @@ class ASGIHandler(base.BaseHandler): { "type": "http.response.body", "body": chunk, - # Ignore "more" as there may be more parts; instead, - # use an empty final closing message with False. + # Ignore "more" as there may be more parts; + # instead, use an empty final closing message + # with False. "more_body": True, } ) diff --git a/django/core/handlers/base.py b/django/core/handlers/base.py index 8911543d4e..af3e0c3a50 100644 --- a/django/core/handlers/base.py +++ b/django/core/handlers/base.py @@ -27,7 +27,8 @@ class BaseHandler: """ Populate middleware lists from settings.MIDDLEWARE. - Must be called after the environment is fixed (see __call__ in subclasses). + Must be called after the environment is fixed (see __call__ in + subclasses). """ self._view_middleware = [] self._template_response_middleware = [] diff --git a/django/core/mail/message.py b/django/core/mail/message.py index 93269d0310..3fd2ef6656 100644 --- a/django/core/mail/message.py +++ b/django/core/mail/message.py @@ -153,7 +153,8 @@ class MIMEMixin: class SafeMIMEMessage(MIMEMixin, MIMEMessage): def __setitem__(self, name, val): - # Per RFC 2046 Section 5.2.1, message/rfc822 attachment headers must be ASCII. + # Per RFC 2046 Section 5.2.1, message/rfc822 attachment headers must be + # ASCII. name, val = forbid_multi_line_headers(name, val, "ascii") MIMEMessage.__setitem__(self, name, val) diff --git a/django/core/management/__init__.py b/django/core/management/__init__.py index 0c16447d58..f547ef730c 100644 --- a/django/core/management/__init__.py +++ b/django/core/management/__init__.py @@ -117,7 +117,8 @@ def call_command(command_name, *args, **options): else: command = load_command_class(app_name, command_name) - # Simulate argument parsing to get the option defaults (see #10080 for details). 
+ # Simulate argument parsing to get the option defaults (see #10080 for + # details). parser = command.create_parser("", command_name) # Use the `dest` option name from the parser option opt_mapping = { @@ -256,9 +257,9 @@ class ManagementUtility: except KeyError: if os.environ.get("DJANGO_SETTINGS_MODULE"): # If `subcommand` is missing due to misconfigured settings, the - # following line will retrigger an ImproperlyConfigured exception - # (get_commands() swallows the original one) so the user is - # informed about it. + # following line will retrigger an ImproperlyConfigured + # exception (get_commands() swallows the original one) so the + # user is informed about it. settings.INSTALLED_APPS elif not settings.configured: sys.stderr.write("No Django settings specified.\n") diff --git a/django/core/management/commands/diffsettings.py b/django/core/management/commands/diffsettings.py index 047e4764a8..5cc2e6d674 100644 --- a/django/core/management/commands/diffsettings.py +++ b/django/core/management/commands/diffsettings.py @@ -45,7 +45,8 @@ class Command(BaseCommand): def handle(self, **options): from django.conf import Settings, global_settings, settings - # Because settings are imported lazily, we need to explicitly load them. + # Because settings are imported lazily, we need to explicitly load + # them. 
if not settings.configured: settings._setup() diff --git a/django/core/management/commands/dumpdata.py b/django/core/management/commands/dumpdata.py index 5a9ab83919..15e615c1d0 100644 --- a/django/core/management/commands/dumpdata.py +++ b/django/core/management/commands/dumpdata.py @@ -229,7 +229,8 @@ class Command(BaseCommand): self.stdout.ending = None progress_output = None object_count = 0 - # If dumpdata is outputting to stdout, there is no way to display progress + # If dumpdata is outputting to stdout, there is no way to display + # progress if output and self.stdout.isatty() and options["verbosity"] > 0: progress_output = self.stdout object_count = sum(get_objects(count_only=True)) diff --git a/django/core/management/commands/flush.py b/django/core/management/commands/flush.py index a057393d53..ff7ee45557 100644 --- a/django/core/management/commands/flush.py +++ b/django/core/management/commands/flush.py @@ -86,8 +86,9 @@ Are you sure you want to do this? # Empty sql_list may signify an empty database and post_migrate # would then crash. if sql_list and not inhibit_post_migrate: - # Emit the post migrate signal. This allows individual applications to - # respond as if the database had been migrated from scratch. + # Emit the post migrate signal. This allows individual + # applications to respond as if the database had been migrated + # from scratch. emit_post_migrate_signal(verbosity, interactive, database) else: self.stdout.write("Flush cancelled.") diff --git a/django/core/management/commands/inspectdb.py b/django/core/management/commands/inspectdb.py index 81f0cbefea..8c271498c6 100644 --- a/django/core/management/commands/inspectdb.py +++ b/django/core/management/commands/inspectdb.py @@ -193,8 +193,8 @@ class Command(BaseCommand): ) used_relations.add(rel_to) else: - # Calling `get_field_type` to get the field type string and any - # additional parameters and notes. 
+ # Calling `get_field_type` to get the field type string + # and any additional parameters and notes. field_type, field_params, field_notes = self.get_field_type( connection, table_name, row ) @@ -203,8 +203,8 @@ class Command(BaseCommand): field_type += "(" - # Don't output 'id = meta.AutoField(primary_key=True)', because - # that's assumed if it doesn't exist. + # Don't output 'id = meta.AutoField(primary_key=True)', + # because that's assumed if it doesn't exist. if att_name == "id" and extra_params == {"primary_key": True}: if field_type == "AutoField(": continue @@ -215,8 +215,8 @@ class Command(BaseCommand): ): comment_notes.append("AutoField?") - # Add 'null' and 'blank', if the 'null_ok' flag was present in the - # table description. + # Add 'null' and 'blank', if the 'null_ok' flag was present + # in the table description. if row.null_ok: # If it's NULL... extra_params["blank"] = True extra_params["null"] = True @@ -287,7 +287,8 @@ class Command(BaseCommand): while new_name.find(LOOKUP_SEP) >= 0: new_name = new_name.replace(LOOKUP_SEP, "_") if col_name.lower().find(LOOKUP_SEP) >= 0: - # Only add the comment if the double underscore was in the original name + # Only add the comment if the double underscore was in the + # original name field_notes.append( "Field renamed because it contained more than one '_' in a row." ) diff --git a/django/core/management/commands/makemessages.py b/django/core/management/commands/makemessages.py index ac67a5bc9f..60fe295ac1 100644 --- a/django/core/management/commands/makemessages.py +++ b/django/core/management/commands/makemessages.py @@ -420,9 +420,11 @@ class Command(BaseCommand): for locale in locales: if not is_valid_locale(locale): # Try to guess what valid locale it could be - # Valid examples are: en_GB, shi_Latn_MA and nl_NL-x-informal + # Valid examples are: en_GB, shi_Latn_MA and + # nl_NL-x-informal - # Search for characters followed by a non character (i.e. 
separator) + # Search for characters followed by a non character (i.e. + # separator) match = re.match( r"^(?P<language>[a-zA-Z]+)" r"(?P<separator>[^a-zA-Z])" @@ -464,8 +466,9 @@ class Command(BaseCommand): @cached_property def gettext_version(self): - # Gettext tools will output system-encoded bytestrings instead of UTF-8, - # when looking up the version. It's especially a problem on Windows. + # Gettext tools will output system-encoded bytestrings instead of + # UTF-8, when looking up the version. It's especially a problem on + # Windows. out, err, status = popen_wrapper( ["xgettext", "--version"], stdout_encoding=DEFAULT_LOCALE_ENCODING, diff --git a/django/core/management/commands/makemigrations.py b/django/core/management/commands/makemigrations.py index 690d0e5053..7f711ed7ae 100644 --- a/django/core/management/commands/makemigrations.py +++ b/django/core/management/commands/makemigrations.py @@ -139,7 +139,8 @@ class Command(BaseCommand): # the loader doesn't try to resolve replaced migrations from DB. loader = MigrationLoader(None, ignore_no_migrations=True) - # Raise an error if any migrations are applied before their dependencies. + # Raise an error if any migrations are applied before their + # dependencies. consistency_check_labels = {config.label for config in apps.get_app_configs()} # Non-default databases are only checked if database routers used. 
aliases_to_check = ( @@ -186,7 +187,8 @@ class Command(BaseCommand): "'python manage.py makemigrations --merge'" % name_str ) - # If they want to merge and there's nothing to merge, then politely exit + # If they want to merge and there's nothing to merge, then politely + # exit if self.merge and not conflicts: self.log("No conflicts detected to merge.") return @@ -505,9 +507,9 @@ class Command(BaseCommand): if self.scriptable: self.stdout.write(writer.path) elif self.verbosity == 3: - # Alternatively, makemigrations --merge --dry-run --verbosity 3 - # will log the merge migrations rather than saving the file - # to the disk. + # Alternatively, makemigrations --merge --dry-run + # --verbosity 3 will log the merge migrations rather than + # saving the file to the disk. self.log( self.style.MIGRATE_HEADING( "Full merge migrations file '%s':" % writer.filename diff --git a/django/core/management/commands/migrate.py b/django/core/management/commands/migrate.py index 4ef6e1a87c..268f669ba2 100644 --- a/django/core/management/commands/migrate.py +++ b/django/core/management/commands/migrate.py @@ -113,7 +113,8 @@ class Command(BaseCommand): # Work out which apps have migrations and which do not executor = MigrationExecutor(connection, self.migration_progress_callback) - # Raise an error if any migrations are applied before their dependencies. + # Raise an error if any migrations are applied before their + # dependencies. executor.loader.check_consistent_history(connection) # Before anything else, see if there's conflicting apps and drop out @@ -357,8 +358,8 @@ class Command(BaseCommand): fake=fake, fake_initial=fake_initial, ) - # post_migrate signals have access to all models. Ensure that all models - # are reloaded in case any are delayed. + # post_migrate signals have access to all models. Ensure that all + # models are reloaded in case any are delayed. 
post_migrate_state.clear_delayed_apps_cache() post_migrate_apps = post_migrate_state.apps @@ -375,8 +376,8 @@ class Command(BaseCommand): [ModelState.from_model(apps.get_model(*model)) for model in model_keys] ) - # Send the post_migrate signal, so individual apps can do whatever they need - # to do at this point. + # Send the post_migrate signal, so individual apps can do whatever they + # need to do at this point. emit_post_migrate_signal( self.verbosity, self.interactive, diff --git a/django/core/management/commands/shell.py b/django/core/management/commands/shell.py index b05e7ff404..47342291e8 100644 --- a/django/core/management/commands/shell.py +++ b/django/core/management/commands/shell.py @@ -70,8 +70,8 @@ class Command(BaseCommand): # Set up a dictionary to serve as the environment for the shell. imported_objects = self.get_namespace(**options) - # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system - # conventions and get $PYTHONSTARTUP first then .pythonrc.py. + # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow + # system conventions and get $PYTHONSTARTUP first then .pythonrc.py. if not options["no_startup"]: for pythonrc in OrderedSet( [os.environ.get("PYTHONSTARTUP"), os.path.expanduser("~/.pythonrc.py")] @@ -89,9 +89,9 @@ class Command(BaseCommand): except Exception: traceback.print_exc() - # By default, this will set up readline to do tab completion and to read and - # write history to the .python_history file, but this can be overridden by - # $PYTHONSTARTUP or ~/.pythonrc.py. + # By default, this will set up readline to do tab completion and to + # read and write history to the .python_history file, but this can be + # overridden by $PYTHONSTARTUP or ~/.pythonrc.py. 
try: hook = sys.__interactivehook__ except AttributeError: diff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py index 076499b3e2..3c2e25eeea 100644 --- a/django/core/management/commands/sqlmigrate.py +++ b/django/core/management/commands/sqlmigrate.py @@ -74,8 +74,8 @@ class Command(BaseCommand): migration.atomic and connection.features.can_rollback_ddl ) - # Make a plan that represents just the requested migrations and show SQL - # for it + # Make a plan that represents just the requested migrations and show + # SQL for it plan = [(loader.graph.nodes[target], options["backwards"])] sql_statements = loader.collect_sql(plan) if not sql_statements and options["verbosity"] >= 1: diff --git a/django/core/paginator.py b/django/core/paginator.py index 4376d6db85..aabc2f8532 100644 --- a/django/core/paginator.py +++ b/django/core/paginator.py @@ -56,7 +56,8 @@ class BasePaginator: else self.default_error_messages | error_messages ) if self.per_page <= self.orphans: - # RemovedInDjango70Warning: When the deprecation ends, replace with: + # RemovedInDjango70Warning: When the deprecation ends, replace + # with: # raise ValueError( # "The orphans argument cannot be larger than or equal to the " # "per_page argument." diff --git a/django/core/serializers/__init__.py b/django/core/serializers/__init__.py index 7e23769533..2f730d5cca 100644 --- a/django/core/serializers/__init__.py +++ b/django/core/serializers/__init__.py @@ -225,8 +225,9 @@ def sort_dependencies(app_list, allow_cycles=False): model, deps = model_dependencies.pop() # If all of the models in the dependency list are either already - # on the final model list, or not on the original serialization list, - # then we've found another model with all it's dependencies satisfied. + # on the final model list, or not on the original serialization + # list, then we've found another model with all it's dependencies + # satisfied. 
if all(d not in models or d in model_list for d in deps): model_list.append(model) changed = True diff --git a/django/core/serializers/base.py b/django/core/serializers/base.py index 1fbca9244b..efc55981eb 100644 --- a/django/core/serializers/base.py +++ b/django/core/serializers/base.py @@ -108,8 +108,9 @@ class Serializer: self.first = True for count, obj in enumerate(queryset, start=1): self.start_object(obj) - # Use the concrete parent class' _meta instead of the object's _meta - # This is to avoid local_fields problems for proxy models. Refs #17717. + # Use the concrete parent class' _meta instead of the object's + # _meta This is to avoid local_fields problems for proxy models. + # Refs #17717. concrete_model = obj._meta.concrete_model # When using natural primary keys, retrieve the pk field of the # parent for multi-table inheritance child models. That field must diff --git a/django/core/serializers/python.py b/django/core/serializers/python.py index 807d4b3977..2929874b01 100644 --- a/django/core/serializers/python.py +++ b/django/core/serializers/python.py @@ -1,7 +1,7 @@ """ A Python "serializer". Doesn't do much serializing per se -- just converts to -and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for -other serializers. +and from basic Python data types (lists, dicts, strings, etc.). Useful as a +basis for other serializers. """ from django.apps import apps diff --git a/django/core/serializers/pyyaml.py b/django/core/serializers/pyyaml.py index c72d1fa03b..fdc245756f 100644 --- a/django/core/serializers/pyyaml.py +++ b/django/core/serializers/pyyaml.py @@ -46,11 +46,12 @@ class Serializer(PythonSerializer): def _value_from_field(self, obj, field): # A nasty special case: base YAML doesn't support serialization of time - # types (as opposed to dates or datetimes, which it does support). Since - # we want to use the "safe" serializer for better interoperability, we - # need to do something with those pesky times. 
Converting 'em to strings - # isn't perfect, but it's better than a "!!python/time" type which would - # halt deserialization under any other language. + # types (as opposed to dates or datetimes, which it does support). + # Since we want to use the "safe" serializer for better + # interoperability, we need to do something with those pesky times. + # Converting 'em to strings isn't perfect, but it's better than a + # "!!python/time" type which would halt deserialization under any other + # language. value = super()._value_from_field(obj, field) if isinstance(value, datetime.time): value = str(value) diff --git a/django/core/serializers/xml_serializer.py b/django/core/serializers/xml_serializer.py index 360d5309d8..0557af3954 100644 --- a/django/core/serializers/xml_serializer.py +++ b/django/core/serializers/xml_serializer.py @@ -250,7 +250,8 @@ class Deserializer(base.Deserializer): continue field = Model._meta.get_field(field_name) - # As is usually the case, relation fields get the special treatment. + # As is usually the case, relation fields get the special + # treatment. 
if field.remote_field and isinstance( field.remote_field, models.ManyToManyRel ): @@ -303,7 +304,8 @@ class Deserializer(base.Deserializer): if hasattr(model._default_manager, "get_by_natural_key"): keys = node.getElementsByTagName("natural") if keys: - # If there are 'natural' subelements, it must be a natural key + # If there are 'natural' subelements, it must be a natural + # key field_value = [getInnerText(k).strip() for k in keys] try: obj = model._default_manager.db_manager( @@ -343,7 +345,8 @@ class Deserializer(base.Deserializer): def m2m_convert(n): keys = n.getElementsByTagName("natural") if keys: - # If there are 'natural' subelements, it must be a natural key + # If there are 'natural' subelements, it must be a natural + # key field_value = [getInnerText(k).strip() for k in keys] obj_pk = ( default_manager.db_manager(self.db) @@ -394,7 +397,8 @@ class Deserializer(base.Deserializer): def getInnerText(node): """Get all the inner text of a DOM node (recursively).""" - # inspired by https://mail.python.org/pipermail/xml-sig/2005-March/011022.html + # inspired by + # https://mail.python.org/pipermail/xml-sig/2005-March/011022.html inner_text = [] for child in node.childNodes: if ( diff --git a/django/core/servers/basehttp.py b/django/core/servers/basehttp.py index 495657d264..41719034fb 100644 --- a/django/core/servers/basehttp.py +++ b/django/core/servers/basehttp.py @@ -234,7 +234,9 @@ class WSGIRequestHandler(simple_server.WSGIRequestHandler): pass def handle_one_request(self): - """Copy of WSGIRequestHandler.handle() but with different ServerHandler""" + """ + Copy of WSGIRequestHandler.handle() but with different ServerHandler + """ self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536: self.requestline = "" diff --git a/django/core/signing.py b/django/core/signing.py index e3d7785910..ed56ce0908 100644 --- a/django/core/signing.py +++ b/django/core/signing.py @@ -17,7 +17,8 @@ If the signature fails, a BadSignature 
exception is raised. 'hello' >>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv42-modified") ... -BadSignature: Signature "ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv42-modified" does not match +BadSignature: Signature "ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv42-modified" does +not match You can optionally compress the JSON prior to base64 encoding it to save space, using the compress=True argument. This checks if compression actually diff --git a/django/db/backends/base/base.py b/django/db/backends/base/base.py index 54328c8450..3f3d29874a 100644 --- a/django/db/backends/base/base.py +++ b/django/db/backends/base/base.py @@ -595,8 +595,8 @@ class BaseDatabaseWrapper: """ if self.connection is not None: self.health_check_done = False - # If the application didn't restore the original autocommit setting, - # don't take chances, drop the connection. + # If the application didn't restore the original autocommit + # setting, don't take chances, drop the connection. if self.get_autocommit() != self.settings_dict["AUTOCOMMIT"]: self.close() return diff --git a/django/db/backends/base/creation.py b/django/db/backends/base/creation.py index 1ed583f9e4..7c11465f94 100644 --- a/django/db/backends/base/creation.py +++ b/django/db/backends/base/creation.py @@ -94,9 +94,10 @@ class BaseDatabaseCreation: settings.MIGRATION_MODULES = old_migration_modules # We then serialize the current state of the database into a string - # and store it on the connection. This slightly horrific process is so people - # who are testing on databases without transactions or who are using - # a TransactionTestCase still get a clean database on every test run. + # and store it on the connection. This slightly horrific process is so + # people who are testing on databases without transactions or who are + # using a TransactionTestCase still get a clean database on every test + # run. if serialize is not None: warnings.warn( "DatabaseCreation.create_test_db(serialize) is deprecated. 
Call " @@ -112,7 +113,8 @@ class BaseDatabaseCreation: call_command("createcachetable", database=self.connection.alias) - # Ensure a connection for the side effect of initializing the test database. + # Ensure a connection for the side effect of initializing the test + # database. self.connection.ensure_connection() if os.environ.get("RUNNING_DJANGOS_TEST_SUITE") == "true": @@ -220,8 +222,8 @@ class BaseDatabaseCreation: try: self._execute_create_test_db(cursor, test_db_params, keepdb) except Exception as e: - # if we want to keep the db, then no need to do any of the below, - # just return and skip it all. + # if we want to keep the db, then no need to do any of the + # below, just return and skip it all. if keepdb: return test_database_name @@ -365,7 +367,8 @@ class BaseDatabaseCreation: for test_name in tests: test_case_name, _, test_method_name = test_name.rpartition(".") test_app = test_name.split(".")[0] - # Importing a test app that isn't installed raises RuntimeError. + # Importing a test app that isn't installed raises + # RuntimeError. if test_app in settings.INSTALLED_APPS: test_case = import_string(test_case_name) test_method = getattr(test_case, test_method_name) diff --git a/django/db/backends/base/features.py b/django/db/backends/base/features.py index cf712739c7..ad44a31d90 100644 --- a/django/db/backends/base/features.py +++ b/django/db/backends/base/features.py @@ -201,7 +201,8 @@ class BaseDatabaseFeatures: # supported by the Python driver supports_paramstyle_pyformat = True - # Does the backend require literal defaults, rather than parameterized ones? + # Does the backend require literal defaults, rather than parameterized + # ones? requires_literal_defaults = False # Does the backend support functions in defaults? @@ -213,7 +214,8 @@ class BaseDatabaseFeatures: # Does the backend support the DEFAULT keyword in bulk insert queries? 
supports_default_keyword_in_bulk_insert = True - # Does the backend require a connection reset after each material schema change? + # Does the backend require a connection reset after each material schema + # change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing closed cursor? @@ -228,11 +230,12 @@ class BaseDatabaseFeatures: # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False - # Does the backend support "select for update" queries with limit (and offset)? + # Does the backend support "select for update" queries with limit (and + # offset)? supports_select_for_update_with_limit = True - # Does the backend ignore null expressions in GREATEST and LEAST queries unless - # every expression is null? + # Does the backend ignore null expressions in GREATEST and LEAST queries + # unless every expression is null? greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? @@ -261,10 +264,10 @@ class BaseDatabaseFeatures: # Does the database support ORDER BY in aggregate expressions? supports_aggregate_order_by_clause = False - # Does the database backend support DISTINCT when using multiple arguments in an - # aggregate expression? For example, Sqlite treats the "delimiter" argument of - # STRING_AGG/GROUP_CONCAT as an extra argument and does not allow using a custom - # delimiter along with DISTINCT. + # Does the database backend support DISTINCT when using multiple arguments + # in an aggregate expression? For example, Sqlite treats the "delimiter" + # argument of STRING_AGG/GROUP_CONCAT as an extra argument and does not + # allow using a custom delimiter along with DISTINCT. supports_aggregate_distinct_multiple_argument = True # Does the database support SQL 2023 ANY_VALUE in GROUP BY? 
diff --git a/django/db/backends/base/introspection.py b/django/db/backends/base/introspection.py index 12360538b9..3a62ab6327 100644 --- a/django/db/backends/base/introspection.py +++ b/django/db/backends/base/introspection.py @@ -158,8 +158,9 @@ class BaseDatabaseIntrospection: def get_sequences(self, cursor, table_name, table_fields=()): """ Return a list of introspected sequences for table_name. Each sequence - is a dict: {'table': <table_name>, 'column': <column_name>}. An optional - 'name' key can be added if the backend supports named sequences. + is a dict: {'table': <table_name>, 'column': <column_name>}. An + optional 'name' key can be added if the backend supports named + sequences. """ raise NotImplementedError( "subclasses of BaseDatabaseIntrospection may require a get_sequences() " @@ -168,8 +169,8 @@ class BaseDatabaseIntrospection: def get_relations(self, cursor, table_name): """ - Return a dictionary of {field_name: (field_name_other_table, other_table)} - representing all foreign keys in the given table. + Return a dictionary of {field_name: (field_name_other_table, + other_table)} representing all foreign keys in the given table. """ raise NotImplementedError( "subclasses of BaseDatabaseIntrospection may require a " diff --git a/django/db/backends/base/operations.py b/django/db/backends/base/operations.py index c426a2b90a..a95f535bdb 100644 --- a/django/db/backends/base/operations.py +++ b/django/db/backends/base/operations.py @@ -546,8 +546,8 @@ class BaseDatabaseOperations: def adapt_datetimefield_value(self, value): """ - Transform a datetime value to an object compatible with what is expected - by the backend driver for datetime columns. + Transform a datetime value to an object compatible with what is + expected by the backend driver for datetime columns. 
""" if value is None: return None diff --git a/django/db/backends/base/schema.py b/django/db/backends/base/schema.py index 3d7ea83dd7..5262864e7f 100644 --- a/django/db/backends/base/schema.py +++ b/django/db/backends/base/schema.py @@ -494,8 +494,8 @@ class BaseDatabaseSchemaEditor: Return a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values - for particularly tricky backends (defaults are not user-defined, though, - so this is safe). + for particularly tricky backends (defaults are not user-defined, + though, so this is safe). """ raise NotImplementedError() @@ -1234,7 +1234,8 @@ class BaseDatabaseSchemaEditor: self.execute(self._create_primary_key_sql(model, new_field)) # Update all referencing columns rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) - # Handle our type alters on the other end of rels from the PK stuff above + # Handle our type alters on the other end of rels from the PK stuff + # above for old_rel, new_rel in rels_to_update: rel_db_params = new_rel.field.db_parameters(connection=self.connection) rel_type = rel_db_params["type"] @@ -1483,7 +1484,8 @@ class BaseDatabaseSchemaEditor: ) self.alter_field( new_field.remote_field.through, - # for self-referential models we need to alter field from the other end too + # for self-referential models we need to alter field from the other + # end too old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), ) diff --git a/django/db/backends/dummy/base.py b/django/db/backends/dummy/base.py index 0e2edc73cf..c42fc5ef76 100644 --- a/django/db/backends/dummy/base.py +++ b/django/db/backends/dummy/base.py @@ -1,7 +1,8 @@ """ Dummy database backend for Django. -Django uses this if the database ENGINE setting is empty (None or empty string). 
+Django uses this if the database ENGINE setting is empty (None or empty +string). Each of these API functions, except connection.close(), raise ImproperlyConfigured. diff --git a/django/db/backends/mysql/base.py b/django/db/backends/mysql/base.py index e594067b50..2ad8627dfe 100644 --- a/django/db/backends/mysql/base.py +++ b/django/db/backends/mysql/base.py @@ -22,7 +22,8 @@ except ImportError as err: from MySQLdb.constants import CLIENT, FIELD_TYPE from MySQLdb.converters import conversions -# Some of these import MySQLdb, so import them after checking if it's installed. +# Some of these import MySQLdb, so import them after checking if it's +# installed. from .client import DatabaseClient from .creation import DatabaseCreation from .features import DatabaseFeatures @@ -57,7 +58,8 @@ class CursorWrapper: exception instances and reraises them with the correct types. Implemented as a wrapper, rather than a subclass, so that it isn't stuck - to the particular underlying representation returned by Connection.cursor(). + to the particular underlying representation returned by + Connection.cursor(). """ codes_for_integrityerror = ( @@ -101,9 +103,10 @@ class CursorWrapper: class DatabaseWrapper(BaseDatabaseWrapper): vendor = "mysql" # This dictionary maps Field objects to their associated MySQL column - # types, as strings. Column-type strings can contain format strings; they'll - # be interpolated against the values of Field.__dict__ before being output. - # If a column type is set to None, it won't be included in the output. + # types, as strings. Column-type strings can contain format strings; + # they'll be interpolated against the values of Field.__dict__ before being + # output. If a column type is set to None, it won't be included in the + # output. 
_data_types = { "AutoField": "integer AUTO_INCREMENT", @@ -175,13 +178,13 @@ class DatabaseWrapper(BaseDatabaseWrapper): } # The patterns below are used to generate SQL pattern lookup clauses when - # the right-hand side of the lookup isn't a raw string (it might be an expression - # or the result of a bilateral transformation). - # In those cases, special characters for LIKE operators (e.g. \, *, _) should be - # escaped on database side. + # the right-hand side of the lookup isn't a raw string (it might be an + # expression or the result of a bilateral transformation). In those cases, + # special characters for LIKE operators (e.g. \, *, _) should be escaped on + # database side. # - # Note: we use str.format() here for readability as '%' is used as a wildcard for - # the LIKE operator. + # Note: we use str.format() here for readability as '%' is used as a + # wildcard for the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { "contains": "LIKE BINARY CONCAT('%%', {}, '%%')", diff --git a/django/db/backends/mysql/features.py b/django/db/backends/mysql/features.py index 6ae4c56af1..24ecc0d80b 100644 --- a/django/db/backends/mysql/features.py +++ b/django/db/backends/mysql/features.py @@ -137,7 +137,9 @@ class DatabaseFeatures(BaseDatabaseFeatures): @cached_property def _mysql_storage_engine(self): - "Internal method used in Django tests. Don't rely on this from your code" + """ + Internal method used in Django tests. 
Don't rely on this from your code + """ return self.connection.mysql_server_data["default_storage_engine"] @cached_property diff --git a/django/db/backends/mysql/introspection.py b/django/db/backends/mysql/introspection.py index f00d57cce4..24f773f009 100644 --- a/django/db/backends/mysql/introspection.py +++ b/django/db/backends/mysql/introspection.py @@ -131,9 +131,10 @@ class DatabaseIntrospection(BaseDatabaseIntrospection): ) row = cursor.fetchone() default_column_collation = row[0] if row else "" - # information_schema database gives more accurate results for some figures: - # - varchar length returned by cursor.description is an internal length, - # not visible length (#5725) + # information_schema database gives more accurate results for some + # figures: + # - varchar length returned by cursor.description is an internal + # length, not visible length (#5725) # - precision and scale (for decimal fields) (#5014) # - auto_increment is not available in cursor.description cursor.execute( @@ -195,8 +196,8 @@ class DatabaseIntrospection(BaseDatabaseIntrospection): def get_relations(self, cursor, table_name): """ - Return a dictionary of {field_name: (field_name_other_table, other_table)} - representing all foreign keys in the given table. + Return a dictionary of {field_name: (field_name_other_table, + other_table)} representing all foreign keys in the given table. """ cursor.execute( """ diff --git a/django/db/backends/mysql/operations.py b/django/db/backends/mysql/operations.py index 9806303539..2d6185a2ca 100644 --- a/django/db/backends/mysql/operations.py +++ b/django/db/backends/mysql/operations.py @@ -359,7 +359,8 @@ class DatabaseOperations(BaseDatabaseOperations): return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), params def explain_query_prefix(self, format=None, **options): - # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends. + # Alias MySQL's TRADITIONAL to TEXT for consistency with other + # backends. 
if format and format.upper() == "TEXT": format = "TRADITIONAL" elif ( diff --git a/django/db/backends/oracle/base.py b/django/db/backends/oracle/base.py index d00c28c3e9..c2ad881ecc 100644 --- a/django/db/backends/oracle/base.py +++ b/django/db/backends/oracle/base.py @@ -49,8 +49,8 @@ _setup_environment( [ # Oracle takes client-side character set encoding from the environment. ("NLS_LANG", ".AL32UTF8"), - # This prevents Unicode from getting mangled by getting encoded into the - # potentially non-Unicode database character set. + # This prevents Unicode from getting mangled by getting encoded into + # the potentially non-Unicode database character set. ("ORA_NCHAR_LITERAL_REPLACE", "TRUE"), ] ) @@ -110,12 +110,13 @@ class DatabaseWrapper(BaseDatabaseWrapper): vendor = "oracle" display_name = "Oracle" # This dictionary maps Field objects to their associated Oracle column - # types, as strings. Column-type strings can contain format strings; they'll - # be interpolated against the values of Field.__dict__ before being output. - # If a column type is set to None, it won't be included in the output. + # types, as strings. Column-type strings can contain format strings; + # they'll be interpolated against the values of Field.__dict__ before being + # output. If a column type is set to None, it won't be included in the + # output. # - # Any format strings starting with "qn_" are quoted before being used in the - # output (the "qn_" prefix is stripped before the lookup is performed. + # Any format strings starting with "qn_" are quoted before being used in + # the output (the "qn_" prefix is stripped before the lookup is performed. 
data_types = { "AutoField": "NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY", "BigAutoField": "NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY", @@ -200,13 +201,13 @@ class DatabaseWrapper(BaseDatabaseWrapper): } # The patterns below are used to generate SQL pattern lookup clauses when - # the right-hand side of the lookup isn't a raw string (it might be an expression - # or the result of a bilateral transformation). - # In those cases, special characters for LIKE operators (e.g. \, %, _) - # should be escaped on the database side. + # the right-hand side of the lookup isn't a raw string (it might be an + # expression or the result of a bilateral transformation). In those cases, + # special characters for LIKE operators (e.g. \, %, _) should be escaped on + # the database side. # - # Note: we use str.format() here for readability as '%' is used as a wildcard for - # the LIKE operator. + # Note: we use str.format() here for readability as '%' is used as a + # wildcard for the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" _pattern_ops = { "contains": "'%%' || {} || '%%'", diff --git a/django/db/backends/oracle/creation.py b/django/db/backends/oracle/creation.py index 682379930f..efbec14d6e 100644 --- a/django/db/backends/oracle/creation.py +++ b/django/db/backends/oracle/creation.py @@ -36,7 +36,8 @@ class DatabaseCreation(BaseDatabaseCreation): ) except Exception as e: if "ORA-01543" not in str(e): - # All errors except "tablespace already exists" cancel tests + # All errors except "tablespace already exists" cancel + # tests self.log("Got an error creating the test database: %s" % e) sys.exit(2) if not autoclobber: @@ -406,7 +407,8 @@ class DatabaseCreation(BaseDatabaseCreation): def _test_database_passwd(self): password = self._test_settings_get("PASSWORD") if password is None and self._test_user_create(): - # Oracle passwords are limited to 30 chars and can't contain symbols. 
+ # Oracle passwords are limited to 30 chars and can't contain + # symbols. password = get_random_string(30) return password diff --git a/django/db/backends/oracle/introspection.py b/django/db/backends/oracle/introspection.py index b0077344ac..12b9b9a097 100644 --- a/django/db/backends/oracle/introspection.py +++ b/django/db/backends/oracle/introspection.py @@ -254,8 +254,8 @@ class DatabaseIntrospection(BaseDatabaseIntrospection): def get_relations(self, cursor, table_name): """ - Return a dictionary of {field_name: (field_name_other_table, other_table)} - representing all foreign keys in the given table. + Return a dictionary of {field_name: (field_name_other_table, + other_table)} representing all foreign keys in the given table. """ table_name = table_name.upper() cursor.execute( diff --git a/django/db/backends/oracle/operations.py b/django/db/backends/oracle/operations.py index 46a681a520..ce9ed7288d 100644 --- a/django/db/backends/oracle/operations.py +++ b/django/db/backends/oracle/operations.py @@ -378,9 +378,9 @@ END; # We simplify things by making Oracle identifiers always uppercase. if not name.startswith('"') and not name.endswith('"'): name = '"%s"' % truncate_name(name, self.max_name_length()) - # Oracle puts the query text into a (query % args) construct, so % signs - # in names need to be escaped. The '%%' will be collapsed back to '%' at - # that stage so we aren't really making the name longer here. + # Oracle puts the query text into a (query % args) construct, so % + # signs in names need to be escaped. The '%%' will be collapsed back to + # '%' at that stage so we aren't really making the name longer here. name = name.replace("%", "%%") return name.upper() @@ -589,8 +589,8 @@ END; def adapt_datetimefield_value(self, value): """ - Transform a datetime value to an object compatible with what is expected - by the backend driver for datetime columns. 
+ Transform a datetime value to an object compatible with what is + expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime diff --git a/django/db/backends/oracle/schema.py b/django/db/backends/oracle/schema.py index f094bfb038..48a048575d 100644 --- a/django/db/backends/oracle/schema.py +++ b/django/db/backends/oracle/schema.py @@ -136,7 +136,8 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): elif new_internal_type == "DateTimeField": new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value elif new_internal_type == "TimeField": - # TimeField are stored as TIMESTAMP with a 1900-01-01 date part. + # TimeField are stored as TIMESTAMP with a 1900-01-01 date + # part. new_value = "CONCAT('1900-01-01 ', %s)" % new_value new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value # Transfer values across diff --git a/django/db/backends/postgresql/base.py b/django/db/backends/postgresql/base.py index a0b5e4154e..cafa4c7a9c 100644 --- a/django/db/backends/postgresql/base.py +++ b/django/db/backends/postgresql/base.py @@ -61,8 +61,8 @@ else: psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString) psycopg2.extras.register_uuid() - # Register support for inet[] manually so we don't have to handle the Inet() - # object on load all the time. + # Register support for inet[] manually so we don't have to handle the + # Inet() object on load all the time. INETARRAY_OID = 1041 INETARRAY = psycopg2.extensions.new_array_type( (INETARRAY_OID,), @@ -71,7 +71,8 @@ else: ) psycopg2.extensions.register_type(INETARRAY) -# Some of these import psycopg, so import them after checking if it's installed. +# Some of these import psycopg, so import them after checking if it's +# installed. 
from .client import DatabaseClient # NOQA isort:skip from .creation import DatabaseCreation # NOQA isort:skip from .features import DatabaseFeatures # NOQA isort:skip @@ -90,9 +91,10 @@ class DatabaseWrapper(BaseDatabaseWrapper): vendor = "postgresql" display_name = "PostgreSQL" # This dictionary maps Field objects to their associated PostgreSQL column - # types, as strings. Column-type strings can contain format strings; they'll - # be interpolated against the values of Field.__dict__ before being output. - # If a column type is set to None, it won't be included in the output. + # types, as strings. Column-type strings can contain format strings; + # they'll be interpolated against the values of Field.__dict__ before being + # output. If a column type is set to None, it won't be included in the + # output. data_types = { "AutoField": "integer", "BigAutoField": "bigint", @@ -150,13 +152,13 @@ class DatabaseWrapper(BaseDatabaseWrapper): } # The patterns below are used to generate SQL pattern lookup clauses when - # the right-hand side of the lookup isn't a raw string (it might be an expression - # or the result of a bilateral transformation). - # In those cases, special characters for LIKE operators (e.g. \, *, _) should be - # escaped on database side. + # the right-hand side of the lookup isn't a raw string (it might be an + # expression or the result of a bilateral transformation). In those cases, + # special characters for LIKE operators (e.g. \, *, _) should be escaped on + # database side. # - # Note: we use str.format() here for readability as '%' is used as a wildcard for - # the LIKE operator. + # Note: we use str.format() here for readability as '%' is used as a + # wildcard for the LIKE operator. 
pattern_esc = ( r"REPLACE(REPLACE(REPLACE({}, E'\\', E'\\\\'), E'%%', E'\\%%'), E'_', E'\\_')" ) diff --git a/django/db/backends/postgresql/introspection.py b/django/db/backends/postgresql/introspection.py index aaa3d93e60..82013eb191 100644 --- a/django/db/backends/postgresql/introspection.py +++ b/django/db/backends/postgresql/introspection.py @@ -154,8 +154,8 @@ class DatabaseIntrospection(BaseDatabaseIntrospection): def get_relations(self, cursor, table_name): """ - Return a dictionary of {field_name: (field_name_other_table, other_table)} - representing all foreign keys in the given table. + Return a dictionary of {field_name: (field_name_other_table, + other_table)} representing all foreign keys in the given table. """ cursor.execute( """ diff --git a/django/db/backends/postgresql/operations.py b/django/db/backends/postgresql/operations.py index 9db755bb89..a8c073e418 100644 --- a/django/db/backends/postgresql/operations.py +++ b/django/db/backends/postgresql/operations.py @@ -221,8 +221,8 @@ class DatabaseOperations(BaseDatabaseOperations): return ["%s;" % " ".join(sql_parts)] def sequence_reset_by_name_sql(self, style, sequences): - # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements - # to reset sequence indices + # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... 
style SQL + # statements to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info["table"] diff --git a/django/db/backends/sqlite3/base.py b/django/db/backends/sqlite3/base.py index f7b65651b3..e378975207 100644 --- a/django/db/backends/sqlite3/base.py +++ b/django/db/backends/sqlite3/base.py @@ -60,9 +60,9 @@ Database.register_adapter(datetime.datetime, adapt_datetime) class DatabaseWrapper(BaseDatabaseWrapper): vendor = "sqlite" display_name = "SQLite" - # SQLite doesn't actually support most of these types, but it "does the right - # thing" given more verbose field definitions, so leave them as is so that - # schema inspection is more useful. + # SQLite doesn't actually support most of these types, but it "does the + # right thing" given more verbose field definitions, so leave them as is so + # that schema inspection is more useful. data_types = { "AutoField": "integer", "BigAutoField": "integer", @@ -124,13 +124,13 @@ class DatabaseWrapper(BaseDatabaseWrapper): } # The patterns below are used to generate SQL pattern lookup clauses when - # the right-hand side of the lookup isn't a raw string (it might be an expression - # or the result of a bilateral transformation). - # In those cases, special characters for LIKE operators (e.g. \, *, _) should be - # escaped on database side. + # the right-hand side of the lookup isn't a raw string (it might be an + # expression or the result of a bilateral transformation). In those cases, + # special characters for LIKE operators (e.g. \, *, _) should be escaped on + # database side. # - # Note: we use str.format() here for readability as '%' is used as a wildcard for - # the LIKE operator. + # Note: we use str.format() here for readability as '%' is used as a + # wildcard for the LIKE operator. 
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { "contains": r"LIKE '%%' || {} || '%%' ESCAPE '\'", diff --git a/django/db/backends/sqlite3/schema.py b/django/db/backends/sqlite3/schema.py index 10edfd81b9..ac6ae5efbd 100644 --- a/django/db/backends/sqlite3/schema.py +++ b/django/db/backends/sqlite3/schema.py @@ -206,9 +206,9 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): # Construct a new model with the new fields to allow self referential # primary key to resolve to. This model won't ever be materialized as a - # table and solely exists for foreign key reference resolution purposes. - # This wouldn't be required if the schema editor was operating on model - # states instead of rendered models. + # table and solely exists for foreign key reference resolution + # purposes. This wouldn't be required if the schema editor was + # operating on model states instead of rendered models. meta_contents = { "app_label": model._meta.app_label, "db_table": model._meta.db_table, @@ -303,10 +303,10 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): if field.many_to_many and field.remote_field.through._meta.auto_created: self.create_model(field.remote_field.through) elif isinstance(field, CompositePrimaryKey): - # If a CompositePrimaryKey field was added, the existing primary key field - # had to be altered too, resulting in an AddField, AlterField migration. - # The table cannot be re-created on AddField, it would result in a - # duplicate primary key error. + # If a CompositePrimaryKey field was added, the existing primary + # key field had to be altered too, resulting in an AddField, + # AlterField migration. The table cannot be re-created on AddField, + # it would result in a duplicate primary key error. 
return elif ( # Primary keys and unique fields are not supported in ALTER TABLE @@ -404,7 +404,8 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): related_models.add(remote_field.through) if new_field.primary_key: for many_to_many in opts.many_to_many: - # Ignore self-relationship since the table was already rebuilt. + # Ignore self-relationship since the table was already + # rebuilt. if many_to_many.related_model == model: continue if many_to_many.remote_field.through._meta.auto_created: diff --git a/django/db/migrations/autodetector.py b/django/db/migrations/autodetector.py index 648bac389c..c319cb2c03 100644 --- a/django/db/migrations/autodetector.py +++ b/django/db/migrations/autodetector.py @@ -92,8 +92,9 @@ class MigrationAutodetector: elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): - # If this is a type that implements 'deconstruct' as an instance method, - # avoid treating this as being deconstructible itself - see #22951 + # If this is a type that implements 'deconstruct' as an instance + # method, avoid treating this as being deconstructible itself - see + # #22951 return obj elif hasattr(obj, "deconstruct"): deconstructed = obj.deconstruct() @@ -754,7 +755,8 @@ class MigrationAutodetector: beginning=True, ) - # Don't add operations which modify the database for unmanaged models + # Don't add operations which modify the database for unmanaged + # models if not model_state.options.get("managed", True): continue @@ -904,7 +906,8 @@ class MigrationAutodetector: bases=model_state.bases, managers=model_state.managers, ), - # Depend on the deletion of any possible non-proxy version of us + # Depend on the deletion of any possible non-proxy version of + # us dependencies=dependencies, ) @@ -980,8 +983,8 @@ class MigrationAutodetector: ], ) # Finally, remove the model. 
- # This depends on both the removal/alteration of all incoming fields - # and the removal of all its own related fields, and if it's + # This depends on both the removal/alteration of all incoming + # fields and the removal of all its own related fields, and if it's # a through model the field that references it. dependencies = [] relations = self.from_state.relations @@ -1219,8 +1222,8 @@ class MigrationAutodetector: name=field_name, ), # We might need to depend on the removal of an - # order_with_respect_to or index/constraint/unique_together operation; - # this is safely ignored if there isn't one + # order_with_respect_to or index/constraint/unique_together + # operation; this is safely ignored if there isn't one dependencies=[ OperationDependency( app_label, @@ -1265,8 +1268,8 @@ class MigrationAutodetector: field_name ) dependencies = [] - # Implement any model renames on relations; these are handled by RenameModel - # so we need to exclude them from the comparison + # Implement any model renames on relations; these are handled by + # RenameModel so we need to exclude them from the comparison if hasattr(new_field, "remote_field") and getattr( new_field.remote_field, "model", None ): @@ -1287,7 +1290,8 @@ class MigrationAutodetector: new_field.remote_field.field_name = ( old_field.remote_field.field_name ) - # Handle ForeignObjects which can have multiple from_fields/to_fields. + # Handle ForeignObjects which can have multiple + # from_fields/to_fields. 
from_fields = getattr(new_field, "from_fields", None) if from_fields: from_rename_key = (app_label, model_name) @@ -1718,7 +1722,8 @@ class MigrationAutodetector: old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] - # We run the old version through the field renames to account for those + # We run the old version through the field renames to account for + # those old_value = old_model_state.options.get(option_name) old_value = ( { diff --git a/django/db/migrations/executor.py b/django/db/migrations/executor.py index 61b2b54f6c..1ad7d0c18c 100644 --- a/django/db/migrations/executor.py +++ b/django/db/migrations/executor.py @@ -21,7 +21,8 @@ class MigrationExecutor: def migration_plan(self, targets, clean_start=False): """ - Given a set of targets, return a list of (Migration instance, backwards?). + Given a set of targets, return a list of (Migration instance, + backwards?). """ plan = [] if clean_start: @@ -29,7 +30,8 @@ class MigrationExecutor: else: applied = dict(self.loader.applied_migrations) for target in targets: - # If the target is (app_label, None), that means unmigrate everything + # If the target is (app_label, None), that means unmigrate + # everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: diff --git a/django/db/migrations/graph.py b/django/db/migrations/graph.py index dd845c13e8..ff5ecbc8b4 100644 --- a/django/db/migrations/graph.py +++ b/django/db/migrations/graph.py @@ -71,16 +71,16 @@ class MigrationGraph: branch merges can be detected and resolved. Migrations files can be marked as replacing another set of migrations - - this is to support the "squash" feature. 
The graph handler isn't responsible - for these; instead, the code to load them in here should examine the - migration files and if the replaced migrations are all either unapplied - or not present, it should ignore the replaced ones, load in just the - replacing migration, and repoint any dependencies that pointed to the + this is to support the "squash" feature. The graph handler isn't + responsible for these; instead, the code to load them in here should + examine the migration files and if the replaced migrations are all either + unapplied or not present, it should ignore the replaced ones, load in just + the replacing migration, and repoint any dependencies that pointed to the replaced migrations to point to the replacing one. - A node should be a tuple: (app_path, migration_name). The tree special-cases - things within an app - namely, root nodes and leaf nodes ignore dependencies - to other apps. + A node should be a tuple: (app_path, migration_name). The tree + special-cases things within an app - namely, root nodes and leaf nodes + ignore dependencies to other apps. """ def __init__(self): @@ -145,7 +145,8 @@ class MigrationGraph: child.parents.remove(replaced_node) # We don't want to create dependencies between the replaced # node and the replacement node as this would lead to - # self-referencing on the replacement node at a later iteration. + # self-referencing on the replacement node at a later + # iteration. if child.key not in replaced: replacement_node.add_child(child) child.add_parent(replacement_node) @@ -315,7 +316,8 @@ class MigrationGraph: """ Given a migration node or nodes, return a complete ProjectState for it. If at_end is False, return the state before the migration has run. - If nodes is not provided, return the overall most current project state. + If nodes is not provided, return the overall most current project + state. 
""" if nodes is None: nodes = list(self.leaf_nodes()) diff --git a/django/db/migrations/loader.py b/django/db/migrations/loader.py index 207be657b4..66944c7ab7 100644 --- a/django/db/migrations/loader.py +++ b/django/db/migrations/loader.py @@ -34,9 +34,9 @@ class MigrationLoader: Some migrations will be marked as "replacing" another set of migrations. These are loaded into a separate set of migrations away from the main ones. If all the migrations they replace are either unapplied or missing from - disk, then they are injected into the main set, replacing the named migrations. - Any dependency pointers to the replaced migrations are re-pointed to the - new migration. + disk, then they are injected into the main set, replacing the named + migrations. Any dependency pointers to the replaced migrations are + re-pointed to the new migration. This does mean that this class MUST also talk to the database as well as to disk, but this is probably fine. We're already not just operating @@ -145,7 +145,8 @@ class MigrationLoader: def get_migration_by_prefix(self, app_label, name_prefix): """ - Return the migration(s) which match the given app label and name_prefix. + Return the migration(s) which match the given app label and + name_prefix. """ # Do the search results = [] @@ -274,7 +275,8 @@ class MigrationLoader: """ Build a migration dependency graph using both the disk and database. You'll need to rebuild the graph if you apply migrations. This isn't - usually a problem as generally migration stuff runs in a one-shot process. + usually a problem as generally migration stuff runs in a one-shot + process. """ # Load disk data self.load_disk() @@ -285,7 +287,8 @@ class MigrationLoader: recorder = MigrationRecorder(self.connection) self.applied_migrations = recorder.applied_migrations() # To start, populate the migration graph with nodes for ALL migrations - # and their dependencies. Also make note of replacing migrations at this step. + # and their dependencies. 
Also make note of replacing migrations at + # this step. self.graph = MigrationGraph() self.replacements = {} for key, migration in self.disk_migrations.items(): @@ -296,7 +299,8 @@ class MigrationLoader: for key, migration in self.disk_migrations.items(): # Internal (same app) dependencies. self.add_internal_dependencies(key, migration) - # Add external dependencies now that the internal ones have been resolved. + # Add external dependencies now that the internal ones have been + # resolved. for key, migration in self.disk_migrations.items(): self.add_external_dependencies(key, migration) # Carry out replacements where possible and if enabled. @@ -310,8 +314,8 @@ class MigrationLoader: except NodeNotFoundError as exc: # Check if the missing node could have been replaced by any squash # migration but wasn't because the squash migration was partially - # applied before. In that case raise a more understandable exception - # (#23556). + # applied before. In that case raise a more understandable + # exception (#23556). # Get reverse replacements. 
reverse_replacements = {} for key, migration in self.replacements.items(): diff --git a/django/db/migrations/operations/special.py b/django/db/migrations/operations/special.py index 196f24fcd6..0700023325 100644 --- a/django/db/migrations/operations/special.py +++ b/django/db/migrations/operations/special.py @@ -31,7 +31,8 @@ class SeparateDatabaseAndState(Operation): state_operation.state_forwards(app_label, state) def database_forwards(self, app_label, schema_editor, from_state, to_state): - # We calculate state separately in here since our state functions aren't useful + # We calculate state separately in here since our state functions + # aren't useful for database_operation in self.database_operations: to_state = from_state.clone() database_operation.state_forwards(app_label, to_state) @@ -41,7 +42,8 @@ class SeparateDatabaseAndState(Operation): from_state = to_state def database_backwards(self, app_label, schema_editor, from_state, to_state): - # We calculate state separately in here since our state functions aren't useful + # We calculate state separately in here since our state functions + # aren't useful to_states = {} for dbop in self.database_operations: to_states[dbop] = to_state @@ -189,10 +191,11 @@ class RunPython(Operation): if router.allow_migrate( schema_editor.connection.alias, app_label, **self.hints ): - # We now execute the Python code in a context that contains a 'models' - # object, representing the versioned models as an app registry. - # We could try to override the global cache, but then people will still - # use direct imports, so we go with a documentation approach instead. + # We now execute the Python code in a context that contains a + # 'models' object, representing the versioned models as an app + # registry. We could try to override the global cache, but then + # people will still use direct imports, so we go with a + # documentation approach instead. 
self.code(from_state.apps, schema_editor) def database_backwards(self, app_label, schema_editor, from_state, to_state): diff --git a/django/db/migrations/serializer.py b/django/db/migrations/serializer.py index cfc2657261..8366fb0a42 100644 --- a/django/db/migrations/serializer.py +++ b/django/db/migrations/serializer.py @@ -250,7 +250,8 @@ class OperationSerializer(BaseSerializer): from django.db.migrations.writer import OperationWriter string, imports = OperationWriter(self.value, indentation=0).serialize() - # Nested operation, trailing comma is handled in upper OperationWriter._write() + # Nested operation, trailing comma is handled in upper + # OperationWriter._write() return string.rstrip(","), imports diff --git a/django/db/migrations/state.py b/django/db/migrations/state.py index 8e6dd5538f..802aeb0b5e 100644 --- a/django/db/migrations/state.py +++ b/django/db/migrations/state.py @@ -73,9 +73,10 @@ def get_related_models_recursive(model): Relationships are either defined by explicit relational fields, like ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another - model (a superclass is related to its subclasses, but not vice versa). Note, - however, that a model inheriting from a concrete model is also related to - its superclass through the implicit *_ptr OneToOneField on the subclass. + model (a superclass is related to its subclasses, but not vice versa). + Note, however, that a model inheriting from a concrete model is also + related to its superclass through the implicit *_ptr OneToOneField on the + subclass. 
""" seen = set() queue = _get_related_models(model) diff --git a/django/db/migrations/writer.py b/django/db/migrations/writer.py index e2befd4d4e..c1101b5bb0 100644 --- a/django/db/migrations/writer.py +++ b/django/db/migrations/writer.py @@ -160,8 +160,8 @@ class MigrationWriter: "\n".join(sorted(dependencies)) + "\n" if dependencies else "" ) - # Format imports nicely, swapping imports of functions from migration files - # for comments + # Format imports nicely, swapping imports of functions from migration + # files for comments migration_imports = set() for line in list(imports): if re.match(r"^import (.*)\.\d+[^\s]*$", line): diff --git a/django/db/models/aggregates.py b/django/db/models/aggregates.py index 444d72addb..1cf82416cb 100644 --- a/django/db/models/aggregates.py +++ b/django/db/models/aggregates.py @@ -353,10 +353,10 @@ class StringAgg(Aggregate): extra_context["template"] = template c = self.copy() - # The creation of the delimiter SQL and the ordering of the parameters must be - # handled explicitly, as MySQL puts the delimiter at the end of the aggregate - # using the `SEPARATOR` declaration (rather than treating as an expression like - # other database backends). + # The creation of the delimiter SQL and the ordering of the parameters + # must be handled explicitly, as MySQL puts the delimiter at the end of + # the aggregate using the `SEPARATOR` declaration (rather than treating + # as an expression like other database backends). delimiter_params = [] if c.delimiter: delimiter_sql, delimiter_params = compiler.compile(c.delimiter) diff --git a/django/db/models/base.py b/django/db/models/base.py index 901743147d..7c20319da6 100644 --- a/django/db/models/base.py +++ b/django/db/models/base.py @@ -493,10 +493,10 @@ class Model(AltersData, metaclass=ModelBase): # Set up the storage for instance state self._state = ModelState() - # There is a rather weird disparity here; if kwargs, it's set, then args - # overrides it. 
It should be one or the other; don't duplicate the work - # The reason for the kwargs check is that standard iterator passes in by - # args, and instantiation for iteration is 33% faster. + # There is a rather weird disparity here; if kwargs, it's set, then + # args overrides it. It should be one or the other; don't duplicate the + # work The reason for the kwargs check is that standard iterator passes + # in by args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") @@ -504,9 +504,9 @@ class Model(AltersData, metaclass=ModelBase): if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration - # when an iter throws it. So if the first iter throws it, the second - # is *not* consumed. We rely on this, so don't change the order - # without changing the logic. + # when an iter throws it. So if the first iter throws it, the + # second is *not* consumed. We rely on this, so don't change the + # order without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue @@ -540,7 +540,8 @@ class Model(AltersData, metaclass=ModelBase): is_related_object = True except KeyError: try: - # Object instance wasn't passed in -- must be an ID. + # Object instance wasn't passed in -- must be an + # ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() @@ -1079,7 +1080,8 @@ class Model(AltersData, metaclass=ModelBase): and all(f.has_default() or f.has_db_default() for f in meta.pk_fields) ): force_insert = True - # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. + # If possible, try an UPDATE. If that doesn't update anything, do an + # INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [ @@ -1142,21 +1144,22 @@ class Model(AltersData, metaclass=ModelBase): if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that - # case we just say the update succeeded. Another case ending up here - # is a model with just PK - in that case check that the PK still - # exists. + # case we just say the update succeeded. Another case ending up + # here is a model with just PK - in that case check that the PK + # still exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and - # It may happen that the object is deleted from the DB right after - # this check, causing the subsequent UPDATE to return zero matching - # rows. The same result can occur in some rare cases when the - # database returns zero despite the UPDATE being executed - # successfully (a row is matched and updated). In order to - # distinguish these two cases, the object's existence in the - # database is again checked for if the UPDATE query returns 0. + # It may happen that the object is deleted from the DB right + # after this check, causing the subsequent UPDATE to return + # zero matching rows. The same result can occur in some rare + # cases when the database returns zero despite the UPDATE being + # executed successfully (a row is matched and updated). In + # order to distinguish these two cases, the object's existence + # in the database is again checked for if the UPDATE query + # returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 @@ -1347,7 +1350,8 @@ class Model(AltersData, metaclass=ModelBase): Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. 
Any ValidationError raised by this method will not be associated with a particular field; it will - have a special-case association with the field defined by NON_FIELD_ERRORS. + have a special-case association with the field defined by + NON_FIELD_ERRORS. """ pass @@ -1878,7 +1882,9 @@ class Model(AltersData, metaclass=ModelBase): @classmethod def _check_m2m_through_same_relationship(cls): - """Check if no relationship model is used by more than one m2m field.""" + """ + Check if no relationship model is used by more than one m2m field. + """ errors = [] seen_intermediary_signatures = [] @@ -2003,7 +2009,8 @@ class Model(AltersData, metaclass=ModelBase): @classmethod def _check_column_name_clashes(cls): - # Store a list of column names which have already been used by other fields. + # Store a list of column names which have already been used by other + # fields. used_column_names = [] errors = [] diff --git a/django/db/models/constraints.py b/django/db/models/constraints.py index ae2709abb8..73ab23bdfa 100644 --- a/django/db/models/constraints.py +++ b/django/db/models/constraints.py @@ -593,8 +593,8 @@ class UniqueConstraint(BaseConstraint): ].features.interprets_empty_strings_as_nulls ) ): - # A composite constraint containing NULL value cannot cause - # a violation since NULL != NULL in SQL. + # A composite constraint containing NULL value cannot + # cause a violation since NULL != NULL in SQL. return lookup_kwargs[field.name] = lookup_value lookup_args = [] @@ -646,8 +646,8 @@ class UniqueConstraint(BaseConstraint): and self.violation_error_message == self.default_violation_error_message ): - # When fields are defined, use the unique_error_message() as - # a default for backward compatibility. + # When fields are defined, use the unique_error_message() + # as a default for backward compatibility. 
validation_error_message = instance.unique_error_message( model, self.fields ) diff --git a/django/db/models/deletion.py b/django/db/models/deletion.py index 9221364ff4..b1939f8b35 100644 --- a/django/db/models/deletion.py +++ b/django/db/models/deletion.py @@ -82,8 +82,9 @@ def DO_NOTHING(collector, field, sub_objs, using): def get_candidate_relations_to_delete(opts): - # The candidate relations are the ones that come from N-1 and 1-1 relations. - # N-N (i.e., many-to-many) relations aren't candidates for deletion. + # The candidate relations are the ones that come from N-1 and 1-1 + # relations. N-N (i.e., many-to-many) relations aren't candidates for + # deletion. return ( f for f in opts.get_fields(include_hidden=True) @@ -434,8 +435,8 @@ class Collector: self.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that - # don't support transactions or cannot defer constraint checks until the - # end of a transaction. + # don't support transactions or cannot defer constraint checks until + # the end of a transaction. self.sort() # number of objects deleted for each model label deleted_counter = Counter() diff --git a/django/db/models/expressions.py b/django/db/models/expressions.py index bf89a4f561..012a7c346b 100644 --- a/django/db/models/expressions.py +++ b/django/db/models/expressions.py @@ -289,7 +289,8 @@ class BaseExpression: in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause - * for_save: whether this expression about to be used in a save or update + * for_save: whether this expression about to be used in a save or + update Return: an Expression to be added to the query. """ @@ -349,9 +350,9 @@ class BaseExpression: As a guess, if the output fields of all source fields match then simply infer the same type here. - If a source's output field resolves to None, exclude it from this check. 
- If all sources are None, then an error is raised higher up the stack in - the output_field property. + If a source's output field resolves to None, exclude it from this + check. If all sources are None, then an error is raised higher up the + stack in the output_field property. """ # This guess is mostly a bad idea, but there is quite a lot of code # (especially 3rd party Func subclasses) that depend on it, we'd need a @@ -500,7 +501,8 @@ class BaseExpression: return sql, params def get_expression_for_validation(self): - # Ignore expressions that cannot be used during a constraint validation. + # Ignore expressions that cannot be used during a constraint + # validation. if not getattr(self, "constraint_validation_compatible", True): try: (expression,) = self.get_source_expressions() @@ -1264,7 +1266,8 @@ class Star(Expression): class DatabaseDefault(Expression): """ - Expression to use DEFAULT keyword during insert otherwise the underlying expression. + Expression to use DEFAULT keyword during insert otherwise the underlying + expression. """ def __init__(self, expression, output_field=None): @@ -1625,7 +1628,8 @@ class When(Expression): ): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) if for_save and c.condition is not None: - # Resolve condition with for_save=False, since it's used as a filter. + # Resolve condition with for_save=False, since it's used as a + # filter. c.condition = self.condition.resolve_expression( query, allow_joins, reuse, summarize, for_save=False ) diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py index 69289627f0..e7add282a6 100644 --- a/django/db/models/fields/__init__.py +++ b/django/db/models/fields/__init__.py @@ -997,7 +997,8 @@ class Field(RegisterLookupMixin): def get_db_prep_value(self, value, connection, prepared=False): """ - Return field's value prepared for interacting with the database backend. 
+ Return field's value prepared for interacting with the database + backend. Used by the default implementations of get_db_prep_save(). """ @@ -1927,8 +1928,8 @@ class EmailField(CharField): def deconstruct(self): name, path, args, kwargs = super().deconstruct() - # We do not exclude max_length if it matches default as we want to change - # the default in future. + # We do not exclude max_length if it matches default as we want to + # change the default in future. return name, path, args, kwargs def formfield(self, **kwargs): diff --git a/django/db/models/fields/files.py b/django/db/models/fields/files.py index 8f807b1156..5216ff565f 100644 --- a/django/db/models/fields/files.py +++ b/django/db/models/fields/files.py @@ -176,11 +176,11 @@ class FileDescriptor(DeferredAttribute): # instance.file needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like - # instance.file.path and have that delegated to the file storage engine. - # Easy enough if we're strict about assignment in __set__, but if you - # peek below you can see that we're not. So depending on the current - # value of the field we have to dynamically construct some sort of - # "thing" to return. + # instance.file.path and have that delegated to the file storage + # engine. Easy enough if we're strict about assignment in __set__, but + # if you peek below you can see that we're not. So depending on the + # current value of the field we have to dynamically construct some sort + # of "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. @@ -204,8 +204,8 @@ class FileDescriptor(DeferredAttribute): instance.__dict__[self.field.attname] = attr # Other types of files may be assigned as well, but they need to have - # the FieldFile interface added to them. 
Thus, we wrap any other type of - # File inside a FieldFile (well, the field's attr_class, which is + # the FieldFile interface added to them. Thus, we wrap any other type + # of File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) @@ -215,7 +215,8 @@ class FileDescriptor(DeferredAttribute): # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated - # file. So we need to reset the details of the FieldFile in those cases. + # file. So we need to reset the details of the FieldFile in those + # cases. elif isinstance(file, FieldFile) and not hasattr(file, "field"): file.instance = instance file.field = self.field diff --git a/django/db/models/fields/related.py b/django/db/models/fields/related.py index bad71a5fd6..a59dcac68c 100644 --- a/django/db/models/fields/related.py +++ b/django/db/models/fields/related.py @@ -663,8 +663,8 @@ class ForeignObject(RelatedField): frozenset(uc.fields) <= foreign_fields for uc in remote_opts.total_unique_constraints ) - # If the model defines a composite primary key and the foreign key - # refers to it, the target is unique. + # If the model defines a composite primary key and the foreign + # key refers to it, the target is unique. or ( frozenset(field.name for field in remote_opts.pk_fields) == foreign_fields @@ -746,8 +746,8 @@ class ForeignObject(RelatedField): kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower - # If swappable is True, then see if we're actually pointing to the target - # of a swap. + # If swappable is True, then see if we're actually pointing to the + # target of a swap. 
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error @@ -1825,8 +1825,8 @@ class ManyToManyField(RelatedField): kwargs["through"] = self.remote_field.through._meta.label if through_fields := getattr(self.remote_field, "through_fields", None): kwargs["through_fields"] = through_fields - # If swappable is True, then see if we're actually pointing to the target - # of a swap. + # If swappable is True, then see if we're actually pointing to the + # target of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. diff --git a/django/db/models/fields/related_descriptors.py b/django/db/models/fields/related_descriptors.py index 8da7aaef91..3e2150e0f6 100644 --- a/django/db/models/fields/related_descriptors.py +++ b/django/db/models/fields/related_descriptors.py @@ -309,17 +309,17 @@ class ForwardManyToOneDescriptor: ) remote_field = self.field.remote_field - # If we're setting the value of a OneToOneField to None, we need to clear - # out the cache on any old related object. Otherwise, deleting the - # previously-related object will also cause this object to be deleted, - # which is wrong. + # If we're setting the value of a OneToOneField to None, we need to + # clear out the cache on any old related object. Otherwise, deleting + # the previously-related object will also cause this object to be + # deleted, which is wrong. if value is None: - # Look up the previously-related object, which may still be available - # since we've not yet cleared out the related field. - # Use the cache directly, instead of the accessor; if we haven't + # Look up the previously-related object, which may still be + # available since we've not yet cleared out the related field. 
Use + # the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing - # the object to invalidate the accessor cache, so there's no - # need to populate the cache just to expire it again. + # the object to invalidate the accessor cache, so there's no need + # to populate the cache just to expire it again. related = self.field.get_cached_value(instance, default=None) # If we've got an old related object, we need to clear out its @@ -357,7 +357,8 @@ class ForwardManyToOneDescriptor: class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor): """ - Accessor to the related object on the forward side of a one-to-one relation. + Accessor to the related object on the forward side of a one-to-one + relation. In the example:: @@ -531,7 +532,8 @@ class ReverseOneToOneDescriptor: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - - ``value`` is the ``restaurant`` instance on the right of the equal sign + - ``value`` is the ``restaurant`` instance on the right of the equal + sign Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ @@ -586,12 +588,13 @@ class ReverseOneToOneDescriptor: for index, field in enumerate(self.related.field.local_related_fields): setattr(value, field.attname, related_pk[index]) - # Set the related instance cache used by __get__ to avoid an SQL query - # when accessing the attribute we just set. + # Set the related instance cache used by __get__ to avoid an SQL + # query when accessing the attribute we just set. self.related.set_cached_value(instance, value) - # Set the forward accessor cache on the related object to the current - # instance to avoid an extra SQL query if it's accessed later on. + # Set the forward accessor cache on the related object to the + # current instance to avoid an extra SQL query if it's accessed + # later on. 
self.related.field.set_cached_value(value, instance) def __reduce__(self): @@ -1076,8 +1079,8 @@ def create_forward_many_to_many_manager(superclass, rel, reverse): def _build_remove_filters(self, removed_vals): filters = Q.create([(self.source_field_name, self.related_val)]) - # No need to add a subquery condition if removed_vals is a QuerySet without - # filters. + # No need to add a subquery condition if removed_vals is a QuerySet + # without filters. removed_vals_filters = ( not isinstance(removed_vals, QuerySet) or removed_vals._has_filters() ) @@ -1145,8 +1148,8 @@ def create_forward_many_to_many_manager(superclass, rel, reverse): # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that - # there will already be a join on the join table, so we can just add - # the select. + # there will already be a join on the join table, so we can just + # add the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. @@ -1475,10 +1478,10 @@ def create_forward_many_to_many_manager(superclass, rel, reverse): def _add_items( self, source_field_name, target_field_name, *objs, through_defaults=None ): - # source_field_name: the PK fieldname in join table for the source object - # target_field_name: the PK fieldname in join table for the target object - # *objs - objects to add. Either object instances, or primary keys - # of object instances. + # source_field_name: the PK fieldname in join table for the source + # object target_field_name: the PK fieldname in join table for the + # target object *objs - objects to add. Either object instances, or + # primary keys of object instances. 
if not objs: return @@ -1544,10 +1547,10 @@ def create_forward_many_to_many_manager(superclass, rel, reverse): ) def _remove_items(self, source_field_name, target_field_name, *objs): - # source_field_name: the PK colname in join table for the source object - # target_field_name: the PK colname in join table for the target object - # *objs - objects to remove. Either object instances, or primary - # keys of object instances. + # source_field_name: the PK colname in join table for the source + # object target_field_name: the PK colname in join table for the + # target object *objs - objects to remove. Either object instances, + # or primary keys of object instances. if not objs: return diff --git a/django/db/models/fields/related_lookups.py b/django/db/models/fields/related_lookups.py index 9fc7db7c34..639c29d7ba 100644 --- a/django/db/models/fields/related_lookups.py +++ b/django/db/models/fields/related_lookups.py @@ -28,8 +28,9 @@ def get_normalized_value(value, lhs): try: value_list.append(getattr(value, source.attname)) except AttributeError: - # A case like Restaurant.objects.filter(place=restaurant_instance), - # where place is a OneToOneField and the primary key of Restaurant. + # A case like + # Restaurant.objects.filter(place=restaurant_instance), where + # place is a OneToOneField and the primary key of Restaurant. pk = value.pk return pk if isinstance(pk, tuple) else (pk,) return tuple(value_list) @@ -101,10 +102,10 @@ class RelatedLookupMixin: ): # If we get here, we are dealing with single-column relations. self.rhs = get_normalized_value(self.rhs, self.lhs)[0] - # We need to run the related field's get_prep_value(). Consider case - # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself - # doesn't have validation for non-integers, so we must run validation - # using the target field. + # We need to run the related field's get_prep_value(). Consider + # case ForeignKey to IntegerField given value 'abc'. 
The ForeignKey + # itself doesn't have validation for non-integers, so we must run + # validation using the target field. if self.prepare_rhs and hasattr(self.lhs.output_field, "path_infos"): # Get the target field. We can safely assume there is only one # as we don't get to the direct value branch otherwise. diff --git a/django/db/models/fields/tuple_lookups.py b/django/db/models/fields/tuple_lookups.py index 62818a37c4..b861bbe9cc 100644 --- a/django/db/models/fields/tuple_lookups.py +++ b/django/db/models/fields/tuple_lookups.py @@ -370,7 +370,8 @@ class TupleIn(TupleLookupMixin, In): return super(TupleLookupMixin, self).as_sql(compiler, connection) # e.g.: (a, b, c) in [(x1, y1, z1), (x2, y2, z2)] as SQL: - # WHERE (a = x1 AND b = y1 AND c = z1) OR (a = x2 AND b = y2 AND c = z2) + # WHERE (a = x1 AND b = y1 AND c = z1) OR (a = x2 AND b = y2 AND c = + # z2) root = WhereNode([], connector=OR) lhs = self.lhs diff --git a/django/db/models/functions/datetime.py b/django/db/models/functions/datetime.py index 361e4ce385..b536690c8a 100644 --- a/django/db/models/functions/datetime.py +++ b/django/db/models/functions/datetime.py @@ -96,7 +96,8 @@ class Extract(TimezoneMixin, Transform): "Extract input expression must be DateField, DateTimeField, " "TimeField, or DurationField." ) - # Passing dates to functions expecting datetimes is most likely a mistake. + # Passing dates to functions expecting datetimes is most likely a + # mistake. if type(field) is DateField and copy.lookup_name in ( "hour", "minute", diff --git a/django/db/models/functions/json.py b/django/db/models/functions/json.py index 3a4c9c81b3..fee7dd05f4 100644 --- a/django/db/models/functions/json.py +++ b/django/db/models/functions/json.py @@ -98,8 +98,8 @@ class JSONObject(Func): def as_postgresql(self, compiler, connection, **extra_context): # Casting keys to text is only required when using JSONB_BUILD_OBJECT - # or when using JSON_OBJECT on PostgreSQL 16+ with server-side bindings. 
- # This is done in all cases for consistency. + # or when using JSON_OBJECT on PostgreSQL 16+ with server-side + # bindings. This is done in all cases for consistency. copy = self.copy() copy.set_source_expressions( [ diff --git a/django/db/models/functions/text.py b/django/db/models/functions/text.py index 9c48659bf9..28660c5e66 100644 --- a/django/db/models/functions/text.py +++ b/django/db/models/functions/text.py @@ -110,7 +110,8 @@ class ConcatPair(Func): ) def coalesce(self): - # null on either side results in null for expression, wrap with coalesce + # null on either side results in null for expression, wrap with + # coalesce c = self.copy() c.set_source_expressions( [ diff --git a/django/db/models/options.py b/django/db/models/options.py index 296309236f..0e229dea3a 100644 --- a/django/db/models/options.py +++ b/django/db/models/options.py @@ -348,9 +348,10 @@ class Options: # being referenced, because there will be new relationships in the # cache. Otherwise, expire the cache of references *to* this field. # The mechanism for getting at the related model is slightly odd - - # ideally, we'd just ask for field.related_model. However, related_model - # is a cached property, and all the models haven't been loaded yet, so - # we need to make sure we don't cache a string reference. + # ideally, we'd just ask for field.related_model. However, + # related_model is a cached property, and all the models haven't been + # loaded yet, so we need to make sure we don't cache a string + # reference. if ( field.is_relation and hasattr(field.remote_field, "model") @@ -427,8 +428,8 @@ class Options: except ValueError: # setting not in the format app_label.model_name # raising ImproperlyConfigured here causes problems with - # test cleanup code - instead it is raised in get_user_model - # or as part of validation. + # test cleanup code - instead it is raised in + # get_user_model or as part of validation. 
return swapped_for if ( @@ -534,10 +535,10 @@ class Options: # For legacy reasons, the fields property should only contain forward # fields that are not private or with a m2m cardinality. Therefore we # pass these three filters as filters to the generator. - # The third filter is a longwinded way of checking f.related_model - we don't - # use that property directly because related_model is a cached property, - # and all the models may not have been loaded yet; we don't want to cache - # the string reference to the related_model. + # The third filter is a longwinded way of checking f.related_model - we + # don't use that property directly because related_model is a cached + # property, and all the models may not have been loaded yet; we don't + # want to cache the string reference to the related_model. def is_not_an_m2m_field(f): return not (f.is_relation and f.many_to_many) @@ -707,7 +708,8 @@ class Options: def all_parents(self): """ Return all the ancestors of this model as a tuple ordered by MRO. - Useful for determining if something is an ancestor, regardless of lineage. + Useful for determining if something is an ancestor, regardless of + lineage. """ result = OrderedSet(self.parents) for parent in self.parents: @@ -800,8 +802,8 @@ class Options: """ This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every - field in a model, in every app), it is computed on first access and then - is set as a property on every model. + field in a model, in every app), it is computed on first access and + then is set as a property on every model. """ related_objects_graph = defaultdict(list) diff --git a/django/db/models/query.py b/django/db/models/query.py index 8163b5b973..3e3753ee5a 100644 --- a/django/db/models/query.py +++ b/django/db/models/query.py @@ -755,8 +755,9 @@ class QuerySet(AltersData): Insert each of the instances into the database. 
Do *not* call save() on each of the instances, do not send any pre/post_save signals, and do not set the primary key attribute if it is an - autoincrement field (except if features.can_return_rows_from_bulk_insert=True). - Multi-table models are not supported. + autoincrement field (except if + features.can_return_rows_from_bulk_insert=True). Multi-table models are + not supported. """ # When you bulk insert you don't get the primary keys back (if it's an # autoincrement, except if can_return_rows_from_bulk_insert=True), so @@ -774,8 +775,9 @@ class QuerySet(AltersData): raise ValueError("Batch size must be a positive integer.") # Check that the parents share the same concrete model with the our # model to detect the inheritance pattern ConcreteGrandParent -> - # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy - # would not identify that case as involving multiple tables. + # MultiTableParent -> ProxyChild. Simply checking + # self.model._meta.proxy would not identify that case as involving + # multiple tables. for parent in self.model._meta.all_parents: if parent._meta.concrete_model is not self.model._meta.concrete_model: raise ValueError("Can't bulk create a multi-table inherited model") @@ -1302,10 +1304,10 @@ class QuerySet(AltersData): def _update(self, values): """ - A version of update() that accepts field objects instead of field names. - Used primarily for model saving and not intended for use by general - code (it requires too much poking around at model internals to be - useful at that level). + A version of update() that accepts field objects instead of field + names. Used primarily for model saving and not intended for use by + general code (it requires too much poking around at model internals to + be useful at that level). 
""" if self.query.is_sliced: raise TypeError("Cannot update a query once a slice has been taken.") @@ -2365,9 +2367,9 @@ def prefetch_related_objects(model_instances, *related_lookups): # Prepare objects: good_objects = True for obj in obj_list: - # Since prefetching can re-use instances, it is possible to have - # the same instance multiple times in obj_list, so obj might - # already be prepared. + # Since prefetching can re-use instances, it is possible to + # have the same instance multiple times in obj_list, so obj + # might already be prepared. if not hasattr(obj, "_prefetched_objects_cache"): try: obj._prefetched_objects_cache = {} @@ -2376,7 +2378,8 @@ def prefetch_related_objects(model_instances, *related_lookups): # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd - # party. prefetch_related() doesn't make sense, so quit. + # party. prefetch_related() doesn't make sense, so + # quit. good_objects = False break if not good_objects: @@ -2384,8 +2387,9 @@ def prefetch_related_objects(model_instances, *related_lookups): # Descend down tree - # We assume that objects retrieved are homogeneous (which is the premise - # of prefetch_related), so what applies to first object applies to all. + # We assume that objects retrieved are homogeneous (which is the + # premise of prefetch_related), so what applies to first object + # applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher( @@ -2462,8 +2466,8 @@ def prefetch_related_objects(model_instances, *related_lookups): if new_obj is None: continue # We special-case `list` rather than something more generic - # like `Iterable` because we don't want to accidentally match - # user models that define __iter__. + # like `Iterable` because we don't want to accidentally + # match user models that define __iter__. 
if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: @@ -2528,8 +2532,8 @@ def get_prefetcher(instance, through_attr, to_attr): if through_attr == to_attr: is_fetched = rel_obj_descriptor.is_cached else: - # descriptor doesn't support prefetching, so we go ahead and get - # the attribute on the instance rather than the class to + # descriptor doesn't support prefetching, so we go ahead and + # get the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, through_attr) if hasattr(rel_obj, "get_prefetch_querysets"): @@ -2556,12 +2560,14 @@ def prefetch_one_level(instances, prefetcher, lookup, level): # prefetcher must have a method get_prefetch_querysets() which takes a list # of instances, and returns a tuple: - # (queryset of instances of self.model that are related to passed in instances, + # (queryset of instances of self.model that are related to passed in + # instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, - # boolean that is True when the previous argument is a cache name vs a field name). + # boolean that is True when the previous argument is a cache name vs a + # field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. @@ -2601,8 +2607,9 @@ def prefetch_one_level(instances, prefetcher, lookup, level): to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: - # We assume that objects retrieved are homogeneous (which is the premise - # of prefetch_related), so what applies to first object applies to all. + # We assume that objects retrieved are homogeneous (which is the + # premise of prefetch_related), so what applies to first object applies + # to all. 
model = instances[0].__class__ try: model._meta.get_field(to_attr) diff --git a/django/db/models/query_utils.py b/django/db/models/query_utils.py index 3e644a3c26..5da3d81672 100644 --- a/django/db/models/query_utils.py +++ b/django/db/models/query_utils.py @@ -302,8 +302,9 @@ class RegisterLookupMixin: @staticmethod def merge_dicts(dicts): """ - Merge dicts in reverse to preference the order of the original list. e.g., - merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'. + Merge dicts in reverse to preference the order of the original list. + e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in + 'b'. """ merged = {} for d in reversed(dicts): @@ -435,8 +436,8 @@ def check_rel_lookup_compatibility(model, target_opts, field): # Restaurant.objects.filter(pk__in=Restaurant.objects.all()). # If we didn't have the primary key check, then pk__in (== place__in) would # give Place's opts as the target opts, but Restaurant isn't compatible - # with that. This logic applies only to primary keys, as when doing __in=qs, - # we are going to turn this into __in=qs.values('pk') later on. + # with that. This logic applies only to primary keys, as when doing + # __in=qs, we are going to turn this into __in=qs.values('pk') later on. return check(target_opts) or ( getattr(field, "primary_key", False) and check(field.model._meta) ) diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py index b0b2ac5583..f72ba907ad 100644 --- a/django/db/models/sql/compiler.py +++ b/django/db/models/sql/compiler.py @@ -52,10 +52,11 @@ class SQLCompiler: # they would return an empty result set. self.elide_empty = elide_empty self.quote_cache = {"*": "*"} - # The select, klass_info, and annotations are needed by QuerySet.iterator() - # these are set as a side-effect of executing the query. 
Note that we calculate - # separately a list of extra select columns needed for grammatical correctness - # of the query, but these columns are not included in self.select. + # The select, klass_info, and annotations are needed by + # QuerySet.iterator() these are set as a side-effect of executing the + # query. Note that we calculate separately a list of extra select + # columns needed for grammatical correctness of the query, but these + # columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None @@ -946,9 +947,9 @@ class SQLCompiler: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination - # of order_by() and distinct(), forcing the ordering expressions - # to be selected as well. Wrap the query in another subquery - # to exclude extraneous selects. + # of order_by() and distinct(), forcing the ordering + # expressions to be selected as well. Wrap the query in another + # subquery to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): @@ -2107,8 +2108,8 @@ class SQLUpdateCompiler(SQLCompiler): # If the result_type is NO_RESULTS then the aux_row_count is None. aux_row_count = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_row_count: - # Returns the row count for any related updates as the number of - # rows updated. + # Returns the row count for any related updates as the number + # of rows updated. 
row_count = aux_row_count is_empty = False return row_count diff --git a/django/db/models/sql/datastructures.py b/django/db/models/sql/datastructures.py index be6934485c..ffdd36c0c8 100644 --- a/django/db/models/sql/datastructures.py +++ b/django/db/models/sql/datastructures.py @@ -37,8 +37,8 @@ class Join: - table_alias (possible alias for the table, can be None) - join_type (can be None for those entries that aren't joined from anything) - - parent_alias (which table is this join's parent, can be None similarly - to join_type) + - parent_alias (which table is this join's parent, can be None + similarly to join_type) - as_sql() - relabeled_clone() """ @@ -76,7 +76,8 @@ class Join: def as_sql(self, compiler, connection): """ Generate the full - LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params + LEFT OUTER JOIN sometable ON sometable.somecol = + othertable.othercol, params clause for this join. """ join_conditions = [] diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py index 20dbf7cfaa..5e87f65e7c 100644 --- a/django/db/models/sql/query.py +++ b/django/db/models/sql/query.py @@ -308,10 +308,9 @@ class Query(BaseExpression): self.alias_map = {} # Whether to provide alias to columns during reference resolving. self.alias_cols = alias_cols - # Sometimes the query contains references to aliases in outer queries (as - # a result of split_exclude). Correct alias quoting needs to know these - # aliases too. - # Map external tables to whether they are aliased. + # Sometimes the query contains references to aliases in outer queries + # (as a result of split_exclude). Correct alias quoting needs to know + # these aliases too. Map external tables to whether they are aliased. self.external_aliases = {} self.table_map = {} # Maps table names to list of aliases. 
self.used_aliases = set() @@ -593,8 +592,8 @@ class Query(BaseExpression): and not inner_query.annotation_select_mask ): # In case of Model.objects[0:3].count(), there would be no - # field selected in the inner query, yet we must use a subquery. - # So, make sure at least one field is selected. + # field selected in the inner query, yet we must use a + # subquery. So, make sure at least one field is selected. inner_query.select = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) @@ -932,10 +931,11 @@ class Query(BaseExpression): an outer join. If 'unconditional' is False, only promote the join if it is nullable or the parent join is an outer join. - The children promotion is done to avoid join chains that contain a LOUTER - b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, - then we must also promote b->c automatically, or otherwise the promotion - of a->b doesn't actually change anything in the query results. + The children promotion is done to avoid join chains that contain a + LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b + is promoted, then we must also promote b->c automatically, or otherwise + the promotion of a->b doesn't actually change anything in the query + results. """ aliases = list(aliases) while aliases: @@ -1228,7 +1228,8 @@ class Query(BaseExpression): if FORBIDDEN_ALIAS_PATTERN.search(alias): raise ValueError( "Column aliases cannot contain whitespace characters, quotation marks, " - # RemovedInDjango70Warning: When the deprecation ends, replace with: + # RemovedInDjango70Warning: When the deprecation ends, replace + # with: # "semicolons, percent signs, or SQL comments." "semicolons, or SQL comments." ) @@ -1256,7 +1257,8 @@ class Query(BaseExpression): def resolve_expression(self, query, *args, **kwargs): clone = self.clone() - # Subqueries need to use a different set of aliases than the outer query. + # Subqueries need to use a different set of aliases than the outer + # query. 
clone.bump_prefix(query) clone.subquery = True clone.where.resolve_expression(query, *args, **kwargs) @@ -1893,9 +1895,9 @@ class Query(BaseExpression): Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function - that takes a field and alias and is equivalent to `field.get_col(alias)` - in the simple case but wraps field transforms if they were included in - names. + that takes a field and alias and is equivalent to + `field.get_col(alias)` in the simple case but wraps field transforms if + they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to @@ -2052,7 +2054,8 @@ class Query(BaseExpression): # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In - # that case we need to return a Ref to the subquery's annotation. + # that case we need to return a Ref to the subquery's + # annotation. if name not in self.annotation_select: raise FieldError( "Cannot aggregate over the '%s' alias. Use annotate() " @@ -2127,8 +2130,8 @@ class Query(BaseExpression): alias = col.alias if alias in can_reuse: pk = select_field.model._meta.pk - # Need to add a restriction so that outer query's filters are in effect for - # the subquery, too. + # Need to add a restriction so that outer query's filters are in + # effect for the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup("exact") # Note that the query.select[0].alias is different from alias @@ -2204,7 +2207,8 @@ class Query(BaseExpression): """ Return True if adding filters to this instance is still possible. - Typically, this means no limits or offsets have been put on the results. 
+ Typically, this means no limits or offsets have been put on the + results. """ return not self.is_sliced @@ -2440,8 +2444,8 @@ class Query(BaseExpression): """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar - # splitting and handling when computing the SQL column names (as part of - # get_columns()). + # splitting and handling when computing the SQL column names (as part + # of get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. @@ -2630,8 +2634,8 @@ class Query(BaseExpression): cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a - boolean indicating if the joins in the prefix contain a LEFT OUTER join. - _""" + boolean indicating if the joins in the prefix contain a LEFT OUTER + join. _""" all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) @@ -2678,9 +2682,10 @@ class Query(BaseExpression): if extra_restriction: self.where.add(extra_restriction, AND) else: - # TODO: It might be possible to trim more joins from the start of the - # inner query if it happens to have a longer join chain containing the - # values in select_fields. Lets punt this one for now. + # TODO: It might be possible to trim more joins from the start of + # the inner query if it happens to have a longer join chain + # containing the values in select_fields. Lets punt this one for + # now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a join_class instead of a diff --git a/django/db/models/sql/subqueries.py b/django/db/models/sql/subqueries.py index 9cb971b38f..2705114a54 100644 --- a/django/db/models/sql/subqueries.py +++ b/django/db/models/sql/subqueries.py @@ -1,5 +1,6 @@ """ -Query subclasses which provide extra functionality beyond simple data retrieval. 
+Query subclasses which provide extra functionality beyond simple data +retrieval. """ from django.core.exceptions import FieldError @@ -116,7 +117,8 @@ class UpdateQuery(Query): if field.generated: continue if hasattr(val, "resolve_expression"): - # Resolve expressions here so that annotations are no longer needed + # Resolve expressions here so that annotations are no longer + # needed val = val.resolve_expression(self, allow_joins=False, for_save=True) self.values.append((field, model, val)) diff --git a/django/db/transaction.py b/django/db/transaction.py index 0c2eee8e73..1710d1ef17 100644 --- a/django/db/transaction.py +++ b/django/db/transaction.py @@ -252,9 +252,9 @@ class Atomic(ContextDecorator): # minimize overhead for the database server. connection.savepoint_commit(sid) except Error: - # If rolling back to a savepoint fails, mark for - # rollback at a higher level and avoid shadowing - # the original exception. + # If rolling back to a savepoint fails, mark + # for rollback at a higher level and avoid + # shadowing the original exception. connection.needs_rollback = True raise else: @@ -270,8 +270,8 @@ class Atomic(ContextDecorator): connection.close() raise else: - # This flag will be set to True again if there isn't a savepoint - # allowing to perform the rollback at this level. + # This flag will be set to True again if there isn't a + # savepoint allowing to perform the rollback at this level. connection.needs_rollback = False if connection.in_atomic_block: # Roll back to savepoint if there is one, mark for rollback diff --git a/django/db/utils.py b/django/db/utils.py index faaf3bf862..4541edbe5a 100644 --- a/django/db/utils.py +++ b/django/db/utils.py @@ -224,7 +224,8 @@ class ConnectionRouter: try: method = getattr(router, action) except AttributeError: - # If the router doesn't have a method, skip to the next one. + # If the router doesn't have a method, skip to the next + # one. 
pass else: chosen_db = method(model, **hints) diff --git a/django/dispatch/__init__.py b/django/dispatch/__init__.py index a615f9905a..bd219b4809 100644 --- a/django/dispatch/__init__.py +++ b/django/dispatch/__init__.py @@ -1,7 +1,8 @@ """Multi-consumer multi-producer dispatching mechanism -Originally based on pydispatch (BSD) https://pypi.org/project/PyDispatcher/2.0.1/ -See license.txt for original license. +Originally based on pydispatch (BSD) +https://pypi.org/project/PyDispatcher/2.0.1/ See license.txt for original +license. Heavily modified for Django's purposes. """ diff --git a/django/dispatch/dispatcher.py b/django/dispatch/dispatcher.py index 4b962ce524..eed321c690 100644 --- a/django/dispatch/dispatcher.py +++ b/django/dispatch/dispatcher.py @@ -29,7 +29,8 @@ class Signal: Internal attributes: receivers: - [((id(receiver), id(sender)), ref(receiver), ref(sender), is_async)] + [((id(receiver), id(sender)), ref(receiver), ref(sender), + is_async)] sender_receivers_cache: WeakKeyDictionary[sender, list[receiver]] """ @@ -75,12 +76,12 @@ class Signal: weak Whether to use weak references to the receiver. By default, the module will attempt to use weak references to the receiver - objects. If this parameter is false, then strong references will - be used. + objects. If this parameter is false, then strong references + will be used. dispatch_uid - An identifier used to uniquely identify a particular instance of - a receiver. This will usually be a string, though it may be + An identifier used to uniquely identify a particular instance + of a receiver. This will usually be a string, though it may be anything hashable. """ from django.conf import settings @@ -133,8 +134,8 @@ class Signal: """ Disconnect receiver from sender for signal. - If weak references are used, disconnect need not be called. The receiver - will be removed from dispatch automatically. + If weak references are used, disconnect need not be called. 
The + receiver will be removed from dispatch automatically. Arguments: @@ -173,9 +174,9 @@ class Signal: """ Send signal from sender to all connected receivers. - If any receiver raises an error, the error propagates back through send, - terminating the dispatch loop. So it's possible that all receivers - won't be called if an error is raised. + If any receiver raises an error, the error propagates back through + send, terminating the dispatch loop. So it's possible that all + receivers won't be called if an error is raised. If any receivers are asynchronous, they are called after all the synchronous receivers via a single call to async_to_sync(). They are @@ -292,8 +293,8 @@ class Signal: Arguments: sender - The sender of the signal. Can be any Python object (normally one - registered with a connect if you actually want something to + The sender of the signal. Can be any Python object (normally + one registered with a connect if you actually want something to occur). named @@ -357,8 +358,8 @@ class Signal: Arguments: sender - The sender of the signal. Can be any Python object (normally one - registered with a connect if you actually want something to + The sender of the signal. Can be any Python object (normally + one registered with a connect if you actually want something to occur). named @@ -439,8 +440,9 @@ class Signal: receivers = None if self.use_caching and not self._dead_receivers: receivers = self.sender_receivers_cache.get(sender) - # We could end up here with NO_RECEIVERS even if we do check this case in - # .send() prior to calling _live_receivers() due to concurrent .send() call. + # We could end up here with NO_RECEIVERS even if we do check this + # case in .send() prior to calling _live_receivers() due to + # concurrent .send() call. 
if receivers is NO_RECEIVERS: return [], [] if receivers is None: diff --git a/django/forms/boundfield.py b/django/forms/boundfield.py index c0324d5c1d..f6b721c72a 100644 --- a/django/forms/boundfield.py +++ b/django/forms/boundfield.py @@ -114,7 +114,8 @@ class BoundField(RenderableFieldMixin): def as_text(self, attrs=None, **kwargs): """ - Return a string of HTML for representing this as an <input type="text">. + Return a string of HTML for representing this as an <input + type="text">. """ return self.as_widget(TextInput(), attrs, **kwargs) @@ -124,7 +125,8 @@ class BoundField(RenderableFieldMixin): def as_hidden(self, attrs=None, **kwargs): """ - Return a string of HTML for representing this as an <input type="hidden">. + Return a string of HTML for representing this as an <input + type="hidden">. """ return self.as_widget(self.field.hidden_widget(), attrs, **kwargs) @@ -181,7 +183,8 @@ class BoundField(RenderableFieldMixin): ) # Only add the suffix if the label does not end in punctuation. # Translators: If found as last label character, these punctuation - # characters will prevent the default label_suffix to be appended to the label + # characters will prevent the default label_suffix to be appended to + # the label if label_suffix and contents and contents[-1] not in _(":?.!"): contents = format_html("{}{}", contents, label_suffix) widget = self.field.widget @@ -239,7 +242,8 @@ class BoundField(RenderableFieldMixin): def auto_id(self): """ Calculate and return the ID attribute for this BoundField, if the - associated Form has specified auto_id. Return an empty string otherwise. + associated Form has specified auto_id. Return an empty string + otherwise. 
""" auto_id = self.form.auto_id # Boolean or string if auto_id and "%s" in str(auto_id): diff --git a/django/forms/fields.py b/django/forms/fields.py index 04aa2039fd..182d63c9b4 100644 --- a/django/forms/fields.py +++ b/django/forms/fields.py @@ -126,15 +126,18 @@ class Field: # help_text -- An optional string to use as "help text" for this Field. # error_messages -- An optional dictionary to override the default # messages that the field will raise. - # show_hidden_initial -- Boolean that specifies if it is needed to render a + # show_hidden_initial -- Boolean that specifies if it is needed to + # render a # hidden widget with initial value after widget. # validators -- List of additional validators to use # localize -- Boolean that specifies if the field should be localized. - # disabled -- Boolean that specifies whether the field is disabled, that + # disabled -- Boolean that specifies whether the field is disabled, + # that # is its widget is shown in the form but not editable. # label_suffix -- Suffix to be added to the label. Overrides # form's label_suffix. - # bound_field_class -- BoundField class to use in Field.get_bound_field. + # bound_field_class -- BoundField class to use in + # Field.get_bound_field. self.required, self.label, self.initial = required, label, initial self.show_hidden_initial = show_hidden_initial self.help_text = help_text @@ -727,8 +730,8 @@ class ImageField(FileField): from PIL import Image - # We need to get a file object for Pillow. We might have a path or we might - # have to read the data into memory. + # We need to get a file object for Pillow. We might have a path or we + # might have to read the data into memory. if hasattr(data, "temporary_file_path"): file = data.temporary_file_path() else: @@ -929,7 +932,8 @@ class TypedChoiceField(ChoiceField): def _coerce(self, value): """ - Validate that the value can be coerced to the right type (if not empty). 
+ Validate that the value can be coerced to the right type (if not + empty). """ if value == self.empty_value or value in self.empty_values: return self.empty_value diff --git a/django/forms/forms.py b/django/forms/forms.py index d05bf4bb9e..760ba7b767 100644 --- a/django/forms/forms.py +++ b/django/forms/forms.py @@ -137,12 +137,12 @@ class BaseForm(RenderableFormMixin): """ Rearrange the fields according to field_order. - field_order is a list of field names specifying the order. Append fields - not included in the list in the default order for backward compatibility - with subclasses not overriding field_order. If field_order is None, - keep all fields in the order defined in the class. Ignore unknown - fields in field_order to allow disabling fields in form subclasses - without redefining ordering. + field_order is a list of field names specifying the order. Append + fields not included in the list in the default order for backward + compatibility with subclasses not overriding field_order. If + field_order is None, keep all fields in the order defined in the class. + Ignore unknown fields in field_order to allow disabling fields in form + subclasses without redefining ordering. """ if field_order is None: return @@ -367,10 +367,10 @@ class BaseForm(RenderableFormMixin): def clean(self): """ - Hook for doing any extra form-wide cleaning after Field.clean() has been - called on every field. Any ValidationError raised by this method will - not be associated with a particular field; it will have a special-case - association with the field named '__all__'. + Hook for doing any extra form-wide cleaning after Field.clean() has + been called on every field. Any ValidationError raised by this method + will not be associated with a particular field; it will have a + special-case association with the field named '__all__'. 
""" return self.cleaned_data diff --git a/django/forms/formsets.py b/django/forms/formsets.py index 94aebe4924..054cc0bc0a 100644 --- a/django/forms/formsets.py +++ b/django/forms/formsets.py @@ -307,10 +307,10 @@ class BaseFormSet(RenderableFormMixin): raise AttributeError( "'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__ ) - # Construct _ordering, which is a list of (form_index, order_field_value) - # tuples. After constructing this list, we'll sort it by order_field_value - # so we have a way to get to the form indexes in the order specified - # by the form data. + # Construct _ordering, which is a list of (form_index, + # order_field_value) tuples. After constructing this list, we'll sort + # it by order_field_value so we have a way to get to the form indexes + # in the order specified by the form data. if not hasattr(self, "_ordering"): self._ordering = [] for i, form in enumerate(self.forms): diff --git a/django/forms/models.py b/django/forms/models.py index 574399ccb1..7fe803624e 100644 --- a/django/forms/models.py +++ b/django/forms/models.py @@ -70,7 +70,8 @@ def construct_instance(form, instance, fields=None, exclude=None): if exclude and f.name in exclude: continue # Leave defaults for fields that aren't in POST data, except for - # checkbox inputs because they don't appear in POST data if not checked. + # checkbox inputs because they don't appear in POST data if not + # checked. if ( f.has_default() and form[f.name].field.widget.value_omitted_from_data( @@ -167,7 +168,8 @@ def fields_for_model( ``formfield_callback`` is a callable that takes a model field and returns a form field. - ``localized_fields`` is a list of names of fields which should be localized. + ``localized_fields`` is a list of names of fields which should be + localized. ``labels`` is a dictionary of model field names mapped to a label. 
@@ -422,9 +424,9 @@ class BaseModelForm(BaseForm, AltersData): # Exclude empty fields that are not required by the form, if the # underlying model field is required. This keeps the model field # from raising a required error. Note: don't exclude the field from - # validation if the model field allows blanks. If it does, the blank - # value may be included in a unique check, so cannot be excluded - # from validation. + # validation if the model field allows blanks. If it does, the + # blank value may be included in a unique check, so cannot be + # excluded from validation. else: form_field = self.fields[field] field_value = self.cleaned_data.get(field) @@ -612,7 +614,8 @@ def modelform_factory( ``widgets`` is a dictionary of model field names mapped to a widget. - ``localized_fields`` is a list of names of fields which should be localized. + ``localized_fields`` is a list of names of fields which should be + localized. ``formfield_callback`` is a callable that takes a model field and returns a form field. 
@@ -860,7 +863,8 @@ class BaseModelFormSet(BaseFormSet, AltersData): for d in row_data ) if row_data and None not in row_data: - # if we've already seen it then we have a uniqueness failure + # if we've already seen it then we have a uniqueness + # failure if row_data in seen_data: # poke error messages into the right places and mark # the form as invalid @@ -887,7 +891,8 @@ class BaseModelFormSet(BaseFormSet, AltersData): and form.cleaned_data[field] is not None and form.cleaned_data[unique_for] is not None ): - # if it's a date lookup we need to get the data for all the fields + # if it's a date lookup we need to get the data for all the + # fields if lookup == "date": date = form.cleaned_data[unique_for] date_data = (date.year, date.month, date.day) @@ -896,7 +901,8 @@ class BaseModelFormSet(BaseFormSet, AltersData): else: date_data = (getattr(form.cleaned_data[unique_for], lookup),) data = (form.cleaned_data[field], *date_data) - # if we've already seen it then we have a uniqueness failure + # if we've already seen it then we have a uniqueness + # failure if data in seen_data: # poke error messages into the right places and mark # the form as invalid @@ -1181,7 +1187,8 @@ class BaseInlineFormSet(BaseModelFormSet): kwargs = {"pk_field": True} else: # The foreign key field might not be on the form, so we poke at the - # Model field to get the label, since we need that for error messages. + # Model field to get the label, since we need that for error + # messages. name = self.fk.name kwargs = { "label": getattr( @@ -1553,12 +1560,12 @@ class ModelChoiceField(ChoiceField): return self._choices # Otherwise, execute the QuerySet in self.queryset to determine the - # choices dynamically. Return a fresh ModelChoiceIterator that has not been - # consumed. Note that we're instantiating a new ModelChoiceIterator *each* - # time _get_choices() is called (and, thus, each time self.choices is - # accessed) so that we can ensure the QuerySet has not been consumed. 
This - # construct might look complicated but it allows for lazy evaluation of - # the queryset. + # choices dynamically. Return a fresh ModelChoiceIterator that has not + # been consumed. Note that we're instantiating a new + # ModelChoiceIterator *each* time _get_choices() is called (and, thus, + # each time self.choices is accessed) so that we can ensure the + # QuerySet has not been consumed. This construct might look complicated + # but it allows for lazy evaluation of the queryset. return self.iterator(self) choices = property(_get_choices, ChoiceField.choices.fset) diff --git a/django/forms/widgets.py b/django/forms/widgets.py index 9b5ad1b2b9..5a25b66e9a 100644 --- a/django/forms/widgets.py +++ b/django/forms/widgets.py @@ -71,7 +71,8 @@ class MediaAsset: self.attributes = attributes def __eq__(self, other): - # Compare the path only, to ensure performant comparison in Media.merge. + # Compare the path only, to ensure performant comparison in + # Media.merge. return (self.__class__ is other.__class__ and self.path == other.path) or ( isinstance(other, str) and self._path == other ) @@ -161,8 +162,8 @@ class Media: ] def render_css(self): - # To keep rendering order consistent, we can't just iterate over items(). - # We need to sort the keys, and iterate over the sorted list. + # To keep rendering order consistent, we can't just iterate over + # items(). We need to sort the keys, and iterate over the sorted list. media = sorted(self._css) return chain.from_iterable( [ @@ -585,7 +586,8 @@ class ClearableFileInput(FileInput): # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. 
return FILE_INPUT_CONTRADICTION - # False signals to clear any existing value, as opposed to just None + # False signals to clear any existing value, as opposed to just + # None return False return upload diff --git a/django/http/multipartparser.py b/django/http/multipartparser.py index 4ee8401eb6..531f9a0468 100644 --- a/django/http/multipartparser.py +++ b/django/http/multipartparser.py @@ -168,7 +168,8 @@ class MultiPartParser: # Instantiate the parser and stream: stream = LazyStream(ChunkIter(self._input_data, self._chunk_size)) - # Whether or not to signal a file-completion at the beginning of the loop. + # Whether or not to signal a file-completion at the beginning of the + # loop. old_field_name = None counters = [0] * len(handlers) @@ -418,8 +419,8 @@ class LazyStream: The LazyStream wrapper allows one to get and "unget" bytes from a stream. Given a producer object (an iterator that yields bytestrings), the - LazyStream object will support iteration, reading, and keeping a "look-back" - variable in case you need to "unget" some bytes. + LazyStream object will support iteration, reading, and keeping a + "look-back" variable in case you need to "unget" some bytes. """ def __init__(self, producer, length=None): diff --git a/django/http/request.py b/django/http/request.py index ff5974770f..c8adde768d 100644 --- a/django/http/request.py +++ b/django/http/request.py @@ -90,7 +90,9 @@ class HttpRequest: @cached_property def accepted_types(self): - """Return a list of MediaType instances, in order of preference (quality).""" + """ + Return a list of MediaType instances, in order of preference (quality). + """ header_value = self.headers.get("Accept", "*/*") return sorted( ( @@ -105,7 +107,8 @@ class HttpRequest: @cached_property def accepted_types_by_precedence(self): """ - Return a list of MediaType instances, in order of precedence (specificity). + Return a list of MediaType instances, in order of precedence + (specificity). 
""" return sorted( self.accepted_types, @@ -347,7 +350,8 @@ class HttpRequest: @property def upload_handlers(self): if not self._upload_handlers: - # If there are no upload handlers defined, initialize them from settings. + # If there are no upload handlers defined, initialize them from + # settings. self._initialize_handlers() return self._upload_handlers @@ -380,7 +384,8 @@ class HttpRequest: "You cannot access body after reading from request's data stream" ) - # Limit the maximum request data size that will be handled in-memory. + # Limit the maximum request data size that will be handled + # in-memory. if ( settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and int(self.META.get("CONTENT_LENGTH") or 0) @@ -404,7 +409,9 @@ class HttpRequest: self._files = MultiValueDict() def _load_post_and_files(self): - """Populate self._post and self._files if the content-type is a form type""" + """ + Populate self._post and self._files if the content-type is a form type + """ if self.method != "POST": self._post, self._files = ( QueryDict(encoding=self._encoding), @@ -543,8 +550,8 @@ class QueryDict(MultiValueDict): By default QueryDicts are immutable, though the copy() method will always return a mutable copy. - Both keys and values set on this class are converted from the given encoding - (DEFAULT_CHARSET by default) to str. + Both keys and values set on this class are converted from the given + encoding (DEFAULT_CHARSET by default) to str. """ # These are both reset in __init__, but is specified here at the class @@ -562,7 +569,8 @@ class QueryDict(MultiValueDict): "max_num_fields": settings.DATA_UPLOAD_MAX_NUMBER_FIELDS, } if isinstance(query_string, bytes): - # query_string normally contains URL-encoded data, a subset of ASCII. + # query_string normally contains URL-encoded data, a subset of + # ASCII. 
try: query_string = query_string.decode(self.encoding) except UnicodeDecodeError: @@ -747,7 +755,8 @@ class MediaType: return False if bool(self.range_params) == bool(other.range_params): - # If both have params or neither have params, they must be identical. + # If both have params or neither have params, they must be + # identical. result = self.range_params == other.range_params else: # If self has params and other does not, it's a match. diff --git a/django/http/response.py b/django/http/response.py index 6d09bc87e2..40b2d7089d 100644 --- a/django/http/response.py +++ b/django/http/response.py @@ -323,7 +323,8 @@ class HttpResponseBase: # See https://docs.python.org/library/io.html#io.IOBase # The WSGI server must call this method upon completion of the request. - # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html + # See + # http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html def close(self): for closer in self._resource_closers: try: diff --git a/django/middleware/cache.py b/django/middleware/cache.py index df26def6b4..10fff365af 100644 --- a/django/middleware/cache.py +++ b/django/middleware/cache.py @@ -29,8 +29,8 @@ More details about how the caching works: of the response's "Cache-Control" header, falling back to the CACHE_MIDDLEWARE_SECONDS setting if the section was not found. -* This middleware expects that a HEAD request is answered with the same response - headers exactly like the corresponding GET request. +* This middleware expects that a HEAD request is answered with the same + response headers exactly like the corresponding GET request. * When a hit occurs, a shallow copy of the original response object is returned from process_request. @@ -163,7 +163,8 @@ class FetchFromCacheMiddleware(MiddlewareMixin): request._cache_update_cache = True return None # No cache information available, need to rebuild. 
response = self.cache.get(cache_key) - # if it wasn't found and we are looking for a HEAD, try looking just for that + # if it wasn't found and we are looking for a HEAD, try looking just + # for that if response is None and request.method == "HEAD": cache_key = get_cache_key( request, self.key_prefix, "HEAD", cache=self.cache diff --git a/django/middleware/csrf.py b/django/middleware/csrf.py index 2e4b30ed54..c2800cfad4 100644 --- a/django/middleware/csrf.py +++ b/django/middleware/csrf.py @@ -420,7 +420,8 @@ class CsrfViewMiddleware(MiddlewareMixin): if getattr(callback, "csrf_exempt", False): return None - # Assume that anything not defined as 'safe' by RFC 9110 needs protection + # Assume that anything not defined as 'safe' by RFC 9110 needs + # protection if request.method in ("GET", "HEAD", "OPTIONS", "TRACE"): return self._accept(request) diff --git a/django/middleware/http.py b/django/middleware/http.py index 84c5466bb6..72ef52a126 100644 --- a/django/middleware/http.py +++ b/django/middleware/http.py @@ -6,8 +6,9 @@ from django.utils.http import parse_http_date_safe class ConditionalGetMiddleware(MiddlewareMixin): """ Handle conditional GET operations. If the response has an ETag or - Last-Modified header and the request has If-None-Match or If-Modified-Since, - replace the response with HttpNotModified. Add an ETag header if needed. + Last-Modified header and the request has If-None-Match or + If-Modified-Since, replace the response with HttpNotModified. Add an ETag + header if needed. """ def process_response(self, request, response): diff --git a/django/template/base.py b/django/template/base.py index 121a47d638..3e8a59fbe7 100644 --- a/django/template/base.py +++ b/django/template/base.py @@ -482,7 +482,8 @@ class Parser: nodelist = NodeList() while self.tokens: token = self.next_token() - # Use the raw values here for TokenType.* for a tiny performance boost. + # Use the raw values here for TokenType.* for a tiny performance + # boost. 
token_type = token.token_type.value if token_type == 0: # TokenType.TEXT self.extend_nodelist(nodelist, TextNode(token.contents), token) @@ -845,8 +846,8 @@ class Variable: try: self.literal = mark_safe(unescape_string_literal(var)) except ValueError: - # Otherwise we'll set self.lookups so that resolve() knows we're - # dealing with a bonafide variable + # Otherwise we'll set self.lookups so that resolve() knows + # we're dealing with a bonafide variable if VARIABLE_ATTRIBUTE_SEPARATOR + "_" in var or var[0] == "_": raise TemplateSyntaxError( "Variables and attributes may " @@ -907,7 +908,8 @@ class Variable: # numpy < 1.9 and 1.9+ respectively except (TypeError, AttributeError, KeyError, ValueError, IndexError): try: # attribute lookup - # Don't return class attributes if the class is the context: + # Don't return class attributes if the class is the + # context: if isinstance(current, BaseContext) and getattr( type(current), bit ): diff --git a/django/template/context.py b/django/template/context.py index bacce88173..60f16f9be0 100644 --- a/django/template/context.py +++ b/django/template/context.py @@ -81,7 +81,10 @@ class BaseContext: context[key] = value def __getitem__(self, key): - "Get a variable's value, starting at the current context and going upward" + """ + Get a variable's value, starting at the current context and going + upward + """ for d in reversed(self.dicts): if key in d: return d[key] @@ -177,16 +180,17 @@ class RenderContext(BaseContext): """ A stack container for storing Template state. - RenderContext simplifies the implementation of template Nodes by providing a - safe place to store state between invocations of a node's `render` method. + RenderContext simplifies the implementation of template Nodes by providing + a safe place to store state between invocations of a node's `render` + method. The RenderContext also provides scoping rules that are more sensible for 'template local' variables. 
The render context stack is pushed before each template is rendered, creating a fresh scope with nothing in it. Name - resolution fails if a variable is not found at the top of the RequestContext - stack. Thus, variables are local to a specific template and don't affect the - rendering of other templates as they would if they were stored in the normal - template context. + resolution fails if a variable is not found at the top of the + RequestContext stack. Thus, variables are local to a specific template and + don't affect the rendering of other templates as they would if they were + stored in the normal template context. """ template = None diff --git a/django/template/context_processors.py b/django/template/context_processors.py index f9e5f218e4..214972de53 100644 --- a/django/template/context_processors.py +++ b/django/template/context_processors.py @@ -17,8 +17,8 @@ from django.utils.functional import SimpleLazyObject, lazy def csrf(request): """ - Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if - it has not been provided by either a view decorator or the middleware + Context processor that provides a CSRF token, or the string 'NOTPROVIDED' + if it has not been provided by either a view decorator or the middleware """ def _get_val(): diff --git a/django/template/defaultfilters.py b/django/template/defaultfilters.py index 66c6e76d20..b50b790fc1 100644 --- a/django/template/defaultfilters.py +++ b/django/template/defaultfilters.py @@ -169,9 +169,10 @@ def floatformat(text, arg=-1): # Exponent values can be "F", "n", "N". number_of_digits_and_exponent_sum = 0 - # Values with more than 200 digits, or with a large exponent, are returned "as is" - # to avoid high memory consumption and potential denial-of-service attacks. - # The cut-off of 200 is consistent with django.utils.numberformat.floatformat(). 
+ # Values with more than 200 digits, or with a large exponent, are returned + # "as is" to avoid high memory consumption and potential denial-of-service + # attacks. The cut-off of 200 is consistent with + # django.utils.numberformat.floatformat(). if number_of_digits_and_exponent_sum > 200: return input_val @@ -281,7 +282,8 @@ def stringformat(value, arg): This specifier uses Python string formatting syntax, with the exception that the leading "%" is dropped. - See https://docs.python.org/library/stdtypes.html#printf-style-string-formatting + See + https://docs.python.org/library/stdtypes.html#printf-style-string-formatting for documentation of Python string formatting. """ if isinstance(value, tuple): diff --git a/django/template/defaulttags.py b/django/template/defaulttags.py index 98fa43dd69..a20598152c 100644 --- a/django/template/defaulttags.py +++ b/django/template/defaulttags.py @@ -205,8 +205,8 @@ class ForNode(Node): values = reversed(values) num_loopvars = len(self.loopvars) unpack = num_loopvars > 1 - # Create a forloop value in the context. We'll update counters on each - # iteration just below. + # Create a forloop value in the context. We'll update counters on + # each iteration just below. loop_dict = context["forloop"] = { "parentloop": parentloop, "length": len_values, @@ -224,8 +224,8 @@ class ForNode(Node): pop_context = False if unpack: - # If there are multiple loop variables, unpack the item into - # them. + # If there are multiple loop variables, unpack the item + # into them. try: len_item = len(item) except TypeError: # not an iterable @@ -293,8 +293,8 @@ class IfChangedNode(Node): # changes. if "forloop" in context: # Ifchanged is bound to the local for loop. - # When there is a loop-in-loop, the state is bound to the inner loop, - # so it resets when the outer loop continues. + # When there is a loop-in-loop, the state is bound to the inner + # loop, so it resets when the outer loop continues. 
return context["forloop"] else: # Using ifchanged outside loops. Effectively this is a no-op @@ -1274,10 +1274,10 @@ def regroup(parser, token): and ``Trumpet``, and ``list`` is the list of musicians who play this instrument. - Note that ``{% regroup %}`` does not work when the list to be grouped is not - sorted by the key you are grouping by! This means that if your list of - musicians was not sorted by instrument, you'd need to make sure it is sorted - before using it, i.e.:: + Note that ``{% regroup %}`` does not work when the list to be grouped is + not sorted by the key you are grouping by! This means that if your list of + musicians was not sorted by instrument, you'd need to make sure it is + sorted before using it, i.e.:: {% regroup musicians|dictsort:"instrument" by instrument as grouped %} """ diff --git a/django/template/library.py b/django/template/library.py index 1a65087e92..3fc4c5ebfc 100644 --- a/django/template/library.py +++ b/django/template/library.py @@ -72,7 +72,8 @@ class Library: # @register.filter return self.filter_function(name, **flags) else: - # @register.filter('somename') or @register.filter(name='somename') + # @register.filter('somename') or + # @register.filter(name='somename') def dec(func): return self.filter(name, func, **flags) diff --git a/django/template/loader_tags.py b/django/template/loader_tags.py index c3eb66b577..3a0d054e62 100644 --- a/django/template/loader_tags.py +++ b/django/template/loader_tags.py @@ -58,7 +58,8 @@ class BlockNode(Node): push = block = block_context.pop(self.name) if block is None: block = self - # Create new block so we can store context without thread-safety issues. + # Create new block so we can store context without + # thread-safety issues. 
block = type(self)(block.name, block.nodelist) block.context = context context["block"] = block diff --git a/django/template/smartif.py b/django/template/smartif.py index da32b38277..f6e8323bed 100644 --- a/django/template/smartif.py +++ b/django/template/smartif.py @@ -11,8 +11,8 @@ Parser and utilities for the smart 'if' tag class TokenBase: """ - Base class for operators and literals, mainly for debugging and for throwing - syntax errors. + Base class for operators and literals, mainly for debugging and for + throwing syntax errors. """ id = None # node/token type name diff --git a/django/test/selenium.py b/django/test/selenium.py index be8f4a815f..264ca7f713 100644 --- a/django/test/selenium.py +++ b/django/test/selenium.py @@ -29,7 +29,8 @@ class SeleniumTestCaseBase(type(LiveServerTestCase)): multiple browsers specs are provided (e.g. --selenium=firefox,chrome). """ test_class = super().__new__(cls, name, bases, attrs) - # If the test class is either browser-specific or a test base, return it. + # If the test class is either browser-specific or a test base, return + # it. if test_class.browser or not any( name.startswith("test") and callable(value) for name, value in attrs.items() ): @@ -62,7 +63,8 @@ class SeleniumTestCaseBase(type(LiveServerTestCase)): ) setattr(module, browser_test_class.__name__, browser_test_class) return test_class - # If no browsers were specified, skip this class (it'll still be discovered). + # If no browsers were specified, skip this class (it'll still be + # discovered). return unittest.skip("No browsers specified.")(test_class) @classmethod @@ -214,8 +216,8 @@ class SeleniumTestCase(LiveServerTestCase, metaclass=SeleniumTestCaseBase): if features is not None: params["features"] = features - # Not using .execute_cdp_cmd() as it isn't supported by the remote web driver - # when using --selenium-hub. + # Not using .execute_cdp_cmd() as it isn't supported by the remote web + # driver when using --selenium-hub. 
self.selenium.execute( driver_command="executeCdpCommand", params={"cmd": "Emulation.setEmulatedMedia", "params": params}, @@ -242,7 +244,9 @@ class SeleniumTestCase(LiveServerTestCase, metaclass=SeleniumTestCaseBase): self.selenium.save_screenshot(path) def get_browser_logs(self, source=None, level="ALL"): - """Return Chrome console logs filtered by level and optionally source.""" + """ + Return Chrome console logs filtered by level and optionally source. + """ try: logs = self.selenium.get_log("browser") except AttributeError: diff --git a/django/test/testcases.py b/django/test/testcases.py index 744303f7a4..5f0c819815 100644 --- a/django/test/testcases.py +++ b/django/test/testcases.py @@ -281,7 +281,8 @@ class SimpleTestCase(unittest.TestCase): # Dynamically created connections are always allowed. and self.alias in connections ): - # Connection has not yet been established, but the alias is not allowed. + # Connection has not yet been established, but the alias is not + # allowed. message = cls._disallowed_database_msg % { "test": f"{cls.__module__}.{cls.__qualname__}", "alias": self.alias, @@ -1231,9 +1232,9 @@ class TransactionTestCase(SimpleTestCase): if self._should_reload_connections(): # Some DB cursors include SQL statements as part of cursor # creation. If you have a test that does a rollback, the effect - # of these statements is lost, which can affect the operation of - # tests (e.g., losing a timezone setting causing objects to be - # created with the wrong time). To make sure this doesn't + # of these statements is lost, which can affect the operation + # of tests (e.g., losing a timezone setting causing objects to + # be created with the wrong time). To make sure this doesn't # happen, get a clean connection at the start of every test. for conn in connections.all(initialized_only=True): conn.close() @@ -1783,9 +1784,9 @@ class LiveServerTestCase(TransactionTestCase): framework, such as Selenium for example, instead of the built-in dummy client. 
It inherits from TransactionTestCase instead of TestCase because the - threads don't share the same transactions (unless if using in-memory sqlite) - and each thread needs to commit all their transactions so that the other - thread can see the changes. + threads don't share the same transactions (unless if using in-memory + sqlite) and each thread needs to commit all their transactions so that the + other thread can see the changes. """ host = "localhost" diff --git a/django/test/utils.py b/django/test/utils.py index 63067c98a6..ea39794e1a 100644 --- a/django/test/utils.py +++ b/django/test/utils.py @@ -637,7 +637,8 @@ def compare_xml(want, got): important. Ignore comment nodes, processing instructions, document type node, and leading and trailing whitespaces. - Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py + Based on + https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py """ _norm_whitespace_re = re.compile(r"[ \t\n][ \t\n]+") diff --git a/django/urls/resolvers.py b/django/urls/resolvers.py index 13043835dd..bad5490dcb 100644 --- a/django/urls/resolvers.py +++ b/django/urls/resolvers.py @@ -170,8 +170,8 @@ class CheckURLMixin: Check that the pattern does not begin with a forward slash. """ if not settings.APPEND_SLASH: - # Skip check as it can be useful to start a URL pattern with a slash - # when APPEND_SLASH=False. + # Skip check as it can be useful to start a URL pattern with a + # slash when APPEND_SLASH=False. return [] if self._regex.startswith(("/", "^/", "^\\/")) and not self._regex.endswith( "/" @@ -325,7 +325,8 @@ class RoutePattern(CheckURLMixin): # Only use regex overhead if there are converters. if self.converters: if match := self.regex.search(path): - # RoutePattern doesn't allow non-named groups so args are ignored. + # RoutePattern doesn't allow non-named groups so args are + # ignored. 
kwargs = match.groupdict() for key, value in kwargs.items(): converter = self.converters[key] @@ -334,7 +335,8 @@ class RoutePattern(CheckURLMixin): except ValueError: return None return path[match.end() :], (), kwargs - # If this is an endpoint, the path should be exactly the same as the route. + # If this is an endpoint, the path should be exactly the same as the + # route. elif self._is_endpoint: if self._route == path: return "", (), {} @@ -680,11 +682,12 @@ class URLResolver: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = {**kwargs, **self.default_kwargs} - # Update the sub_match_dict with the kwargs from the sub_match. + # Update the sub_match_dict with the kwargs from the + # sub_match. sub_match_dict.update(sub_match.kwargs) - # If there are *any* named groups, ignore all non-named groups. - # Otherwise, pass all non-named arguments as positional - # arguments. + # If there are *any* named groups, ignore all non-named + # groups. Otherwise, pass all non-named arguments as + # positional arguments. sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = args + sub_match.args diff --git a/django/urls/utils.py b/django/urls/utils.py index 2bea922917..b5054b163c 100644 --- a/django/urls/utils.py +++ b/django/urls/utils.py @@ -10,9 +10,9 @@ def get_callable(lookup_view): """ Return a callable corresponding to lookup_view. * If lookup_view is already a callable, return it. - * If lookup_view is a string import path that can be resolved to a callable, - import that callable and return it, otherwise raise an exception - (ImportError or ViewDoesNotExist). + * If lookup_view is a string import path that can be resolved to a + callable, import that callable and return it, otherwise raise an + exception (ImportError or ViewDoesNotExist). 
""" if callable(lookup_view): return lookup_view diff --git a/django/utils/_os.py b/django/utils/_os.py index e9e1bcbfaf..5cd8c566a8 100644 --- a/django/utils/_os.py +++ b/django/utils/_os.py @@ -22,7 +22,8 @@ def safe_join(base, *paths): # a) The next character is the path separator (to prevent conditions like # safe_join("/dir", "/../d")) # b) The final path must be the same as the base path. - # c) The base path must be the most root path (meaning either "/" or "C:\\") + # c) The base path must be the most root path (meaning either "/" or + # "C:\\") if ( not normcase(final_path).startswith(normcase(base_path + sep)) and normcase(final_path) != normcase(base_path) diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py index a620d0adb7..c6716215f5 100644 --- a/django/utils/autoreload.py +++ b/django/utils/autoreload.py @@ -468,8 +468,9 @@ class WatchmanReloader(BaseReloader): def _subscribe(self, directory, name, expression): root, rel_path = self._watch_root(directory) - # Only receive notifications of files changing, filtering out other types - # like special files: https://facebook.github.io/watchman/docs/type + # Only receive notifications of files changing, filtering out other + # types like special files: + # https://facebook.github.io/watchman/docs/type only_files_expression = [ "allof", ["anyof", ["type", "f"], ["type", "l"]], diff --git a/django/utils/cache.py b/django/utils/cache.py index 3b014fbe51..f2cbd1d033 100644 --- a/django/utils/cache.py +++ b/django/utils/cache.py @@ -207,7 +207,8 @@ def get_conditional_response(request, etag=None, last_modified=None, response=No return _not_modified(request, response) # Step 5: Test the If-Range precondition (not supported). - # Step 6: Return original response since there isn't a conditional response. + # Step 6: Return original response since there isn't a conditional + # response. 
return response diff --git a/django/utils/csp.py b/django/utils/csp.py index b989a47c23..d57fc98995 100644 --- a/django/utils/csp.py +++ b/django/utils/csp.py @@ -9,7 +9,8 @@ class CSP(StrEnum): Content Security Policy constants for directive values and special tokens. These constants represent: - 1. Standard quoted string values from the CSP spec (e.g., 'self', 'unsafe-inline') + 1. Standard quoted string values from the CSP spec (e.g., 'self', + 'unsafe-inline') 2. Special placeholder tokens (NONCE) that get replaced by the middleware Using this enum instead of raw strings provides better type checking, @@ -43,13 +44,15 @@ class CSP(StrEnum): WASM_UNSAFE_EVAL = "'wasm-unsafe-eval'" # Special placeholder that gets replaced by the middleware. - # The value itself is arbitrary and should not be mistaken for a real nonce. + # The value itself is arbitrary and should not be mistaken for a real + # nonce. NONCE = "<CSP_NONCE_SENTINEL>" class LazyNonce(SimpleLazyObject): """ - Lazily generates a cryptographically secure nonce string, for use in CSP headers. + Lazily generates a cryptographically secure nonce string, for use in CSP + headers. The nonce is only generated when first accessed (e.g., via string interpolation or inside a template). @@ -62,7 +65,8 @@ class LazyNonce(SimpleLazyObject): <script{% if csp_nonce %} nonce="{{ csp_nonce }}"...{% endif %}> - The `{% if %}` block will only render if the nonce has been evaluated elsewhere. + The `{% if %}` block will only render if the nonce has been evaluated + elsewhere. """ diff --git a/django/utils/dateformat.py b/django/utils/dateformat.py index 4a158432d9..94a1e8595a 100644 --- a/django/utils/dateformat.py +++ b/django/utils/dateformat.py @@ -144,10 +144,10 @@ class TimeFormat(Formatter): def P(self): """ - Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off - if they're zero and the strings 'midnight' and 'noon' if appropriate. 
- Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.' - Proprietary extension. + Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left + off if they're zero and the strings 'midnight' and 'noon' if + appropriate. Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', + '12:30 p.m.' Proprietary extension. """ if self.data.minute == 0 and self.data.hour == 0: return _("midnight") @@ -216,7 +216,10 @@ class DateFormat(TimeFormat): return WEEKDAYS_ABBR[self.data.weekday()] def E(self): - "Alternative month names as required by some locales. Proprietary extension." + """ + Alternative month names as required by some locales. Proprietary + extension. + """ return MONTHS_ALT[self.data.month] def F(self): diff --git a/django/utils/decorators.py b/django/utils/decorators.py index fb12c7fbcd..83586dfc2c 100644 --- a/django/utils/decorators.py +++ b/django/utils/decorators.py @@ -150,8 +150,8 @@ def make_middleware_decorator(middleware_class): response = middleware.process_template_response( request, response ) - # Defer running of process_response until after the template - # has been rendered: + # Defer running of process_response until after the + # template has been rendered: if hasattr(middleware, "process_response"): def callback(response): diff --git a/django/utils/feedgenerator.py b/django/utils/feedgenerator.py index e52720a61e..a9eaffc205 100644 --- a/django/utils/feedgenerator.py +++ b/django/utils/feedgenerator.py @@ -65,9 +65,10 @@ def _guess_stylesheet_mimetype(url): """ mimetypedb = mimetypes.MimeTypes() - # The official mimetype for XSLT files is technically `application/xslt+xml` - # but as of 2024 almost no browser supports that (they all expect text/xsl). - # On top of that, windows seems to assume that the type for xsl is text/xml. + # The official mimetype for XSLT files is technically + # `application/xslt+xml` but as of 2024 almost no browser supports that + # (they all expect text/xsl). 
On top of that, windows seems to assume that + # the type for xsl is text/xml. mimetypedb.readfp(StringIO("text/xsl\txsl\ntext/xsl\txslt")) return mimetypedb.guess_type(url) @@ -215,8 +216,8 @@ class SyndicationFeed: def root_attributes(self): """ - Return extra attributes to place on the root (i.e. feed/channel) element. - Called from write(). + Return extra attributes to place on the root (i.e. feed/channel) + element. Called from write(). """ return {} @@ -235,7 +236,8 @@ class SyndicationFeed: def item_attributes(self, item): """ - Return extra attributes to place on each item (i.e. item/entry) element. + Return extra attributes to place on each item (i.e. item/entry) + element. """ return {} @@ -295,8 +297,8 @@ class RssFeed(SyndicationFeed): def write(self, outfile, encoding): handler = SimplerXMLGenerator(outfile, encoding, short_empty_elements=True) handler.startDocument() - # Any stylesheet must come after the start of the document but before any tag. - # https://www.w3.org/Style/styling-XML.en.html + # Any stylesheet must come after the start of the document but before + # any tag. https://www.w3.org/Style/styling-XML.en.html self.add_stylesheets(handler) handler.startElement("rss", self.rss_attributes()) handler.startElement("channel", self.root_attributes()) diff --git a/django/utils/html.py b/django/utils/html.py index 56435eb7e2..d3b904a822 100644 --- a/django/utils/html.py +++ b/django/utils/html.py @@ -256,8 +256,8 @@ def smart_urlquote(url): netloc = unquote_quote(netloc) if query: - # Separately unquoting key/value, so as to not mix querystring separators - # included in query values. See #22267. + # Separately unquoting key/value, so as to not mix querystring + # separators included in query values. See #22267. 
query_parts = [ (unquote(q[0]), unquote(q[1])) for q in parse_qsl(query, keep_blank_values=True) @@ -352,7 +352,8 @@ class Urlizer: url = smart_urlquote(html.unescape(middle)) elif len(middle) <= MAX_URL_LENGTH and self.simple_url_2_re.match(middle): unescaped_middle = html.unescape(middle) - # RemovedInDjango70Warning: When the deprecation ends, replace with: + # RemovedInDjango70Warning: When the deprecation ends, replace + # with: # url = smart_urlquote(f"https://{unescaped_middle}") protocol = ( "https" @@ -462,7 +463,8 @@ class Urlizer: trail_start = len(rstripped) amount_trailing_semicolons = len(middle) - len(middle.rstrip(";")) if amp > -1 and amount_trailing_semicolons > 1: - # Leave up to most recent semicolon as might be an entity. + # Leave up to most recent semicolon as might be an + # entity. recent_semicolon = middle[trail_start:].index(";") middle_semicolon_index = recent_semicolon + trail_start + 1 trail = middle[middle_semicolon_index:] + trail diff --git a/django/utils/http.py b/django/utils/http.py index 1c7aec7141..504f28c678 100644 --- a/django/utils/http.py +++ b/django/utils/http.py @@ -284,10 +284,10 @@ def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False): url_info = urlsplit(url) except ValueError: # e.g. invalid IPv6 addresses return False - # Forbid URLs like http:///example.com - with a scheme, but without a hostname. - # In that URL, example.com is not the hostname but, a path component. However, - # Chrome will still consider example.com to be the hostname, so we must not - # allow this syntax. + # Forbid URLs like http:///example.com - with a scheme, but without a + # hostname. In that URL, example.com is not the hostname but, a path + # component. However, Chrome will still consider example.com to be the + # hostname, so we must not allow this syntax. if not url_info.netloc and url_info.scheme: return False # Forbid URLs that start with control characters. 
Some browsers (like diff --git a/django/utils/module_loading.py b/django/utils/module_loading.py index cb579e7f8c..28067e8d89 100644 --- a/django/utils/module_loading.py +++ b/django/utils/module_loading.py @@ -18,8 +18,8 @@ def cached_import(module_path, class_name): def import_string(dotted_path): """ - Import a dotted module path and return the attribute/class designated by the - last name in the path. Raise ImportError if the import failed. + Import a dotted module path and return the attribute/class designated by + the last name in the path. Raise ImportError if the import failed. """ try: module_path, class_name = dotted_path.rsplit(".", 1) diff --git a/django/utils/termcolors.py b/django/utils/termcolors.py index 3d1eee6e41..aeef02f1b0 100644 --- a/django/utils/termcolors.py +++ b/django/utils/termcolors.py @@ -154,9 +154,9 @@ def parse_color_setting(config_string): bg is a background color. option is a display options. - Specifying a named palette is the same as manually specifying the individual - definitions for each role. Any individual definitions following the palette - definition will augment the base palette definition. + Specifying a named palette is the same as manually specifying the + individual definitions for each role. Any individual definitions following + the palette definition will augment the base palette definition. Valid roles: 'error', 'success', 'warning', 'notice', 'sql_field', 'sql_coltype', diff --git a/django/utils/timesince.py b/django/utils/timesince.py index a61b02d94d..e6d5b194ac 100644 --- a/django/utils/timesince.py +++ b/django/utils/timesince.py @@ -137,6 +137,7 @@ def timesince(d, now=None, reversed=False, time_strings=None, depth=2): def timeuntil(d, now=None, time_strings=None, depth=2): """ - Like timesince, but return a string measuring the time until the given time. + Like timesince, but return a string measuring the time until the given + time. 
""" return timesince(d, now, reversed=True, time_strings=time_strings, depth=depth) diff --git a/django/utils/translation/trans_real.py b/django/utils/translation/trans_real.py index 86fe823bf7..abc1477442 100644 --- a/django/utils/translation/trans_real.py +++ b/django/utils/translation/trans_real.py @@ -103,7 +103,8 @@ class TranslationCatalog: yield from cat.keys() def update(self, trans): - # Merge if plural function is the same as the top catalog, else prepend. + # Merge if plural function is the same as the top catalog, else + # prepend. if trans.plural.__code__ == self._plurals[0]: self._catalogs[0].update(trans._catalog) else: @@ -154,7 +155,8 @@ class DjangoTranslation(gettext_module.GNUTranslations): if self.domain == "django": if localedirs is not None: - # A module-level cache is used for caching 'django' translations + # A module-level cache is used for caching 'django' + # translations warnings.warn( "localedirs is ignored when domain is 'django'.", RuntimeWarning ) @@ -253,7 +255,8 @@ class DjangoTranslation(gettext_module.GNUTranslations): if not getattr(other, "_catalog", None): return # NullTranslations() has no _catalog if self._catalog is None: - # Take plural and _info from first catalog found (generally Django's). + # Take plural and _info from first catalog found (generally + # Django's). self.plural = other.plural self._info = other._info.copy() self._catalog = TranslationCatalog(other) @@ -329,7 +332,8 @@ def get_language(): return t.to_language() except AttributeError: pass - # If we don't have a real translation object, assume it's the default language. + # If we don't have a real translation object, assume it's the default + # language. return settings.LANGUAGE_CODE @@ -511,7 +515,8 @@ def get_supported_language_variant(lang_code, strict=False): not strict and (index := lang_code.rfind("-", 0, LANGUAGE_CODE_MAX_LENGTH)) > 0 ): - # There is a generic variant under the maximum length accepted length. 
+ # There is a generic variant under the maximum length accepted + # length. lang_code = lang_code[:index] else: raise LookupError(lang_code) diff --git a/django/utils/xmlutils.py b/django/utils/xmlutils.py index c3eb3ba6a3..5c65e580ed 100644 --- a/django/utils/xmlutils.py +++ b/django/utils/xmlutils.py @@ -22,8 +22,8 @@ class SimplerXMLGenerator(XMLGenerator): def characters(self, content): if content and re.search(r"[\x00-\x08\x0B-\x0C\x0E-\x1F]", content): - # Fail loudly when content has control chars (unsupported in XML 1.0) - # See https://www.w3.org/International/questions/qa-controls + # Fail loudly when content has control chars (unsupported in XML + # 1.0) See https://www.w3.org/International/questions/qa-controls raise UnserializableContentError( "Control characters are not supported in XML 1.0" ) diff --git a/django/views/debug.py b/django/views/debug.py index 9d8d1987de..75f30ca601 100644 --- a/django/views/debug.py +++ b/django/views/debug.py @@ -233,7 +233,8 @@ class SafeExceptionReporterFilter: return "{!r} while evaluating {!r}".format(e, value) if is_multivalue_dict: - # Cleanse MultiValueDicts (request.POST is the one we usually care about) + # Cleanse MultiValueDicts (request.POST is the one we usually care + # about) value = self.get_cleansed_multivaluedict(request, value) return value diff --git a/django/views/decorators/debug.py b/django/views/decorators/debug.py index 3b868bcf29..506aaeb64d 100644 --- a/django/views/decorators/debug.py +++ b/django/views/decorators/debug.py @@ -54,8 +54,8 @@ def sensitive_variables(*variables): "Python file (not a builtin or from a native extension)." ) else: - # A source file may not be available (e.g. in .pyc-only builds), - # use the first line number instead. + # A source file may not be available (e.g. in .pyc-only + # builds), use the first line number instead. 
first_line_number = wrapped_func.__code__.co_firstlineno key = hash(f"{file_path}:{first_line_number}") diff --git a/django/views/generic/dates.py b/django/views/generic/dates.py index 12ec4104cd..2daab2cbf8 100644 --- a/django/views/generic/dates.py +++ b/django/views/generic/dates.py @@ -651,8 +651,8 @@ class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView): """ - Base detail view for a single object on a single date; this differs from the - standard DetailView by accepting a year/month/day in the URL. + Base detail view for a single object on a single date; this differs from + the standard DetailView by accepting a year/month/day in the URL. This requires subclassing to provide a response mixin. """ diff --git a/django/views/generic/detail.py b/django/views/generic/detail.py index 70e427c59b..fa4e4848e9 100644 --- a/django/views/generic/detail.py +++ b/django/views/generic/detail.py @@ -144,8 +144,9 @@ class SingleObjectTemplateResponseMixin(TemplateResponseMixin): if name: names.insert(0, name) - # The least-specific option is the default <app>/<model>_detail.html; - # only use this if the object in question is a model. + # The least-specific option is the default + # <app>/<model>_detail.html; only use this if the object in + # question is a model. if isinstance(self.object, models.Model): object_meta = self.object._meta names.append( @@ -185,5 +186,6 @@ class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView): Render a "detail" view of an object. By default this is a model instance looked up from `self.queryset`, but the - view will support display of *any* object by overriding `self.get_object()`. + view will support display of *any* object by overriding + `self.get_object()`. 
""" diff --git a/django/views/i18n.py b/django/views/i18n.py index 79a10408dd..49e3f808c1 100644 --- a/django/views/i18n.py +++ b/django/views/i18n.py @@ -105,7 +105,8 @@ class JavaScriptCatalog(View): You can override the gettext domain for this view, but usually you don't want to do that as JavaScript messages go to the djangojs domain. This - might be needed if you deliver your JavaScript source from Django templates. + might be needed if you deliver your JavaScript source from Django + templates. """ domain = "djangojs" @@ -153,8 +154,8 @@ class JavaScriptCatalog(View): @property def _plural_string(self): """ - Return the plural string (including nplurals) for this catalog language, - or None if no plural string is available. + Return the plural string (including nplurals) for this catalog + language, or None if no plural string is available. """ if "" in self.translation._catalog: for line in self.translation._catalog[""].split("\n"): diff --git a/docs/conf.py b/docs/conf.py index bb75c46892..98070b14fc 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,10 +1,12 @@ # Django documentation build configuration file, created by # sphinx-quickstart on Thu Mar 27 09:06:53 2008. # -# This file is execfile()d with the current directory set to its containing dir. +# This file is execfile()d with the current directory set to its containing +# dir. # # The contents of this file are pickled, so don't put values in the namespace -# that aren't picklable (module imports are okay, they're removed automatically). +# that aren't picklable (module imports are okay, they're removed +# automatically). # # All configuration values have a default; values that are commented out # serve to show the default. @@ -40,8 +42,8 @@ import github_links # NOQA # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "4.5.0" -# Add any Sphinx extension module names here, as strings. 
They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "djangodocs", "sphinx.ext.extlinks", @@ -79,7 +81,8 @@ linkcheck_ignore = [ ] # Spelling check needs an additional module that is not installed by default. -# Add it only if spelling check is requested so docs can be generated without it. +# Add it only if spelling check is requested so docs can be generated without +# it. if "spelling" in sys.argv: extensions.append("sphinxcontrib.spelling") @@ -165,7 +168,8 @@ today_fmt = "%B %d, %Y" # directories to ignore when looking for source files. exclude_patterns = ["_build", "_theme", "requirements.txt"] -# The reST default role (used for this markup: `text`) to use for all documents. +# The reST default role (used for this markup: `text`) to use for all +# documents. default_role = "default-role-error" # If true, '()' will be appended to :func: etc. cross-reference text. @@ -387,10 +391,10 @@ epub_copyright = copyright # The basename for the epub file. It defaults to the project name. # epub_basename = 'Django' -# The HTML theme for the epub output. Since the default themes are not optimized -# for small screen space, using the same theme for HTML and epub output is -# usually not wise. This defaults to 'epub', a theme designed to save visual -# space. +# The HTML theme for the epub output. Since the default themes are not +# optimized for small screen space, using the same theme for HTML and epub +# output is usually not wise. This defaults to 'epub', a theme designed to save +# visual space. epub_theme = "djangodocs-epub" # The language of the text. 
It defaults to the language option diff --git a/scripts/manage_translations.py b/scripts/manage_translations.py index ffa6ff3d0e..ce3093ca4a 100644 --- a/scripts/manage_translations.py +++ b/scripts/manage_translations.py @@ -146,7 +146,8 @@ def _get_locale_dirs(resources, include_core=True): """ Return a tuple (contrib name, absolute path) for all locale directories, optionally including the django core catalog. - If resources list is not None, filter directories matching resources content. + If resources list is not None, filter directories matching resources + content. """ contrib_dir = os.path.join(os.getcwd(), "django", "contrib") dirs = [] diff --git a/tests/admin_changelist/tests.py b/tests/admin_changelist/tests.py index cd2fe7c645..68f6ca453e 100644 --- a/tests/admin_changelist/tests.py +++ b/tests/admin_changelist/tests.py @@ -143,7 +143,8 @@ class ChangeListTests(TestCase): def test_select_related_preserved(self): """ Regression test for #10348: ChangeList.get_queryset() shouldn't - overwrite a custom select_related provided by ModelAdmin.get_queryset(). + overwrite a custom select_related provided by + ModelAdmin.get_queryset(). """ m = ChildAdmin(Child, custom_site) request = self.factory.get("/child/") @@ -874,7 +875,8 @@ class ChangeListTests(TestCase): request = self.factory.get("/", data={SEARCH_VAR: search_term}) request.user = self.superuser with self.subTest(search_term=search_term): - # 1 query for filtered result, 1 for filtered count, 1 for total count. + # 1 query for filtered result, 1 for filtered count, 1 for + # total count. 
with self.assertNumQueries(3): cl = model_admin.get_changelist_instance(request) self.assertCountEqual(cl.queryset, expected_result) @@ -1347,7 +1349,9 @@ class ChangeListTests(TestCase): self.assertEqual(queryset.count(), 1) def test_changelist_view_list_editable_changed_objects_uses_filter(self): - """list_editable edits use a filtered queryset to limit memory usage.""" + """ + list_editable edits use a filtered queryset to limit memory usage. + """ a = Swallow.objects.create(origin="Swallow A", load=4, speed=1) Swallow.objects.create(origin="Swallow B", load=2, speed=2) data = { @@ -1367,7 +1371,8 @@ class ChangeListTests(TestCase): self.assertEqual(response.status_code, 200) self.assertIn("WHERE", context.captured_queries[4]["sql"]) self.assertIn("IN", context.captured_queries[4]["sql"]) - # Check only the first few characters since the UUID may have dashes. + # Check only the first few characters since the UUID may have + # dashes. self.assertIn(str(a.pk)[:8], context.captured_queries[4]["sql"]) def test_list_editable_error_title(self): @@ -1420,7 +1425,8 @@ class ChangeListTests(TestCase): check_results_order() # When an order field is defined but multiple records have the same - # value for that field, make sure everything gets ordered by -pk as well. + # value for that field, make sure everything gets ordered by -pk as + # well. UnorderedObjectAdmin.ordering = ["bool"] check_results_order() @@ -1481,7 +1487,8 @@ class ChangeListTests(TestCase): check_results_order(ascending=True) # When an order field is defined but multiple records have the same - # value for that field, make sure everything gets ordered by -pk as well. + # value for that field, make sure everything gets ordered by -pk as + # well. 
OrderedObjectAdmin.ordering = ["bool"] check_results_order() diff --git a/tests/admin_checks/tests.py b/tests/admin_checks/tests.py index fc87260c9c..430b113ac7 100644 --- a/tests/admin_checks/tests.py +++ b/tests/admin_checks/tests.py @@ -680,9 +680,9 @@ class SystemChecksTestCase(SimpleTestCase): def test_fk_exclusion(self): """ - Regression test for #11709 - when testing for fk excluding (when exclude is - given) make sure fk_name is honored or things blow up when there is more - than one fk to the parent model. + Regression test for #11709 - when testing for fk excluding (when + exclude is given) make sure fk_name is honored or things blow up when + there is more than one fk to the parent model. """ class TwoAlbumFKAndAnEInline(admin.TabularInline): @@ -859,9 +859,9 @@ class SystemChecksTestCase(SimpleTestCase): def test_graceful_m2m_fail(self): """ - Regression test for #12203/#12237 - Fail more gracefully when a M2M field that - specifies the 'through' option is included in the 'fields' or the 'fieldsets' - ModelAdmin options. + Regression test for #12203/#12237 - Fail more gracefully when a M2M + field that specifies the 'through' option is included in the 'fields' + or the 'fieldsets' ModelAdmin options. """ class BookAdmin(admin.ModelAdmin): @@ -946,7 +946,8 @@ class SystemChecksTestCase(SimpleTestCase): def test_non_model_first_field(self): """ Regression for ensuring ModelAdmin.field can handle first elem being a - non-model field (test fix for UnboundLocalError introduced with r16225). + non-model field (test fix for UnboundLocalError introduced with + r16225). 
""" class SongForm(forms.ModelForm): diff --git a/tests/admin_filters/tests.py b/tests/admin_filters/tests.py index 558164f75c..530d4c53b6 100644 --- a/tests/admin_filters/tests.py +++ b/tests/admin_filters/tests.py @@ -910,7 +910,8 @@ class ListFiltersTests(TestCase): request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) - # Make sure that only actual authors are present in author's list filter + # Make sure that only actual authors are present in author's list + # filter filterspec = changelist.get_filters(request)[0][4] expected = [(self.alfred.pk, "alfred"), (self.bob.pk, "bob")] self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected)) @@ -1029,7 +1030,8 @@ class ListFiltersTests(TestCase): request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) - # Make sure that only actual contributors are present in contrib's list filter + # Make sure that only actual contributors are present in contrib's list + # filter filterspec = changelist.get_filters(request)[0][5] expected = [(self.bob.pk, "bob"), (self.lisa.pk, "lisa")] self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected)) diff --git a/tests/admin_inlines/tests.py b/tests/admin_inlines/tests.py index 63e7393853..0b73089950 100644 --- a/tests/admin_inlines/tests.py +++ b/tests/admin_inlines/tests.py @@ -241,7 +241,10 @@ class TestInline(TestDataMixin, TestCase): ) def test_no_parent_callable_lookup(self): - """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable""" + """ + Admin inline `readonly_field` shouldn't invoke parent ModelAdmin + callable + """ # Identically named callable isn't present in the parent ModelAdmin, # rendering of the add view shouldn't explode response = self.client.get(reverse("admin:admin_inlines_novel_add")) @@ -324,7 +327,9 @@ class TestInline(TestDataMixin, TestCase): self.assertContains(response, "Label from ModelForm.Meta") def test_inline_hidden_field_no_column(self): - """#18263 
-- Make sure hidden fields don't get a column in tabular inlines""" + """ + #18263 -- Make sure hidden fields don't get a column in tabular inlines + """ parent = SomeParentModel.objects.create(name="a") SomeChildModel.objects.create(name="b", position="0", parent=parent) SomeChildModel.objects.create(name="c", position="1", parent=parent) @@ -1232,7 +1237,8 @@ class TestInlinePermissions(TestCase): ) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) - # Change permission on inner2s, so we can change existing but not add new + # Change permission on inner2s, so we can change existing but not add + # new self.assertContains( response, '<h2 id="inner2_set-heading" class="inline-heading">Inner2s</h2>', diff --git a/tests/admin_scripts/tests.py b/tests/admin_scripts/tests.py index a272fced74..97d2d7cf7e 100644 --- a/tests/admin_scripts/tests.py +++ b/tests/admin_scripts/tests.py @@ -191,7 +191,10 @@ class AdminScriptTestCase(SimpleTestCase): ) def assertNotInOutput(self, stream, msg): - "Utility assertion: assert that the given message doesn't exist in the output" + """ + Utility assertion: assert that the given message doesn't exist in the + output + """ self.assertNotIn( msg, stream, "'%s' matches actual output text '%s'" % (msg, stream) ) @@ -503,7 +506,10 @@ class DjangoAdminMinimalSettings(AdminScriptTestCase): self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): - "minimal: django-admin can't execute user commands unless settings are provided" + """ + minimal: django-admin can't execute user commands unless settings are + provided + """ args = ["noargs_command"] out, err = self.run_django_admin(args) self.assertNoOutput(out) @@ -745,7 +751,10 @@ class DjangoAdminSettingsDirectory(AdminScriptTestCase): ) def test_setup_environ_custom_template(self): - "directory: startapp creates the correct directory with a custom template" + """ + directory: startapp creates the correct 
directory with a custom + template + """ template_path = os.path.join(custom_templates_dir, "app_template") args = ["startapp", "--template", template_path, "custom_settings_test"] app_path = os.path.join(self.test_dir, "custom_settings_test") @@ -1089,7 +1098,10 @@ class ManageMinimalSettings(AdminScriptTestCase): self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_settings(self): - "minimal: manage.py builtin commands fail if settings are provided as argument" + """ + minimal: manage.py builtin commands fail if settings are provided as + argument + """ args = ["check", "--settings=test_project.settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) @@ -1126,7 +1138,10 @@ class ManageMinimalSettings(AdminScriptTestCase): self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): - "minimal: manage.py can't execute user commands without appropriate settings" + """ + minimal: manage.py can't execute user commands without appropriate + settings + """ args = ["noargs_command"] out, err = self.run_manage(args) self.assertNoOutput(out) @@ -1175,7 +1190,10 @@ class ManageAlternateSettings(AdminScriptTestCase): ) def test_builtin_with_settings(self): - "alternate: manage.py builtin commands work with settings provided as argument" + """ + alternate: manage.py builtin commands work with settings provided as + argument + """ args = ["check", "--settings=alternate_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertOutput(out, SYSTEM_CHECK_MSG) @@ -1331,7 +1349,9 @@ class ManageMultipleSettings(AdminScriptTestCase): self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): - "multiple: manage.py can't execute user commands using default settings" + """ + multiple: manage.py can't execute user commands using default settings + """ args = ["noargs_command"] out, err = self.run_manage(args) 
self.assertNoOutput(out) @@ -1709,7 +1729,8 @@ class ManageRunserver(SimpleTestCase): def test_readonly_database(self): """ - runserver.check_migrations() doesn't choke when a database is read-only. + runserver.check_migrations() doesn't choke when a database is + read-only. """ with mock.patch.object(MigrationRecorder, "has_table", return_value=False): self.cmd.check_migrations() @@ -1754,7 +1775,9 @@ class ManageRunserver(SimpleTestCase): self.addCleanup(setattr, registry, "registered_checks", original_checks) class CustomRunserverCommand(RunserverCommand): - """Rather than mock run(), raise immediately after system checks run.""" + """ + Rather than mock run(), raise immediately after system checks run. + """ def check_migrations(self, *args, **kwargs): raise CustomException @@ -1921,7 +1944,8 @@ class CommandTypes(AdminScriptTestCase): def test_version_alternative(self): "--version is equivalent to version" args1, args2 = ["version"], ["--version"] - # It's possible one outputs on stderr and the other on stdout, hence the set + # It's possible one outputs on stderr and the other on stdout, hence + # the set self.assertEqual(set(self.run_manage(args1)), set(self.run_manage(args2))) def test_help(self): @@ -2136,13 +2160,18 @@ class CommandTypes(AdminScriptTestCase): self._test_base_command(args, expected_labels, option_a="'x'") def test_base_command_with_options(self): - "User BaseCommands can execute with multiple options when a label is provided" + """ + User BaseCommands can execute with multiple options when a label is + provided + """ args = ["base_command", "testlabel", "-a", "x", "--option_b=y"] expected_labels = "('testlabel',)" self._test_base_command(args, expected_labels, option_a="'x'", option_b="'y'") def test_base_command_with_wrong_option(self): - "User BaseCommands outputs command usage when wrong option is specified" + """ + User BaseCommands outputs command usage when wrong option is specified + """ args = ["base_command", "--invalid"] out, err 
= self.run_manage(args) self.assertNoOutput(out) @@ -2164,8 +2193,8 @@ class CommandTypes(AdminScriptTestCase): def test_base_run_from_argv(self): """ - Test run_from_argv properly terminates even with custom execute() (#19665) - Also test proper traceback display. + Test run_from_argv properly terminates even with custom execute() + (#19665) Also test proper traceback display. """ err = StringIO() command = BaseCommand(stderr=err) @@ -2292,7 +2321,10 @@ class CommandTypes(AdminScriptTestCase): self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.") def test_app_command_some_invalid_app_labels(self): - "User AppCommands can execute when some of the provided app names are invalid" + """ + User AppCommands can execute when some of the provided app names are + invalid + """ args = ["app_command", "auth", "NOT_AN_APP"] out, err = self.run_manage(args) self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.") @@ -2316,7 +2348,10 @@ class CommandTypes(AdminScriptTestCase): self.assertOutput(err, "Enter at least one label") def test_label_command_multiple_label(self): - "User LabelCommands are executed multiple times if multiple labels are provided" + """ + User LabelCommands are executed multiple times if multiple labels are + provided + """ args = ["label_command", "testlabel", "anotherlabel"] out, err = self.run_manage(args) self.assertNoOutput(err) @@ -2558,7 +2593,9 @@ class StartProject(LiveServerTestCase, AdminScriptTestCase): ) def test_invalid_project_name(self): - "Make sure the startproject management command validates a project name" + """ + Make sure the startproject management command validates a project name + """ for bad_name in ("7testproject", "../testproject"): with self.subTest(project_name=bad_name): args = ["startproject", bad_name] @@ -2773,7 +2810,10 @@ class StartProject(LiveServerTestCase, AdminScriptTestCase): self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py"))) def test_file_without_extension(self): - 
"Make sure the startproject management command is able to render custom files" + """ + Make sure the startproject management command is able to render custom + files + """ template_path = os.path.join(custom_templates_dir, "project_template") args = [ "startproject", @@ -2845,7 +2885,8 @@ class StartProject(LiveServerTestCase, AdminScriptTestCase): def test_custom_project_destination_missing(self): """ - Create the directory when the provided destination directory doesn't exist. + Create the directory when the provided destination directory doesn't + exist. """ template_path = os.path.join(custom_templates_dir, "project_template") args = [ diff --git a/tests/admin_utils/test_logentry.py b/tests/admin_utils/test_logentry.py index a800de0fad..37d13d24bd 100644 --- a/tests/admin_utils/test_logentry.py +++ b/tests/admin_utils/test_logentry.py @@ -114,7 +114,8 @@ class LogEntryTests(TestCase): def test_logentry_change_message_localized_datetime_input(self): """ - Localized date/time inputs shouldn't affect changed form data detection. + Localized date/time inputs shouldn't affect changed form data + detection. """ post_data = { "site": self.site.pk, diff --git a/tests/admin_utils/tests.py b/tests/admin_utils/tests.py index 6d165637e7..c90836c6d8 100644 --- a/tests/admin_utils/tests.py +++ b/tests/admin_utils/tests.py @@ -329,7 +329,8 @@ class UtilsTests(SimpleTestCase): self.assertEqual(label_for_field(lambda x: "nothing", Article), "--") self.assertEqual(label_for_field("site_id", Article), "Site id") - # The correct name and attr are returned when `__` is in the field name. + # The correct name and attr are returned when `__` is in the field + # name. 
self.assertEqual(label_for_field("site__domain", Article), "Site domain") self.assertEqual( label_for_field("site__domain", Article, return_attr=True), diff --git a/tests/admin_views/admin.py b/tests/admin_views/admin.py index 5e14069bae..95d16a4770 100644 --- a/tests/admin_views/admin.py +++ b/tests/admin_views/admin.py @@ -1264,8 +1264,8 @@ site.register(RelatedPrepopulated, search_fields=["name"]) site.register(RelatedWithUUIDPKModel) site.register(ReadOnlyRelatedField, ReadOnlyRelatedFieldAdmin) -# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2. -# That way we cover all four cases: +# We intentionally register Promo and ChapterXtra1 but not Chapter nor +# ChapterXtra2. That way we cover all four cases: # related ForeignKey object registered in admin # related ForeignKey object not registered in admin # related OneToOne object registered in admin diff --git a/tests/admin_views/test_adminsite.py b/tests/admin_views/test_adminsite.py index 7c4841f916..9a0f632db9 100644 --- a/tests/admin_views/test_adminsite.py +++ b/tests/admin_views/test_adminsite.py @@ -30,8 +30,8 @@ urlpatterns = [ @override_settings(ROOT_URLCONF="admin_views.test_adminsite") class SiteEachContextTest(TestCase): """ - Check each_context contains the documented variables and that available_apps context - variable structure is the expected one. + Check each_context contains the documented variables and that + available_apps context variable structure is the expected one. 
""" request_factory = RequestFactory() diff --git a/tests/admin_views/test_skip_link_to_content.py b/tests/admin_views/test_skip_link_to_content.py index 80cf6e5357..3284b76495 100644 --- a/tests/admin_views/test_skip_link_to_content.py +++ b/tests/admin_views/test_skip_link_to_content.py @@ -36,12 +36,14 @@ class SeleniumTests(AdminSeleniumTestCase): self.assertTrue(skip_link.is_displayed()) # Press RETURN to skip the navbar links (view site / documentation / - # change password / log out) and focus first model in the admin_views list. + # change password / log out) and focus first model in the admin_views + # list. skip_link.send_keys(Keys.RETURN) self.assertFalse(skip_link.is_displayed()) # `skip link` disappear. keys = [Keys.TAB, Keys.TAB] # The 1st TAB is the section title. if self.browser == "firefox": - # For some reason Firefox doesn't focus the section title ('ADMIN_VIEWS'). + # For some reason Firefox doesn't focus the section title + # ('ADMIN_VIEWS'). keys.remove(Keys.TAB) body.send_keys(keys) actors_a_tag = self.selenium.find_element(By.LINK_TEXT, "Actors") @@ -61,7 +63,8 @@ class SeleniumTests(AdminSeleniumTestCase): ) self.assertEqual(self.selenium.switch_to.active_element, actors_a_tag) - # Go to the Actor form and the first input will be focused automatically. + # Go to the Actor form and the first input will be focused + # automatically. with self.wait_page_loaded(): actors_a_tag.send_keys(Keys.RETURN) first_input = self.selenium.find_element(By.ID, "id_name") diff --git a/tests/admin_views/tests.py b/tests/admin_views/tests.py index ba322bd344..8460aa81fc 100644 --- a/tests/admin_views/tests.py +++ b/tests/admin_views/tests.py @@ -250,7 +250,8 @@ class AdminViewBasicTestCase(TestCase): "article_set-INITIAL_FORMS": "3", "article_set-MAX_NUM_FORMS": "0", "article_set-0-id": cls.a1.pk, - # there is no title in database, give one here or formset will fail. + # there is no title in database, give one here or formset will + # fail. 
"article_set-0-title": "Norske bostaver æøå skaper problemer", "article_set-0-content": "<p>Middle content</p>", "article_set-0-date_0": "2008-03-18", @@ -992,7 +993,8 @@ class AdminViewBasicTest(AdminViewBasicTestCase): response = self.client.get(changelist_url, {"notarealfield": "5"}) self.assertRedirects(response, "%s?e=1" % changelist_url) - # Spanning relationships through a nonexistent related object (Refs #16716) + # Spanning relationships through a nonexistent related object (Refs + # #16716) response = self.client.get(changelist_url, {"notarealfield__whatever": "5"}) self.assertRedirects(response, "%s?e=1" % changelist_url) @@ -1037,9 +1039,9 @@ class AdminViewBasicTest(AdminViewBasicTestCase): def test_named_group_field_choices_change_list(self): """ - Ensures the admin changelist shows correct values in the relevant column - for rows corresponding to instances of a model in which a named group - has been used in the choices option of a field. + Ensures the admin changelist shows correct values in the relevant + column for rows corresponding to instances of a model in which a named + group has been used in the choices option of a field. """ link1 = reverse("admin:admin_views_fabric_change", args=(self.fab1.pk,)) link2 = reverse("admin:admin_views_fabric_change", args=(self.fab2.pk,)) @@ -1262,8 +1264,8 @@ class AdminViewBasicTest(AdminViewBasicTestCase): def test_allowed_filtering_15103(self): """ Regressions test for ticket 15103 - filtering on fields defined in a - ForeignKey 'limit_choices_to' should be allowed, otherwise raw_id_fields - can break. + ForeignKey 'limit_choices_to' should be allowed, otherwise + raw_id_fields can break. """ # Filters should be allowed if they are defined on a ForeignKey # pointing to this model. 
@@ -1353,7 +1355,8 @@ class AdminViewBasicTest(AdminViewBasicTestCase): response = self.client.get( reverse("admin:admin_views_unchangeableobject_changelist") ) - # Check the format of the shown object -- shouldn't contain a change link + # Check the format of the shown object -- shouldn't contain a change + # link self.assertContains( response, '<th class="field-__str__">%s</th>' % o, html=True ) @@ -1963,7 +1966,8 @@ class AdminJavaScriptTest(TestCase): def test_js_minified_only_if_debug_is_false(self): """ - The minified versions of the JS files are only used when DEBUG is False. + The minified versions of the JS files are only used when DEBUG is + False. """ with override_settings(DEBUG=False): response = self.client.get(reverse("admin:admin_views_section_add")) @@ -2346,7 +2350,8 @@ class AdminViewPermissionsTest(TestCase): self.assertContains(login, ERROR_MESSAGE) new_user = User(username="jondoe", password="secret", email="super@example.com") new_user.save() - # check to ensure if there are multiple email addresses a user doesn't get a 500 + # check to ensure if there are multiple email addresses a user doesn't + # get a 500 login = self.client.post(login_url, self.super_email_login) self.assertContains(login, ERROR_MESSAGE) @@ -2662,7 +2667,8 @@ class AdminViewPermissionsTest(TestCase): ) article_changelist_url = reverse("admin:admin_views_article_changelist") - # add user should not be able to view the list of article or change any of them + # add user should not be able to view the list of article or change any + # of them self.client.force_login(self.adduser) response = self.client.get(article_changelist_url) self.assertEqual(response.status_code, 403) @@ -2744,7 +2750,8 @@ class AdminViewPermissionsTest(TestCase): ) self.client.post(reverse("admin:logout")) - # Test redirection when using row-level change permissions. Refs #11513. + # Test redirection when using row-level change permissions. Refs + # #11513. 
r1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id") r2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id") r3 = RowLevelChangePermissionModel.objects.create(id=3, name="odd id mult 3") @@ -3093,7 +3100,8 @@ class AdminViewPermissionsTest(TestCase): def test_history_view(self): """History view should restrict access.""" - # add user should not be able to view the list of article or change any of them + # add user should not be able to view the list of article or change any + # of them self.client.force_login(self.adduser) response = self.client.get( reverse("admin:admin_views_article_history", args=(self.a1.pk,)) @@ -3116,7 +3124,8 @@ class AdminViewPermissionsTest(TestCase): ) self.assertEqual(response.status_code, 200) - # Test redirection when using row-level change permissions. Refs #11513. + # Test redirection when using row-level change permissions. Refs + # #11513. rl1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id") rl2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id") logins = [ @@ -3180,12 +3189,14 @@ class AdminViewPermissionsTest(TestCase): user has permission to add that related item. """ self.client.force_login(self.adduser) - # The user can't add sections yet, so they shouldn't see the "add section" link. + # The user can't add sections yet, so they shouldn't see the "add + # section" link. url = reverse("admin:admin_views_article_add") add_link_text = "add_id_section" response = self.client.get(url) self.assertNotContains(response, add_link_text) - # Allow the user to add sections too. Now they can see the "add section" link. + # Allow the user to add sections too. Now they can see the "add + # section" link. user = User.objects.get(username="adduser") perm = get_perm(Section, get_permission_codename("add", Section._meta)) user.user_permissions.add(perm) @@ -3313,7 +3324,8 @@ class AdminViewPermissionsTest(TestCase): # Logged in? Redirect. 
self.client.force_login(self.superuser) response = self.client.get(shortcut_url, follow=False) - # Can't use self.assertRedirects() because User.get_absolute_url() is silly. + # Can't use self.assertRedirects() because User.get_absolute_url() is + # silly. self.assertEqual(response.status_code, 302) # Domain may depend on contrib.sites tests also run self.assertRegex(response.url, "http://(testserver|example.com)/dummy/foo/") @@ -3538,7 +3550,10 @@ class AdminViewsNoUrlTest(TestCase): ) def test_no_standard_modeladmin_urls(self): - """Admin index views don't break when user's ModelAdmin removes standard urls""" + """ + Admin index views don't break when user's ModelAdmin removes standard + urls + """ self.client.force_login(self.changeuser) r = self.client.get(reverse("admin:index")) # we shouldn't get a 500 error caused by a NoReverseMatch @@ -3920,7 +3935,9 @@ class AdminViewStringPrimaryKeyTest(TestCase): self.assertContains(response, "Changed something") def test_get_change_view(self): - "Retrieving the object using urlencoded form of primary key should work" + """ + Retrieving the object using urlencoded form of primary key should work + """ response = self.client.get( reverse( "admin:admin_views_modelwithstringprimarykey_change", args=(self.pk,) @@ -3936,7 +3953,8 @@ class AdminViewStringPrimaryKeyTest(TestCase): response = self.client.get( reverse("admin:admin_views_modelwithstringprimarykey_changelist") ) - # this URL now comes through reverse(), thus url quoting and iri_to_uri encoding + # this URL now comes through reverse(), thus url quoting and iri_to_uri + # encoding pk_final_url = escape(iri_to_uri(quote(self.pk))) change_url = reverse( "admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",) @@ -3976,7 +3994,8 @@ class AdminViewStringPrimaryKeyTest(TestCase): "admin:admin_views_modelwithstringprimarykey_delete", args=(quote(self.pk),) ) response = self.client.get(url) - # this URL now comes through reverse(), thus url quoting and 
iri_to_uri encoding + # this URL now comes through reverse(), thus url quoting and iri_to_uri + # encoding change_url = reverse( "admin:admin_views_modelwithstringprimarykey_change", args=("__fk__",) ).replace("__fk__", escape(iri_to_uri(quote(self.pk)))) @@ -3984,7 +4003,10 @@ class AdminViewStringPrimaryKeyTest(TestCase): self.assertContains(response, should_contain) def test_url_conflicts_with_add(self): - "A model with a primary key that ends with add or is `add` should be visible" + """ + A model with a primary key that ends with add or is `add` should be + visible + """ add_model = ModelWithStringPrimaryKey.objects.create( pk="i have something to add" ) @@ -4046,7 +4068,9 @@ class AdminViewStringPrimaryKeyTest(TestCase): self.assertContains(response, should_contain) def test_change_view_history_link(self): - """Object history button link should work and contain the pk value quoted.""" + """ + Object history button link should work and contain the pk value quoted. + """ url = reverse( "admin:%s_modelwithstringprimarykey_change" % ModelWithStringPrimaryKey._meta.app_label, @@ -4521,8 +4545,8 @@ class AdminViewListEditable(TestCase): self.assertContains(response, "Unordered object #1") def test_list_editable_action_submit(self): - # List editable changes should not be executed if the action "Go" button is - # used to submit the form. + # List editable changes should not be executed if the action "Go" + # button is used to submit the form. data = { "form-TOTAL_FORMS": "3", "form-INITIAL_FORMS": "3", @@ -4606,9 +4630,8 @@ class AdminViewListEditable(TestCase): self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1) def test_pk_hidden_fields_with_list_display_links(self): - """Similarly as test_pk_hidden_fields, but when the hidden pk fields are - referenced in list_display_links. - Refs #12475. + """Similarly as test_pk_hidden_fields, but when the hidden pk fields + are referenced in list_display_links. Refs #12475. 
""" story1 = OtherStory.objects.create( title="The adventures of Guido", @@ -5478,7 +5501,10 @@ class AdminInlineTests(TestCase): self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated") def test_char_pk_inline(self): - "A model with a character PK can be saved as inlines. Regression for #10992" + """ + A model with a character PK can be saved as inlines. Regression for + #10992 + """ # First add a new inline self.post_data["doohickey_set-0-code"] = "DH1" self.post_data["doohickey_set-0-name"] = "Doohickey 1" @@ -5513,7 +5539,10 @@ class AdminInlineTests(TestCase): self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated") def test_integer_pk_inline(self): - "A model with an integer PK can be saved as inlines. Regression for #10992" + """ + A model with an integer PK can be saved as inlines. Regression for + #10992 + """ # First add a new inline self.post_data["whatsit_set-0-index"] = "42" self.post_data["whatsit_set-0-name"] = "Whatsit 1" @@ -6106,7 +6135,8 @@ class SeleniumTests(AdminSeleniumTestCase): self.selenium.get(object_url) self.selenium.find_element(By.ID, "id_name").send_keys(" hello") - # The slugs got prepopulated didn't change since they were originally not empty + # The slugs got prepopulated didn't change since they were originally + # not empty slug1 = self.selenium.find_element(By.ID, "id_slug1").get_attribute("value") slug2 = self.selenium.find_element(By.ID, "id_slug2").get_attribute("value") self.assertEqual(slug1, "this-is-the-main-name-the-best-2012-02-18") @@ -7168,7 +7198,8 @@ class ReadonlyTest(AdminFieldExtractionMixin, TestCase): def test_correct_autoescaping(self): """ - Make sure that non-field readonly elements are properly autoescaped (#24461) + Make sure that non-field readonly elements are properly autoescaped + (#24461) """ section = Section.objects.create(name="<a>evil</a>") response = self.client.get( @@ -7207,7 +7238,8 @@ class LimitChoicesToInAdminTest(TestCase): last_action=datetime.datetime.today() 
- datetime.timedelta(days=1), ) response = self.client.get(reverse("admin:admin_views_stumpjoke_add")) - # The allowed option should appear twice; the limited option should not appear. + # The allowed option should appear twice; the limited option should not + # appear. self.assertContains(response, threepwood.username, count=2) self.assertNotContains(response, marley.username) diff --git a/tests/admin_widgets/tests.py b/tests/admin_widgets/tests.py index 83f5481832..c47e0e3ec1 100644 --- a/tests/admin_widgets/tests.py +++ b/tests/admin_widgets/tests.py @@ -300,7 +300,8 @@ class AdminFormfieldForDBFieldTests(SimpleTestCase): class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase): def test_filter_choices_by_request_user(self): """ - Ensure the user can only see their own cars in the foreign key dropdown. + Ensure the user can only see their own cars in the foreign key + dropdown. """ self.client.force_login(self.superuser) response = self.client.get(reverse("admin:admin_widgets_cartire_add")) @@ -1052,8 +1053,8 @@ class DateTimePickerSeleniumTests(AdminWidgetSeleniumTestCase): def test_calendar_nonday_class(self): """ - Ensure cells that are not days of the month have the `nonday` CSS class. - Refs #4574. + Ensure cells that are not days of the month have the `nonday` CSS + class. Refs #4574. """ from selenium.webdriver.common.by import By @@ -1184,9 +1185,9 @@ class DateTimePickerShortcutsSeleniumTests(AdminWidgetSeleniumTestCase): date/time/datetime picker shortcuts work in the current time zone. Refs #20663. - This test case is fairly tricky, it relies on selenium still running the browser - in the default time zone "America/Chicago" despite `override_settings` changing - the time zone to "Asia/Singapore". + This test case is fairly tricky, it relies on selenium still running + the browser in the default time zone "America/Chicago" despite + `override_settings` changing the time zone to "Asia/Singapore". 
""" from selenium.webdriver.common.by import By @@ -1502,7 +1503,8 @@ class HorizontalVerticalFilterSeleniumTests(AdminWidgetSeleniumTestCase): self.select_option(from_box, str(self.peter.id)) self.select_option(from_box, str(self.lisa.id)) - # Confirm they're selected after clicking inactive buttons: ticket #26575 + # Confirm they're selected after clicking inactive buttons: ticket + # #26575 self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)]) self.selenium.find_element(By.ID, remove_button).click() self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)]) @@ -1515,7 +1517,8 @@ class HorizontalVerticalFilterSeleniumTests(AdminWidgetSeleniumTestCase): self.select_option(to_box, str(self.jason.id)) self.select_option(to_box, str(self.john.id)) - # Confirm they're selected after clicking inactive buttons: ticket #26575 + # Confirm they're selected after clicking inactive buttons: ticket + # #26575 self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)]) self.selenium.find_element(By.ID, choose_button).click() self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)]) diff --git a/tests/aggregation/tests.py b/tests/aggregation/tests.py index 1cf3a8f66e..bd33a532b3 100644 --- a/tests/aggregation/tests.py +++ b/tests/aggregation/tests.py @@ -1321,9 +1321,10 @@ class AggregateTestCase(TestCase): def as_sql(self, compiler, connection, **extra_context): copy = self.copy() - # Most database backends do not support compiling multiple arguments on - # the Max aggregate, and that isn't what is being tested here anyway. To - # avoid errors, the extra argument is just dropped. + # Most database backends do not support compiling multiple + # arguments on the Max aggregate, and that isn't what is being + # tested here anyway. To avoid errors, the extra argument is + # just dropped. 
copy.set_source_expressions( copy.get_source_expressions()[0:1] + [None, None] ) diff --git a/tests/aggregation_regress/tests.py b/tests/aggregation_regress/tests.py index 9199bf3eba..3e1a6a71f9 100644 --- a/tests/aggregation_regress/tests.py +++ b/tests/aggregation_regress/tests.py @@ -314,7 +314,8 @@ class AggregationTests(TestCase): publisher_id=self.p2.id, rating=3.0, ) - # Different DB backends return different types for the extra select computation + # Different DB backends return different types for the extra select + # computation self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545"))) # Order of the annotate/extra in the query doesn't matter @@ -335,7 +336,8 @@ class AggregationTests(TestCase): publisher_id=self.p2.id, rating=3.0, ) - # Different DB backends return different types for the extra select computation + # Different DB backends return different types for the extra select + # computation self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545"))) # Values queries can be combined with annotate and extra @@ -498,8 +500,8 @@ class AggregationTests(TestCase): }, ) - # Regression for #15624 - Missing SELECT columns when using values, annotate - # and aggregate in a single query + # Regression for #15624 - Missing SELECT columns when using values, + # annotate and aggregate in a single query self.assertEqual( Book.objects.annotate(c=Count("authors")).values("c").aggregate(Max("c")), {"c__max": 3}, @@ -935,11 +937,13 @@ class AggregationTests(TestCase): .order_by() .query ) - # There is just one GROUP BY clause (zero commas means at most one clause). + # There is just one GROUP BY clause (zero commas means at most one + # clause). self.assertEqual(qstr[qstr.index("GROUP BY") :].count(", "), 0) def test_duplicate_alias(self): - # Regression for #11256 - duplicating a default alias raises ValueError. + # Regression for #11256 - duplicating a default alias raises + # ValueError. 
msg = ( "The named annotation 'authors__age__avg' conflicts with " "the default name for another annotation." @@ -1004,7 +1008,8 @@ class AggregationTests(TestCase): def test_reverse_relation_name_conflict(self): # Regression for #11256 - providing an aggregate name - # that conflicts with a reverse-related name on the model raises ValueError + # that conflicts with a reverse-related name on the model raises + # ValueError msg = "The annotation 'book_contact_set' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(book_contact_set=Avg("friends__age")) @@ -1417,8 +1422,8 @@ class AggregationTests(TestCase): def test_annotate_joins(self): """ The base table's join isn't promoted to LOUTER. This could - cause the query generation to fail if there is an exclude() for fk-field - in the query, too. Refs #19087. + cause the query generation to fail if there is an exclude() for + fk-field in the query, too. Refs #19087. """ qs = Book.objects.annotate(n=Count("pk")) self.assertIs(qs.query.alias_map["aggregation_regress_book"].join_type, None) @@ -1934,8 +1939,8 @@ class JoinPromotionTests(TestCase): Count("alfa__name") ) self.assertIn(" INNER JOIN ", str(qs.query)) - # Also, the existing join is unpromoted when doing filtering for already - # promoted join. + # Also, the existing join is unpromoted when doing filtering for + # already promoted join. qs = Charlie.objects.annotate(Count("alfa__name")).filter( alfa__name__isnull=False ) diff --git a/tests/apps/tests.py b/tests/apps/tests.py index fba9c43a34..0f395b7fc3 100644 --- a/tests/apps/tests.py +++ b/tests/apps/tests.py @@ -388,7 +388,8 @@ class AppsTests(SimpleTestCase): class LazyC(models.Model): pass - # Everything should be loaded - make sure the callback was executed properly. + # Everything should be loaded - make sure the callback was executed + # properly. 
self.assertEqual(model_classes, [LazyA, LazyB, LazyB, LazyC, LazyA]) @@ -421,7 +422,9 @@ class AppConfigTests(SimpleTestCase): self.assertEqual(ac.path, "foo") def test_dunder_path(self): - """If single element in __path__, use it (in preference to __file__).""" + """ + If single element in __path__, use it (in preference to __file__). + """ ac = AppConfig("label", Stub(__path__=["a"], __file__="b/__init__.py")) self.assertEqual(ac.path, "a") diff --git a/tests/auth_tests/test_auth_backends.py b/tests/auth_tests/test_auth_backends.py index 32fb092cf4..0ba169249b 100644 --- a/tests/auth_tests/test_auth_backends.py +++ b/tests/auth_tests/test_auth_backends.py @@ -457,7 +457,9 @@ class BaseModelBackendTest: PASSWORD_HASHERS=["auth_tests.test_auth_backends.CountingMD5PasswordHasher"] ) def test_authentication_timing(self): - """Hasher is run once regardless of whether the user exists. Refs #20760.""" + """ + Hasher is run once regardless of whether the user exists. Refs #20760. + """ # Re-set the password, because this tests overrides PASSWORD_HASHERS self.user.set_password("test") self.user.save() @@ -875,7 +877,8 @@ class InActiveUserBackendTest(TestCase): class PermissionDeniedBackend: """ - Always raises PermissionDenied in `authenticate`, `has_perm` and `has_module_perms`. + Always raises PermissionDenied in `authenticate`, `has_perm` and + `has_module_perms`. """ def authenticate(self, request, username=None, password=None): @@ -920,7 +923,10 @@ class PermissionDeniedBackendTest(TestCase): @modify_settings(AUTHENTICATION_BACKENDS={"prepend": backend}) def test_permission_denied(self): - "user is not authenticated after a backend raises permission denied #2550" + """ + user is not authenticated after a backend raises permission denied + #2550 + """ self.assertIsNone(authenticate(username="test", password="test")) # user_login_failed signal is sent. 
self.assertEqual( diff --git a/tests/auth_tests/test_basic.py b/tests/auth_tests/test_basic.py index 8d54e187fc..85155dad90 100644 --- a/tests/auth_tests/test_basic.py +++ b/tests/auth_tests/test_basic.py @@ -112,7 +112,10 @@ class BasicTestCase(TestCase): @override_settings(AUTH_USER_MODEL="badsetting") def test_swappable_user_bad_setting(self): - "The alternate user setting must point to something in the format app.model" + """ + The alternate user setting must point to something in the format + app.model + """ msg = "AUTH_USER_MODEL must be of the form 'app_label.model_name'" with self.assertRaisesMessage(ImproperlyConfigured, msg): get_user_model() diff --git a/tests/auth_tests/test_context_processors.py b/tests/auth_tests/test_context_processors.py index 7e6c6a556e..cebc1108dc 100644 --- a/tests/auth_tests/test_context_processors.py +++ b/tests/auth_tests/test_context_processors.py @@ -145,7 +145,8 @@ class AuthContextProcessorTests(TestCase): # bug #12037 is tested by the {% url %} in the template: self.assertContains(response, "url: /userpage/super/") - # A Q() comparing a user and with another Q() (in an AND or OR fashion). + # A Q() comparing a user and with another Q() (in an AND or OR + # fashion). Q(user=response.context["user"]) & Q(someflag=True) # Tests for user equality. This is hard because User defines diff --git a/tests/auth_tests/test_forms.py b/tests/auth_tests/test_forms.py index ebfaa8b051..735ac1d237 100644 --- a/tests/auth_tests/test_forms.py +++ b/tests/auth_tests/test_forms.py @@ -576,7 +576,8 @@ class AuthenticationFormTest(TestDataMixin, TestCase): ] ) def test_custom_login_allowed_policy(self): - # The user is inactive, but our custom form policy allows them to log in. + # The user is inactive, but our custom form policy allows them to log + # in. 
data = { "username": "inactive", "password": "password", @@ -1322,9 +1323,9 @@ class PasswordResetFormTest(TestDataMixin, TestCase): def test_save_plaintext_email(self): """ - Test the PasswordResetForm.save() method with no html_email_template_name - parameter passed in. - Test to ensure original behavior is unchanged after the parameter was added. + Test the PasswordResetForm.save() method with no + html_email_template_name parameter passed in. Test to ensure original + behavior is unchanged after the parameter was added. """ (user, username, email) = self.create_dummy_user() form = PasswordResetForm({"email": email}) diff --git a/tests/auth_tests/test_hashers.py b/tests/auth_tests/test_hashers.py index ba7131406b..910238d2f5 100644 --- a/tests/auth_tests/test_hashers.py +++ b/tests/auth_tests/test_hashers.py @@ -379,7 +379,8 @@ class TestUtilsHashPass(SimpleTestCase): # Revert to the old iteration count and ... hasher.iterations = old_iterations - # ... check if the password would get updated to the new iteration count. + # ... check if the password would get updated to the new iteration + # count. 
self.assertTrue(check_password("letmein", encoded, setter)) self.assertTrue(state["upgraded"]) finally: diff --git a/tests/auth_tests/test_management.py b/tests/auth_tests/test_management.py index 9f12e631cc..0701ac2d68 100644 --- a/tests/auth_tests/test_management.py +++ b/tests/auth_tests/test_management.py @@ -200,7 +200,10 @@ class ChangepasswordManagementCommandTestCase(TestCase): @mock.patch.object(changepassword.Command, "_get_pass", return_value="not qwerty") def test_that_changepassword_command_changes_joes_password(self, mock_get_pass): - "Executing the changepassword management command should change joe's password" + """ + Executing the changepassword management command should change joe's + password + """ self.assertTrue(self.user.check_password("qwerty")) call_command("changepassword", username="joe", stdout=self.stdout) @@ -413,7 +416,10 @@ class CreatesuperuserManagementCommandTestCase(TestCase): @override_settings(AUTH_USER_MODEL="auth_tests.CustomUser") def test_swappable_user_missing_required_field(self): - "A Custom superuser won't be created when a required field isn't provided" + """ + A Custom superuser won't be created when a required field isn't + provided + """ # We can use the management command to create a superuser # We skip validation because the temporary substitution of the # swappable User model messes with validation. diff --git a/tests/auth_tests/test_remote_user.py b/tests/auth_tests/test_remote_user.py index 4b5902b586..c871ea7dc8 100644 --- a/tests/auth_tests/test_remote_user.py +++ b/tests/auth_tests/test_remote_user.py @@ -243,7 +243,8 @@ class RemoteUserTest(TestCase): # Known user authenticates response = self.client.get("/remote_user/", **{self.header: self.known_user}) self.assertEqual(response.context["user"].username, "knownuser") - # During the session, the REMOTE_USER header disappears. Should trigger logout. + # During the session, the REMOTE_USER header disappears. Should trigger + # logout. 
response = self.client.get("/remote_user/") self.assertTrue(response.context["user"].is_anonymous) # verify the remoteuser middleware will not remove a user @@ -262,7 +263,8 @@ class RemoteUserTest(TestCase): "/remote_user/", **{self.header: self.known_user} ) self.assertEqual(response.context["user"].username, "knownuser") - # During the session, the REMOTE_USER header disappears. Should trigger logout. + # During the session, the REMOTE_USER header disappears. Should trigger + # logout. response = await self.async_client.get("/remote_user/") self.assertTrue(response.context["user"].is_anonymous) # verify the remoteuser middleware will not remove a user diff --git a/tests/auth_tests/test_validators.py b/tests/auth_tests/test_validators.py index fdbf495ff5..2a3a93efe8 100644 --- a/tests/auth_tests/test_validators.py +++ b/tests/auth_tests/test_validators.py @@ -114,7 +114,8 @@ class PasswordValidationTest(SimpleTestCase): help_text = password_validators_help_text_html([AmpersandValidator()]) self.assertEqual(help_text, "<ul><li>Must contain &</li></ul>") - # help_text is marked safe and therefore unchanged by conditional_escape(). + # help_text is marked safe and therefore unchanged by + # conditional_escape(). self.assertEqual(help_text, conditional_escape(help_text)) @override_settings(AUTH_PASSWORD_VALIDATORS=[]) diff --git a/tests/auth_tests/test_views.py b/tests/auth_tests/test_views.py index e6419de8e6..0a4f7d28c8 100644 --- a/tests/auth_tests/test_views.py +++ b/tests/auth_tests/test_views.py @@ -153,8 +153,8 @@ class PasswordResetTest(AuthViewsTestCase): self.assertEqual(len(mail.outbox), 1) self.assertIn("http://", mail.outbox[0].body) self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) - # optional multipart text/html email has been added. Make sure original, - # default functionality is 100% the same + # optional multipart text/html email has been added. 
Make sure + # original, default functionality is 100% the same self.assertFalse(mail.outbox[0].message().is_multipart()) def test_extra_email_context(self): @@ -209,8 +209,8 @@ class PasswordResetTest(AuthViewsTestCase): # the colon is interpreted as part of a username for login purposes, # making 'evil.com' the request domain. Since HTTP_HOST is used to # produce a meaningful reset URL, we need to be certain that the - # HTTP_HOST header isn't poisoned. This is done as a check when get_host() - # is invoked, but we check here as a practical consequence. + # HTTP_HOST header isn't poisoned. This is done as a check when + # get_host() is invoked, but we check here as a practical consequence. with self.assertLogs("django.security.DisallowedHost", "ERROR"): response = self.client.post( "/password_reset/", @@ -223,7 +223,10 @@ class PasswordResetTest(AuthViewsTestCase): # Skip any 500 handler action (like sending more mail...) @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host_admin_site(self): - "Poisoned HTTP_HOST headers can't be used for reset emails on admin views" + """ + Poisoned HTTP_HOST headers can't be used for reset emails on admin + views + """ with self.assertLogs("django.security.DisallowedHost", "ERROR"): response = self.client.post( "/admin_password_reset/", @@ -733,7 +736,8 @@ class SessionAuthenticationTests(AuthViewsTestCase): def test_user_password_change_updates_session(self): """ #21649 - Ensure contrib.auth.views.password_change updates the user's - session auth hash after a password change so the session isn't logged out. + session auth hash after a password change so the session isn't logged + out. 
""" self.login() original_session_key = self.client.session.session_key @@ -901,8 +905,8 @@ class LoginTest(AuthViewsTestCase): def test_session_key_flushed_on_login(self): """ To avoid reusing another user's session, ensure a new, empty session is - created if the existing session corresponds to a different authenticated - user. + created if the existing session corresponds to a different + authenticated user. """ self.login() original_session_key = self.client.session.session_key diff --git a/tests/backends/base/test_schema.py b/tests/backends/base/test_schema.py index 5409789b13..06a9a0e28a 100644 --- a/tests/backends/base/test_schema.py +++ b/tests/backends/base/test_schema.py @@ -5,7 +5,9 @@ from django.test import SimpleTestCase class SchemaEditorTests(SimpleTestCase): def test_effective_default_callable(self): - """SchemaEditor.effective_default() shouldn't call callable defaults.""" + """ + SchemaEditor.effective_default() shouldn't call callable defaults. + """ class MyStr(str): def __call__(self): diff --git a/tests/backends/postgresql/tests.py b/tests/backends/postgresql/tests.py index 37c5ee562b..c5fa17041c 100644 --- a/tests/backends/postgresql/tests.py +++ b/tests/backends/postgresql/tests.py @@ -373,7 +373,8 @@ class Tests(TestCase): try: # Start a transaction so the isolation level isn't reported as 0. new_connection.set_autocommit(False) - # Check the level on the psycopg connection, not the Django wrapper. + # Check the level on the psycopg connection, not the Django + # wrapper. self.assertEqual( new_connection.connection.isolation_level, IsolationLevel.SERIALIZABLE, diff --git a/tests/backends/tests.py b/tests/backends/tests.py index 172d6b630b..3e708401c5 100644 --- a/tests/backends/tests.py +++ b/tests/backends/tests.py @@ -57,8 +57,8 @@ class DateQuotingTest(TestCase): def test_django_date_extract(self): """ - Test the custom ``django_date_extract method``, in particular against fields - which clash with strings passed to it (e.g. 
'day') (#12818). + Test the custom ``django_date_extract method``, in particular against + fields which clash with strings passed to it (e.g. 'day') (#12818). """ updated = datetime.datetime(2010, 2, 20) SchoolClass.objects.create(year=2009, last_updated=updated) @@ -234,7 +234,10 @@ class LongNameTest(TransactionTestCase): @skipUnlessDBFeature("supports_sequence_reset") class SequenceResetTest(TestCase): def test_generic_relation(self): - "Sequence names are correct when resetting generic relations (Ref #13941)" + """ + Sequence names are correct when resetting generic relations (Ref + #13941) + """ # Create an object with a manually specified PK Post.objects.create(id=10, name="1st post", text="hello world") @@ -737,7 +740,8 @@ class FkConstraintsTests(TransactionTestCase): def test_check_constraints(self): """ - Constraint checks should raise an IntegrityError when bad data is in the DB. + Constraint checks should raise an IntegrityError when bad data is in + the DB. """ with transaction.atomic(): # Create an Article. diff --git a/tests/basic/tests.py b/tests/basic/tests.py index f6eabfaed7..f8ec2715f6 100644 --- a/tests/basic/tests.py +++ b/tests/basic/tests.py @@ -179,7 +179,8 @@ class ModelInstanceCreationTests(TestCase): # You can use 'in' to test for membership... self.assertIn(a, Article.objects.all()) - # ... but there will often be more efficient ways if that is all you need: + # ... but there will often be more efficient ways if that is all you + # need: self.assertTrue(Article.objects.filter(id=a.id).exists()) def test_save_primary_with_default(self): @@ -785,7 +786,8 @@ class ManagerTest(SimpleTestCase): It's particularly useful to prevent accidentally leaking new methods into `Manager`. New `QuerySet` methods that should also be copied onto - `Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`. + `Manager` will need to be added to + `ManagerTest.QUERYSET_PROXY_METHODS`. 
""" self.assertEqual( sorted(BaseManager._get_queryset_methods(models.QuerySet)), diff --git a/tests/cache/tests.py b/tests/cache/tests.py index 2636a7d6ce..0b692194de 100644 --- a/tests/cache/tests.py +++ b/tests/cache/tests.py @@ -156,7 +156,9 @@ class DummyCacheTests(SimpleTestCase): self.assertIsNone(cache.get("key2")) def test_has_key(self): - "The has_key method doesn't ever return True for the dummy cache backend" + """ + The has_key method doesn't ever return True for the dummy cache backend + """ cache.set("hello1", "goodbye1") self.assertIs(cache.has_key("hello1"), False) self.assertIs(cache.has_key("goodbye1"), False) @@ -302,11 +304,10 @@ _caches_setting_base = { def caches_setting_for_tests(base=None, exclude=None, **params): - # `base` is used to pull in the memcached config from the original settings, - # `exclude` is a set of cache names denoting which `_caches_setting_base` keys - # should be omitted. - # `params` are test specific overrides and `_caches_settings_base` is the - # base config for the tests. + # `base` is used to pull in the memcached config from the original + # settings, `exclude` is a set of cache names denoting which + # `_caches_setting_base` keys should be omitted. `params` are test specific + # overrides and `_caches_settings_base` is the base config for the tests. 
# This results in the following search order: # params -> _caches_setting_base -> base base = base or {} @@ -469,7 +470,8 @@ class BaseCacheTests: self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_write_for_model_instance_with_deferred(self): - # Don't want fields with callable as default to be called on cache write + # Don't want fields with callable as default to be called on cache + # write expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") @@ -493,7 +495,8 @@ class BaseCacheTests: self.assertEqual(expensive_calculation.num_runs, 1) runs_before_cache_read = expensive_calculation.num_runs cache.get("deferred_queryset") - # We only want the default expensive calculation run on creation and set + # We only want the default expensive calculation run on creation and + # set self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read) def test_expiration(self): @@ -1166,7 +1169,8 @@ class BaseCacheTests: @override_settings( CACHES=caches_setting_for_tests( BACKEND="django.core.cache.backends.db.DatabaseCache", - # Spaces are used in the table name to ensure quoting/escaping is working + # Spaces are used in the table name to ensure quoting/escaping is + # working LOCATION="test cache table", ) ) @@ -1256,7 +1260,8 @@ class DBCacheTests(BaseCacheTests, TransactionTestCase): @override_settings( CACHES=caches_setting_for_tests( BACKEND="django.core.cache.backends.db.DatabaseCache", - # Use another table name to avoid the 'table already exists' message. + # Use another table name to avoid the 'table already exists' + # message. 
LOCATION="createcachetable_dry_run_mode", ) ) @@ -1409,7 +1414,9 @@ class LocMemCacheTests(BaseCacheTests, TestCase): self.assertFalse(bad_obj.locked, "Cache was locked during pickling") def test_incr_decr_timeout(self): - """incr/decr does not modify expiry time (matches memcached behavior)""" + """ + incr/decr does not modify expiry time (matches memcached behavior) + """ key = "value" _key = cache.make_key(key) cache.set(key, 1, timeout=cache.default_timeout * 10) @@ -1560,9 +1567,9 @@ class BaseMemcachedTests(BaseCacheTests): self.assertEqual(cache.get("future_foo"), "bar") def test_memcached_deletes_key_on_failed_set(self): - # By default memcached allows objects up to 1MB. For the cache_db session - # backend to always use the current session, memcached needs to delete - # the old key if it fails to set. + # By default memcached allows objects up to 1MB. For the cache_db + # session backend to always use the current session, memcached needs to + # delete the old key if it fails to set. max_value_length = 2**20 cache.set("small_value", "a") @@ -1577,7 +1584,8 @@ class BaseMemcachedTests(BaseCacheTests): # deleted, so the return/exception behavior for the set() itself is # not important. 
pass - # small_value should be deleted, or set if configured to accept larger values + # small_value should be deleted, or set if configured to accept larger + # values value = cache.get("small_value") self.assertTrue(value is None or value == large_value) @@ -2642,7 +2650,8 @@ class CacheMiddlewareTest(SimpleTestCase): response = default_view(request, "2") self.assertEqual(response.content, b"Hello World 1") - # Requesting the same view with the explicit cache should yield the same result + # Requesting the same view with the explicit cache should yield the + # same result response = explicit_default_view(request, "3") self.assertEqual(response.content, b"Hello World 1") diff --git a/tests/check_framework/test_security.py b/tests/check_framework/test_security.py index db21f13ea2..d5cc8a9ad6 100644 --- a/tests/check_framework/test_security.py +++ b/tests/check_framework/test_security.py @@ -215,8 +215,8 @@ class CheckStrictTransportSecurityTest(SimpleTestCase): @override_settings(MIDDLEWARE=[], SECURE_HSTS_SECONDS=0) def test_no_sts_no_middleware(self): """ - Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't - installed. + Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware + isn't installed. """ self.assertEqual(base.check_sts(None), []) diff --git a/tests/check_framework/test_urls.py b/tests/check_framework/test_urls.py index a31c5fd856..a3b219f3a4 100644 --- a/tests/check_framework/test_urls.py +++ b/tests/check_framework/test_urls.py @@ -24,7 +24,8 @@ class CheckUrlConfigTests(SimpleTestCase): @override_settings(ROOT_URLCONF="check_framework.urls.warning_in_include") def test_check_resolver_recursive(self): - # The resolver is checked recursively (examining URL patterns in include()). + # The resolver is checked recursively (examining URL patterns in + # include()). 
result = check_url_config(None) self.assertEqual(len(result), 1) warning = result[0] diff --git a/tests/contenttypes_tests/test_fields.py b/tests/contenttypes_tests/test_fields.py index 764b9fa7db..c2b12b58bc 100644 --- a/tests/contenttypes_tests/test_fields.py +++ b/tests/contenttypes_tests/test_fields.py @@ -61,7 +61,8 @@ class GenericForeignKeyTests(TestCase): Answer.objects.create(text="answer", question=question) answer = Answer.objects.defer("text").get() old_question_obj = answer.question - # The reverse relation is refreshed even when the text field is deferred. + # The reverse relation is refreshed even when the text field is + # deferred. answer.refresh_from_db() self.assertIsNot(answer.question, old_question_obj) diff --git a/tests/contenttypes_tests/test_views.py b/tests/contenttypes_tests/test_views.py index 8cc11de3cb..bec79105c4 100644 --- a/tests/contenttypes_tests/test_views.py +++ b/tests/contenttypes_tests/test_views.py @@ -63,7 +63,10 @@ class ContentTypesViewsTests(TestCase): Site.objects.clear_cache() def test_shortcut_with_absolute_url(self): - "Can view a shortcut for an Author object that has a get_absolute_url method" + """ + Can view a shortcut for an Author object that has a get_absolute_url + method + """ for obj in Author.objects.all(): with self.subTest(obj=obj): short_url = "/shortcut/%s/%s/" % ( diff --git a/tests/csrf_tests/tests.py b/tests/csrf_tests/tests.py index 765d950b85..91b1046159 100644 --- a/tests/csrf_tests/tests.py +++ b/tests/csrf_tests/tests.py @@ -441,7 +441,8 @@ class CsrfViewMiddlewareTestMixin(CsrfFunctionTestMixin): def test_process_request_csrf_cookie_and_token(self): """ - If both a cookie and a token is present, the middleware lets it through. + If both a cookie and a token is present, the middleware lets it + through. 
""" req = self._get_POST_request_with_token() mw = CsrfViewMiddleware(post_form_view) @@ -718,14 +719,16 @@ class CsrfViewMiddlewareTestMixin(CsrfFunctionTestMixin): self.assertContains(response, malformed_referer_msg, status_code=403) # missing scheme # >>> urlsplit('//example.com/') - # SplitResult(scheme='', netloc='example.com', path='/', query='', fragment='') + # SplitResult(scheme='', netloc='example.com', path='/', query='', + # fragment='') req.META["HTTP_REFERER"] = "//example.com/" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing netloc # >>> urlsplit('https://') - # SplitResult(scheme='https', netloc='', path='', query='', fragment='') + # SplitResult(scheme='https', netloc='', path='', query='', + # fragment='') req.META["HTTP_REFERER"] = "https://" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) diff --git a/tests/custom_lookups/tests.py b/tests/custom_lookups/tests.py index cc6f5c7f8f..6728c49afd 100644 --- a/tests/custom_lookups/tests.py +++ b/tests/custom_lookups/tests.py @@ -518,7 +518,8 @@ class YearLteTests(TestCase): def test_custom_implementation_year_exact(self): try: - # Two ways to add a customized implementation for different backends: + # Two ways to add a customized implementation for different + # backends: # First is MonkeyPatch of the class. def as_custom_sql(self, compiler, connection): lhs_sql, lhs_params = self.process_lhs( diff --git a/tests/custom_managers/tests.py b/tests/custom_managers/tests.py index b84b157da5..3d9485c13b 100644 --- a/tests/custom_managers/tests.py +++ b/tests/custom_managers/tests.py @@ -65,9 +65,11 @@ class CustomManagerTests(TestCase): for manager_name in self.custom_manager_names: with self.subTest(manager_name=manager_name): manager = getattr(Person, manager_name) - # Methods with queryset_only=False are copied even if they are private. 
+ # Methods with queryset_only=False are copied even if they are + # private. manager._optin_private_method() - # Methods with queryset_only=True aren't copied even if they are public. + # Methods with queryset_only=True aren't copied even if they + # are public. msg = ( "%r object has no attribute 'optout_public_method'" % manager.__class__.__name__ diff --git a/tests/datatypes/tests.py b/tests/datatypes/tests.py index fa08ed878d..3469311163 100644 --- a/tests/datatypes/tests.py +++ b/tests/datatypes/tests.py @@ -34,7 +34,8 @@ class DataTypesTestCase(TestCase): self.assertEqual(d2.consumed_at, datetime.datetime(2007, 4, 20, 16, 19, 59)) def test_time_field(self): - # Test for ticket #12059: TimeField wrongly handling datetime.datetime object. + # Test for ticket #12059: TimeField wrongly handling datetime.datetime + # object. d = Donut(name="Apple Fritter") d.baked_time = datetime.datetime( year=2007, month=4, day=20, hour=16, minute=19, second=59 @@ -91,8 +92,8 @@ class DataTypesTestCase(TestCase): @skipIfDBFeature("supports_timezones") def test_error_on_timezone(self): - """Regression test for #8354: the MySQL and Oracle backends should raise - an error if given a timezone-aware datetime object.""" + """Regression test for #8354: the MySQL and Oracle backends should + raise an error if given a timezone-aware datetime object.""" dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=datetime.UTC) d = Donut(name="Bear claw", consumed_at=dt) # MySQL backend does not support timezone-aware datetimes. 
diff --git a/tests/decorators/tests.py b/tests/decorators/tests.py index 1f8d623e02..fce64a8834 100644 --- a/tests/decorators/tests.py +++ b/tests/decorators/tests.py @@ -134,7 +134,8 @@ def simple_dec(func): simple_dec_m = method_decorator(simple_dec) -# For testing method_decorator, two decorators that add an attribute to the function +# For testing method_decorator, two decorators that add an attribute to the +# function def myattr_dec(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) diff --git a/tests/defer/tests.py b/tests/defer/tests.py index 989b5c63d7..c0968080b1 100644 --- a/tests/defer/tests.py +++ b/tests/defer/tests.py @@ -169,7 +169,8 @@ class DeferTests(AssertionMixin, TestCase): # You can retrieve a single column on a base class with no fields Child.objects.create(name="c1", value="foo", related=self.s1) obj = Child.objects.only("name").get(name="c1") - # on an inherited model, its PK is also fetched, hence '3' deferred fields. + # on an inherited model, its PK is also fetched, hence '3' deferred + # fields. self.assert_delayed(obj, 3) self.assertEqual(obj.name, "c1") self.assertEqual(obj.value, "foo") @@ -215,7 +216,8 @@ class BigChildDeferTests(AssertionMixin, TestCase): def test_only_baseclass_when_subclass_has_added_field(self): # You can retrieve a single field on a baseclass obj = BigChild.objects.only("name").get(name="b1") - # when inherited model, its PK is also fetched, hence '4' deferred fields. + # when inherited model, its PK is also fetched, hence '4' deferred + # fields. 
self.assert_delayed(obj, 4) self.assertEqual(obj.name, "b1") self.assertEqual(obj.value, "foo") diff --git a/tests/defer_regress/tests.py b/tests/defer_regress/tests.py index c45a503630..2089c8603f 100644 --- a/tests/defer_regress/tests.py +++ b/tests/defer_regress/tests.py @@ -96,7 +96,8 @@ class DeferRegressionTest(TestCase): self.assertEqual(results[0].child.name, "c1") self.assertEqual(results[0].second_child.name, "c2") - # Regression for #16409 - make sure defer() and only() work with annotate() + # Regression for #16409 - make sure defer() and only() work with + # annotate() self.assertIsInstance( list(SimpleItem.objects.annotate(Count("feature")).defer("name")), list ) @@ -105,7 +106,8 @@ class DeferRegressionTest(TestCase): ) def test_ticket_16409(self): - # Regression for #16409 - make sure defer() and only() work with annotate() + # Regression for #16409 - make sure defer() and only() work with + # annotate() self.assertIsInstance( list(SimpleItem.objects.annotate(Count("feature")).defer("name")), list ) @@ -161,8 +163,8 @@ class DeferRegressionTest(TestCase): self.assertEqual( len(Item.objects.select_related("one_to_one_item").defer("value")), 1 ) - # Make sure that `only()` doesn't break when we pass in a unique relation, - # rather than a field on the relation. + # Make sure that `only()` doesn't break when we pass in a unique + # relation, rather than a field on the relation. self.assertEqual(len(Item.objects.only("one_to_one_item")), 1) with self.assertNumQueries(1): i = Item.objects.select_related("one_to_one_item")[0] diff --git a/tests/delete/tests.py b/tests/delete/tests.py index 09c9a0a818..7b9dcdb079 100644 --- a/tests/delete/tests.py +++ b/tests/delete/tests.py @@ -517,8 +517,8 @@ class DeletionTests(TestCase): batch_size = connection.ops.bulk_batch_size(["pk"], objs) # The related fetches are done in batches. batches = ceil(len(objs) / batch_size) - # One query for Avatar.objects.all() and then one related fast delete for - # each batch. 
+ # One query for Avatar.objects.all() and then one related fast delete + # for each batch. fetches_to_mem = 1 + batches # The Avatar objects are going to be deleted in batches of # GET_ITERATOR_CHUNK_SIZE. diff --git a/tests/dispatch/tests.py b/tests/dispatch/tests.py index e91d29abdd..745860e83e 100644 --- a/tests/dispatch/tests.py +++ b/tests/dispatch/tests.py @@ -146,7 +146,8 @@ class DispatcherTests(SimpleTestCase): try: self.assertIsNone(wref()) finally: - # Disconnect after reference check since it flushes the tested cache. + # Disconnect after reference check since it flushes the tested + # cache. d_signal.disconnect(receiver_1_arg) def test_multiple_registration(self): diff --git a/tests/expressions_window/tests.py b/tests/expressions_window/tests.py index 4480e15eb4..2d68099545 100644 --- a/tests/expressions_window/tests.py +++ b/tests/expressions_window/tests.py @@ -1293,7 +1293,9 @@ class WindowFunctionTests(TestCase): ) def test_range_unbound(self): - """A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING.""" + """ + A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING. 
+ """ qs = Employee.objects.annotate( sum=Window( expression=Sum("salary"), diff --git a/tests/extra_regress/tests.py b/tests/extra_regress/tests.py index f495b2eccf..786a1b8a9a 100644 --- a/tests/extra_regress/tests.py +++ b/tests/extra_regress/tests.py @@ -307,7 +307,8 @@ class ExtraRegressTests(TestCase): [("first", "second")], ) - # Extra columns after a non-empty values_list() clause are ignored completely + # Extra columns after a non-empty values_list() clause are ignored + # completely self.assertEqual( list( TestObject.objects.values_list("first", "second").extra( diff --git a/tests/field_deconstruction/tests.py b/tests/field_deconstruction/tests.py index 41353cbaaf..8c500349fc 100644 --- a/tests/field_deconstruction/tests.py +++ b/tests/field_deconstruction/tests.py @@ -333,7 +333,8 @@ class FieldDeconstructionTests(SimpleTestCase): def test_foreign_key_swapped(self): with isolate_lru_cache(apps.get_swappable_settings_name): # It doesn't matter that we swapped out user for permission; - # there's no validation. We just want to check the setting stuff works. + # there's no validation. We just want to check the setting stuff + # works. field = models.ForeignKey("auth.Permission", models.CASCADE) name, path, args, kwargs = field.deconstruct() @@ -570,7 +571,8 @@ class FieldDeconstructionTests(SimpleTestCase): def test_many_to_many_field_swapped(self): with isolate_lru_cache(apps.get_swappable_settings_name): # It doesn't matter that we swapped out user for permission; - # there's no validation. We just want to check the setting stuff works. + # there's no validation. We just want to check the setting stuff + # works. 
field = models.ManyToManyField("auth.Permission") name, path, args, kwargs = field.deconstruct() diff --git a/tests/file_storage/tests.py b/tests/file_storage/tests.py index fde07a955c..a0234b2f9d 100644 --- a/tests/file_storage/tests.py +++ b/tests/file_storage/tests.py @@ -359,7 +359,8 @@ class FileStorageTests(SimpleTestCase): self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file" ) - # #25905: remove leading slashes from file names to prevent unsafe url output + # #25905: remove leading slashes from file names to prevent unsafe url + # output self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com") self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com") @@ -406,8 +407,8 @@ class FileStorageTests(SimpleTestCase): def test_file_storage_prevents_directory_traversal(self): """ - File storage prevents directory traversal (files can only be accessed if - they're below the storage location). + File storage prevents directory traversal (files can only be accessed + if they're below the storage location). """ with self.assertRaises(SuspiciousFileOperation): self.storage.exists("..") @@ -434,7 +435,8 @@ class FileStorageTests(SimpleTestCase): def test_makedirs_race_handling(self): """ - File storage should be robust against directory creation race conditions. + File storage should be robust against directory creation race + conditions. """ real_makedirs = os.makedirs @@ -824,7 +826,8 @@ class FileFieldStorageTests(TestCase): obj.normal.close() def test_duplicate_filename(self): - # Multiple files with the same name get _(7 random chars) appended to them. + # Multiple files with the same name get _(7 random chars) appended to + # them. 
tests = [
             ("multiple_files", "txt"),
             ("multiple_files_many_extensions", "tar.gz"),
@@ -861,7 +864,8 @@ class FileFieldStorageTests(TestCase):
             self.assertEqual(names[0], "tests/%s" % filename)
             self.assertRegex(names[1], "tests/fi_%s.ext" % FILE_SUFFIX_REGEX)
 
-            # Testing exception is raised when filename is too short to truncate.
+            # Testing exception is raised when filename is too short to
+            # truncate.
             filename = "short.longext"
             objs[0].limited_length.save(filename, ContentFile("Same Content"))
             with self.assertRaisesMessage(
diff --git a/tests/file_uploads/tests.py b/tests/file_uploads/tests.py
index b6b4bb0c69..6374b4cee6 100644
--- a/tests/file_uploads/tests.py
+++ b/tests/file_uploads/tests.py
@@ -184,7 +184,8 @@ class FileUploadTests(TestCase):
 
     def test_unicode_file_name(self):
         with sys_tempfile.TemporaryDirectory() as temp_dir:
-            # This file contains Chinese symbols and an accented char in the name.
+            # This file contains Chinese symbols and an accented char in the
+            # name.
             with open(os.path.join(temp_dir, UNICODE_FILENAME), "w+b") as file1:
                 file1.write(b"b" * (2**10))
                 file1.seek(0)
@@ -372,12 +373,14 @@ class FileUploadTests(TestCase):
         self.assertEqual(received["file"], "non-printable_chars.txt")
 
     def test_dangerous_file_names(self):
-        """Uploaded file names should be sanitized before ever reaching the view."""
+        """
+        Uploaded file names should be sanitized before ever reaching the view.
+        """
         # This test simulates possible directory traversal attacks by a
-        # malicious uploader We have to do some monkeybusiness here to construct
-        # a malicious payload with an invalid file name (containing os.sep or
-        # os.pardir). This similar to what an attacker would need to do when
-        # trying such an attack.
+        # malicious uploader. We have to do some monkeybusiness here to
+        # construct a malicious payload with an invalid file name (containing
+        # os.sep or os.pardir). This is similar to what an attacker would
+        # need to do when trying such an attack.
payload = client.FakePayload() for i, name in enumerate(CANDIDATE_TRAVERSAL_FILE_NAMES): payload.write( @@ -402,14 +405,18 @@ class FileUploadTests(TestCase): "wsgi.input": payload, } response = self.client.request(**r) - # The filenames should have been sanitized by the time it got to the view. + # The filenames should have been sanitized by the time it got to the + # view. received = response.json() for i, name in enumerate(CANDIDATE_TRAVERSAL_FILE_NAMES): got = received["file%s" % i] self.assertEqual(got, "hax0rd.txt") def test_filename_overflow(self): - """File names over 256 characters (dangerous on some platforms) get fixed up.""" + """ + File names over 256 characters (dangerous on some platforms) get fixed + up. + """ long_str = "f" * 300 cases = [ # field name, filename, expected @@ -741,7 +748,8 @@ class FileUploadTests(TestCase): # Maybe this is a little more complicated that it needs to be; but if # the django.test.client.FakePayload.read() implementation changes then # this test would fail. So we need to know exactly what kind of error - # it raises when there is an attempt to read more than the available bytes: + # it raises when there is an attempt to read more than the available + # bytes: try: client.FakePayload(b"a").read(2) except Exception as err: diff --git a/tests/file_uploads/views.py b/tests/file_uploads/views.py index c1d4ca5358..f4f3b5c514 100644 --- a/tests/file_uploads/views.py +++ b/tests/file_uploads/views.py @@ -135,7 +135,8 @@ def file_upload_interrupted_temporary_file(request): def file_upload_getlist_count(request): """ - Check the .getlist() function to ensure we receive the correct number of files. + Check the .getlist() function to ensure we receive the correct number of + files. 
""" file_counts = {} diff --git a/tests/files/tests.py b/tests/files/tests.py index 7e365aae39..cfda70053f 100644 --- a/tests/files/tests.py +++ b/tests/files/tests.py @@ -463,8 +463,8 @@ class FileMoveSafeTests(unittest.TestCase): ): with self.assertRaises(OSError): file_move_safe(self.file_a, self.file_b, allow_overwrite=True) - # When copystat() throws PermissionError, copymode() error besides - # PermissionError isn't ignored. + # When copystat() throws PermissionError, copymode() error + # besides PermissionError isn't ignored. with mock.patch( "django.core.files.move.copystat", side_effect=permission_error ): diff --git a/tests/fixtures/tests.py b/tests/fixtures/tests.py index bce55bc355..48e3182b59 100644 --- a/tests/fixtures/tests.py +++ b/tests/fixtures/tests.py @@ -239,7 +239,8 @@ class FixtureLoadingTests(DumpDataAssertMixin, TestCase): '"pub_date": "2006-06-16T13:00:00"}}]', ) - # Specify one model from one application, and an entire other application. + # Specify one model from one application, and an entire other + # application. self._dumpdata_assert( ["fixtures.Category", "sites"], '[{"pk": 1, "model": "fixtures.category", "fields": ' @@ -272,7 +273,8 @@ class FixtureLoadingTests(DumpDataAssertMixin, TestCase): ], ) - # Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne. + # Load fixture 6, JSON file with dynamic ContentType fields. Testing + # ManyToOne. management.call_command("loaddata", "fixture6.json", verbosity=0) self.assertQuerySetEqual( Tag.objects.all(), @@ -284,7 +286,8 @@ class FixtureLoadingTests(DumpDataAssertMixin, TestCase): ordered=False, ) - # Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne. + # Load fixture 7, XML file with dynamic ContentType fields. Testing + # ManyToOne. 
management.call_command("loaddata", "fixture7.xml", verbosity=0) self.assertQuerySetEqual( Tag.objects.all(), @@ -298,7 +301,8 @@ class FixtureLoadingTests(DumpDataAssertMixin, TestCase): ordered=False, ) - # Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany. + # Load fixture 8, JSON file with dynamic Permission fields. Testing + # ManyToMany. management.call_command("loaddata", "fixture8.json", verbosity=0) self.assertQuerySetEqual( Visa.objects.all(), @@ -312,7 +316,8 @@ class FixtureLoadingTests(DumpDataAssertMixin, TestCase): ordered=False, ) - # Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany. + # Load fixture 9, XML file with dynamic Permission fields. Testing + # ManyToMany. management.call_command("loaddata", "fixture9.xml", verbosity=0) self.assertQuerySetEqual( Visa.objects.all(), @@ -344,7 +349,8 @@ class FixtureLoadingTests(DumpDataAssertMixin, TestCase): '{"name": "Music for all ages", "authors": [3, 1]}}]', ) - # But you can get natural keys if you ask for them and they are available + # But you can get natural keys if you ask for them and they are + # available self._dumpdata_assert( ["fixtures.book"], '[{"pk": 1, "model": "fixtures.book", "fields": ' @@ -548,7 +554,8 @@ class FixtureLoadingTests(DumpDataAssertMixin, TestCase): exclude_list=["fixtures.Article", "fixtures.Book"], ) - # Excluding sites and fixtures.Article/Book should only leave fixtures.Category + # Excluding sites and fixtures.Article/Book should only leave + # fixtures.Category self._dumpdata_assert( ["sites", "fixtures"], '[{"pk": 1, "model": "fixtures.category", "fields": ' @@ -852,7 +859,8 @@ class FixtureLoadingTests(DumpDataAssertMixin, TestCase): self.assertEqual(Article.objects.get().headline, "Django pets kitten") def test_compressed_specified_loading(self): - # Load fixture 5 (compressed), using format *and* compression specification + # Load fixture 5 (compressed), using format *and* compression + # specification 
management.call_command("loaddata", "fixture5.json.zip", verbosity=0) self.assertEqual( Article.objects.get().headline, diff --git a/tests/fixtures_regress/tests.py b/tests/fixtures_regress/tests.py index 96d33e2df7..999555effe 100644 --- a/tests/fixtures_regress/tests.py +++ b/tests/fixtures_regress/tests.py @@ -307,8 +307,9 @@ class TestFixtures(TestCase): def test_empty(self): """ - Test for ticket #18213 -- Loading a fixture file with no data output a warning. - Previously empty fixture raises an error exception, see ticket #4371. + Test for ticket #18213 -- Loading a fixture file with no data output a + warning. Previously empty fixture raises an error exception, see ticket + #4371. """ msg = "No fixture data found for 'empty'. (File format may be invalid.)" with self.assertWarnsMessage(RuntimeWarning, msg): @@ -598,7 +599,8 @@ class TestFixtures(TestCase): def test_fixture_dirs_with_default_fixture_path(self): """ settings.FIXTURE_DIRS cannot contain a default fixtures directory - for application (app/fixtures) in order to avoid repeated fixture loading. + for application (app/fixtures) in order to avoid repeated fixture + loading. """ msg = ( "'%s' is a default fixture directory for the '%s' app " @@ -612,7 +614,8 @@ class TestFixtures(TestCase): def test_fixture_dirs_with_default_fixture_path_as_pathlib(self): """ settings.FIXTURE_DIRS cannot contain a default fixtures directory - for application (app/fixtures) in order to avoid repeated fixture loading. + for application (app/fixtures) in order to avoid repeated fixture + loading. """ msg = ( "'%s' is a default fixture directory for the '%s' app " @@ -686,7 +689,8 @@ class NaturalKeyFixtureTests(TestCase): def test_nk_on_serialize(self): """ - Natural key requirements are taken into account when serializing models. + Natural key requirements are taken into account when serializing + models. 
""" management.call_command( "loaddata", @@ -869,7 +873,8 @@ class M2MNaturalKeyFixtureTests(TestCase): def test_dependency_sorting_m2m_simple(self): """ - M2M relations without explicit through models SHOULD count as dependencies + M2M relations without explicit through models SHOULD count as + dependencies Regression test for bugs that could be caused by flawed fixes to #14226, namely if M2M checks are removed from sort_dependencies @@ -908,7 +913,8 @@ class M2MNaturalKeyFixtureTests(TestCase): def test_dependency_sorting_m2m_complex_circular_1(self): """ - Circular M2M relations with explicit through models should be serializable + Circular M2M relations with explicit through models should be + serializable """ A, B, C, AtoB, BtoC, CtoA = ( M2MComplexCircular1A, @@ -929,8 +935,9 @@ class M2MNaturalKeyFixtureTests(TestCase): def test_dependency_sorting_m2m_complex_circular_2(self): """ - Circular M2M relations with explicit through models should be serializable - This test tests the circularity with explicit natural_key.dependencies + Circular M2M relations with explicit through models should be + serializable This test tests the circularity with explicit + natural_key.dependencies """ sorted_deps = serializers.sort_dependencies( [ @@ -945,7 +952,8 @@ class M2MNaturalKeyFixtureTests(TestCase): def test_dump_and_load_m2m_simple(self): """ - Test serializing and deserializing back models with simple M2M relations + Test serializing and deserializing back models with simple M2M + relations """ a = M2MSimpleA.objects.create(data="a") b1 = M2MSimpleB.objects.create(data="b1") diff --git a/tests/flatpages_tests/test_csrf.py b/tests/flatpages_tests/test_csrf.py index 62ac5f9a14..702b0c5614 100644 --- a/tests/flatpages_tests/test_csrf.py +++ b/tests/flatpages_tests/test_csrf.py @@ -70,7 +70,10 @@ class FlatpageCSRFTests(TestCase): self.client = Client(enforce_csrf_checks=True) def test_view_flatpage(self): - "A flatpage can be served through a view, even when the 
middleware is in use" + """ + A flatpage can be served through a view, even when the middleware is in + use + """ response = self.client.get("/flatpage_root/flatpage/") self.assertContains(response, "<p>Isn't it flat!</p>") diff --git a/tests/flatpages_tests/test_middleware.py b/tests/flatpages_tests/test_middleware.py index 581947e9f6..5d779c656c 100644 --- a/tests/flatpages_tests/test_middleware.py +++ b/tests/flatpages_tests/test_middleware.py @@ -69,7 +69,10 @@ class TestDataMixin: ) class FlatpageMiddlewareTests(TestDataMixin, TestCase): def test_view_flatpage(self): - "A flatpage can be served through a view, even when the middleware is in use" + """ + A flatpage can be served through a view, even when the middleware is in + use + """ response = self.client.get("/flatpage_root/flatpage/") self.assertContains(response, "<p>Isn't it flat!</p>") @@ -161,7 +164,10 @@ class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase): self.assertEqual(response.status_code, 404) def test_redirect_fallback_flatpage(self): - "A flatpage can be served by the fallback middleware and should add a slash" + """ + A flatpage can be served by the fallback middleware and should add a + slash + """ response = self.client.get("/flatpage") self.assertRedirects(response, "/flatpage/", status_code=301) @@ -193,7 +199,10 @@ class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase): ) def test_redirect_fallback_flatpage_root(self): - "A flatpage at / should not cause a redirect loop when APPEND_SLASH is set" + """ + A flatpage at / should not cause a redirect loop when APPEND_SLASH is + set + """ fp = FlatPage.objects.create( url="/", title="Root", diff --git a/tests/flatpages_tests/test_templatetags.py b/tests/flatpages_tests/test_templatetags.py index eb36ee375b..f21940db79 100644 --- a/tests/flatpages_tests/test_templatetags.py +++ b/tests/flatpages_tests/test_templatetags.py @@ -50,7 +50,10 @@ class FlatpageTemplateTagTests(TestCase): cls.fp4.sites.add(cls.site1) def 
test_get_flatpages_tag(self): - "The flatpage template tag retrieves unregistered prefixed flatpages by default" + """ + The flatpage template tag retrieves unregistered prefixed flatpages by + default + """ out = Template( "{% load flatpages %}" "{% get_flatpages as flatpages %}" @@ -75,7 +78,10 @@ class FlatpageTemplateTagTests(TestCase): self.assertEqual(out, "A Flatpage,A Nested Flatpage,") def test_get_flatpages_tag_for_user(self): - "The flatpage template tag retrieves all flatpages for an authenticated user" + """ + The flatpage template tag retrieves all flatpages for an authenticated + user + """ me = User.objects.create_user("testuser", "test@example.com", "s3krit") out = Template( "{% load flatpages %}" @@ -89,7 +95,10 @@ class FlatpageTemplateTagTests(TestCase): ) def test_get_flatpages_with_prefix(self): - "The flatpage template tag retrieves unregistered prefixed flatpages by default" + """ + The flatpage template tag retrieves unregistered prefixed flatpages by + default + """ out = Template( "{% load flatpages %}" "{% get_flatpages '/location/' as location_flatpages %}" diff --git a/tests/foreign_object/models/article.py b/tests/foreign_object/models/article.py index dedeb8fb2d..276296c8d4 100644 --- a/tests/foreign_object/models/article.py +++ b/tests/foreign_object/models/article.py @@ -28,8 +28,8 @@ class ColConstraint: class ActiveTranslationField(models.ForeignObject): """ - This field will allow querying and fetching the currently active translation - for Article from ArticleTranslation. + This field will allow querying and fetching the currently active + translation for Article from ArticleTranslation. 
""" requires_unique_target = False diff --git a/tests/foreign_object/tests.py b/tests/foreign_object/tests.py index 696dfcd5fd..b4072d500d 100644 --- a/tests/foreign_object/tests.py +++ b/tests/foreign_object/tests.py @@ -72,7 +72,8 @@ class MultiColumnFKTests(TestCase): getattr(membership, "person") def test_reverse_query_returns_correct_result(self): - # Creating a valid membership because it has the same country has the person + # Creating a valid membership because it has the same country has the + # person Membership.objects.create( membership_country_id=self.usa.id, person_id=self.bob.id, diff --git a/tests/forms_tests/field_tests/test_booleanfield.py b/tests/forms_tests/field_tests/test_booleanfield.py index d6b5121b10..f1c06c3c01 100644 --- a/tests/forms_tests/field_tests/test_booleanfield.py +++ b/tests/forms_tests/field_tests/test_booleanfield.py @@ -50,7 +50,8 @@ class BooleanFieldTest(SimpleTestCase): self.assertTrue(f.has_changed(False, "on")) self.assertFalse(f.has_changed(True, "on")) self.assertTrue(f.has_changed(True, "")) - # Initial value may have mutated to a string due to show_hidden_initial (#19537) + # Initial value may have mutated to a string due to show_hidden_initial + # (#19537) self.assertTrue(f.has_changed("False", "on")) # HiddenInput widget sends string values for boolean but doesn't clean # them in value_from_datadict. diff --git a/tests/forms_tests/field_tests/test_charfield.py b/tests/forms_tests/field_tests/test_charfield.py index 2c3f9b7ebe..e1d89523f8 100644 --- a/tests/forms_tests/field_tests/test_charfield.py +++ b/tests/forms_tests/field_tests/test_charfield.py @@ -80,7 +80,8 @@ class CharFieldTest(FormFieldAssertionsMixin, SimpleTestCase): minlength/maxlength if min_length/max_length are defined on the field and the widget is not hidden. """ - # Return an empty dictionary if max_length and min_length are both None. + # Return an empty dictionary if max_length and min_length are both + # None. 
f = CharField() self.assertEqual(f.widget_attrs(TextInput()), {}) self.assertEqual(f.widget_attrs(Textarea()), {}) diff --git a/tests/forms_tests/field_tests/test_datefield.py b/tests/forms_tests/field_tests/test_datefield.py index 65ac76319d..a8f39aa8e8 100644 --- a/tests/forms_tests/field_tests/test_datefield.py +++ b/tests/forms_tests/field_tests/test_datefield.py @@ -215,6 +215,6 @@ class DateFieldTest(SimpleTestCase): try: f.strptime("31 мая 2011", "%d-%b-%y") except Exception as e: - # assertIsInstance or assertRaises cannot be used because UnicodeEncodeError - # is a subclass of ValueError + # assertIsInstance or assertRaises cannot be used because + # UnicodeEncodeError is a subclass of ValueError self.assertEqual(e.__class__, ValueError) diff --git a/tests/forms_tests/field_tests/test_filefield.py b/tests/forms_tests/field_tests/test_filefield.py index 9744981471..d407df99c9 100644 --- a/tests/forms_tests/field_tests/test_filefield.py +++ b/tests/forms_tests/field_tests/test_filefield.py @@ -105,8 +105,8 @@ class FileFieldTest(SimpleTestCase): # A file was not uploaded, but there is initial data self.assertFalse(f.has_changed("resume.txt", None)) - # A file was uploaded and there is initial data (file identity is not dealt - # with here) + # A file was uploaded and there is initial data (file identity is not + # dealt with here) self.assertTrue( f.has_changed( "resume.txt", {"filename": "resume.txt", "content": "My resume"} diff --git a/tests/forms_tests/field_tests/test_typedchoicefield.py b/tests/forms_tests/field_tests/test_typedchoicefield.py index 52a83eca37..3537623272 100644 --- a/tests/forms_tests/field_tests/test_typedchoicefield.py +++ b/tests/forms_tests/field_tests/test_typedchoicefield.py @@ -19,13 +19,15 @@ class TypedChoiceFieldTest(SimpleTestCase): self.assertEqual(1.0, f.clean("1")) def test_typedchoicefield_3(self): - # This can also cause weirdness: be careful (bool(-1) == True, remember) + # This can also cause weirdness: be careful 
(bool(-1) == True, + # remember) f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool) self.assertTrue(f.clean("-1")) def test_typedchoicefield_4(self): - # Even more weirdness: if you have a valid choice but your coercion function - # can't coerce, you'll still get a validation error. Don't do this! + # Even more weirdness: if you have a valid choice but your coercion + # function can't coerce, you'll still get a validation error. Don't do + # this! f = TypedChoiceField(choices=[("A", "A"), ("B", "B")], coerce=int) msg = "'Select a valid choice. B is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): @@ -40,7 +42,8 @@ class TypedChoiceFieldTest(SimpleTestCase): choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False ) self.assertEqual("", f.clean("")) - # If you want cleaning an empty value to return a different type, tell the field + # If you want cleaning an empty value to return a different type, tell + # the field def test_typedchoicefield_6(self): f = TypedChoiceField( diff --git a/tests/forms_tests/field_tests/test_typedmultiplechoicefield.py b/tests/forms_tests/field_tests/test_typedmultiplechoicefield.py index 6810f1ed19..e874e192d8 100644 --- a/tests/forms_tests/field_tests/test_typedmultiplechoicefield.py +++ b/tests/forms_tests/field_tests/test_typedmultiplechoicefield.py @@ -19,7 +19,8 @@ class TypedMultipleChoiceFieldTest(SimpleTestCase): self.assertEqual([1.0], f.clean(["1"])) def test_typedmultiplechoicefield_3(self): - # This can also cause weirdness: be careful (bool(-1) == True, remember) + # This can also cause weirdness: be careful (bool(-1) == True, + # remember) f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool) self.assertEqual([True], f.clean(["-1"])) @@ -31,8 +32,9 @@ class TypedMultipleChoiceFieldTest(SimpleTestCase): f.clean(["1", "2"]) def test_typedmultiplechoicefield_5(self): - # Even more weirdness: if you have a valid choice but your coercion function - # 
can't coerce, you'll still get a validation error. Don't do this! + # Even more weirdness: if you have a valid choice but your coercion + # function can't coerce, you'll still get a validation error. Don't do + # this! f = TypedMultipleChoiceField(choices=[("A", "A"), ("B", "B")], coerce=int) msg = "'Select a valid choice. B is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): @@ -49,7 +51,8 @@ class TypedMultipleChoiceFieldTest(SimpleTestCase): self.assertEqual([], f.clean([])) def test_typedmultiplechoicefield_7(self): - # If you want cleaning an empty value to return a different type, tell the field + # If you want cleaning an empty value to return a different type, tell + # the field f = TypedMultipleChoiceField( choices=[(1, "+1"), (-1, "-1")], coerce=int, diff --git a/tests/forms_tests/tests/test_forms.py b/tests/forms_tests/tests/test_forms.py index ae19ef1d0c..5b0c4b9a04 100644 --- a/tests/forms_tests/tests/test_forms.py +++ b/tests/forms_tests/tests/test_forms.py @@ -81,9 +81,9 @@ class MultiValueDictLike(dict): class FormsTestCase(SimpleTestCase): - # A Form is a collection of Fields. It knows how to validate a set of data and it - # knows how to render itself in a couple of default ways (e.g., an HTML table). - # You can pass it data in __init__(), as a dictionary. + # A Form is a collection of Fields. It knows how to validate a set of data + # and it knows how to render itself in a couple of default ways (e.g., an + # HTML table). You can pass it data in __init__(), as a dictionary. def test_form(self): # Pass a dictionary to a Form's __init__(). @@ -268,9 +268,10 @@ aria-describedby="id_birthday_error"> self.assertIs(p.files, files) def test_unbound_form(self): - # If you don't pass any values to the Form's __init__(), or if you pass None, - # the Form will be considered unbound and won't do any validation. Form.errors - # will be an empty dictionary *but* Form.is_valid() will return False. 
+ # If you don't pass any values to the Form's __init__(), or if you pass + # None, the Form will be considered unbound and won't do any + # validation. Form.errors will be an empty dictionary *but* + # Form.is_valid() will return False. p = Person() self.assertFalse(p.is_bound) self.assertEqual(p.errors, {}) @@ -419,10 +420,10 @@ aria-describedby="id_birthday_error"> ) def test_cleaned_data_only_fields(self): - # cleaned_data will always *only* contain a key for fields defined in the - # Form, even if you pass extra data when you define the Form. In this - # example, we pass a bunch of extra fields to the form constructor, - # but cleaned_data contains only the form's fields. + # cleaned_data will always *only* contain a key for fields defined in + # the Form, even if you pass extra data when you define the Form. In + # this example, we pass a bunch of extra fields to the form + # constructor, but cleaned_data contains only the form's fields. data = { "first_name": "John", "last_name": "Lennon", @@ -511,8 +512,8 @@ aria-describedby="id_birthday_error"> ) def test_auto_id_true(self): - # If auto_id is any True value whose str() does not contain '%s', the "id" - # attribute will be the name of the field. + # If auto_id is any True value whose str() does not contain '%s', the + # "id" attribute will be the name of the field. p = Person(auto_id=True) self.assertHTMLEqual( p.as_ul(), @@ -525,8 +526,8 @@ aria-describedby="id_birthday_error"> ) def test_auto_id_false(self): - # If auto_id is any False value, an "id" attribute won't be output unless it - # was manually entered. + # If auto_id is any False value, an "id" attribute won't be output + # unless it was manually entered. p = Person(auto_id=False) self.assertHTMLEqual( p.as_ul(), @@ -536,8 +537,9 @@ aria-describedby="id_birthday_error"> ) def test_id_on_field(self): - # In this example, auto_id is False, but the "id" attribute for the "first_name" - # field is given. 
Also note that field gets a <label>, while the others don't. + # In this example, auto_id is False, but the "id" attribute for the + # "first_name" field is given. Also note that field gets a <label>, + # while the others don't. p = PersonNew(auto_id=False) self.assertHTMLEqual( p.as_ul(), @@ -548,8 +550,8 @@ aria-describedby="id_birthday_error"> ) def test_auto_id_on_form_and_field(self): - # If the "id" attribute is specified in the Form and auto_id is True, the "id" - # attribute in the Form gets precedence. + # If the "id" attribute is specified in the Form and auto_id is True, + # the "id" attribute in the Form gets precedence. p = PersonNew(auto_id=True) self.assertHTMLEqual( p.as_ul(), @@ -636,8 +638,8 @@ aria-describedby="id_birthday_error"> '<textarea name="message" rows="10" cols="40" required></textarea>', ) - # as_textarea(), as_text() and as_hidden() are shortcuts for changing the output - # widget type: + # as_textarea(), as_text() and as_hidden() are shortcuts for changing + # the output widget type: self.assertHTMLEqual( f["subject"].as_textarea(), '<textarea name="subject" rows="10" cols="40" required></textarea>', @@ -660,8 +662,8 @@ aria-describedby="id_birthday_error"> '<textarea name="message" rows="80" cols="20" required></textarea>', ) - # Instance-level attrs are *not* carried over to as_textarea(), as_text() and - # as_hidden(): + # Instance-level attrs are *not* carried over to as_textarea(), + # as_text() and as_hidden(): self.assertHTMLEqual( f["message"].as_text(), '<input type="text" name="message" required>' ) @@ -702,8 +704,9 @@ aria-describedby="id_birthday_error"> </select>""", ) - # A subtlety: If one of the choices' value is the empty string and the form is - # unbound, then the <option> for the empty-string choice will get selected. + # A subtlety: If one of the choices' value is the empty string and the + # form is unbound, then the <option> for the empty-string choice will + # get selected. 
class FrameworkForm(Form): name = CharField() language = ChoiceField( @@ -745,9 +748,9 @@ aria-describedby="id_birthday_error"> </select>""", ) - # When passing a custom widget instance to ChoiceField, note that setting - # 'choices' on the widget is meaningless. The widget will use the choices - # defined on the Field, not the ones defined on the Widget. + # When passing a custom widget instance to ChoiceField, note that + # setting 'choices' on the widget is meaningless. The widget will use + # the choices defined on the Field, not the ones defined on the Widget. class FrameworkForm(Form): name = CharField() language = ChoiceField( @@ -1002,7 +1005,8 @@ aria-describedby="id_birthday_error"> self.assertHTMLEqual(str(fields[0]), '<option value="john">John</option>') def test_form_with_noniterable_boundfield(self): - # You can iterate over any BoundField, not just those with widget=RadioSelect. + # You can iterate over any BoundField, not just those with + # widget=RadioSelect. class BeatleForm(Form): name = CharField() @@ -1049,7 +1053,8 @@ aria-describedby="id_birthday_error"> self.assertIs(bool(TestForm()["name"]), True) def test_forms_with_multiple_choice(self): - # MultipleChoiceField is a special case, as its data is required to be a list: + # MultipleChoiceField is a special case, as its data is required to be + # a list: class SongForm(Form): name = CharField() composers = MultipleChoiceField() @@ -1226,9 +1231,9 @@ aria-describedby="id_birthday_error"> choices=[("J", "John Lennon"), ("P", "Paul McCartney")] ) - # MultipleChoiceField rendered as_hidden() is a special case. Because it can - # have multiple values, its as_hidden() renders multiple <input type="hidden"> - # tags. + # MultipleChoiceField rendered as_hidden() is a special case. Because + # it can have multiple values, its as_hidden() renders multiple <input + # type="hidden"> tags. 
f = SongForm({"name": "Yesterday", "composers": ["P"]}, auto_id=False) self.assertHTMLEqual( f["composers"].as_hidden(), @@ -1260,7 +1265,8 @@ aria-describedby="id_birthday_error"> ) def test_multiple_choice_checkbox(self): - # MultipleChoiceField can also be used with the CheckboxSelectMultiple widget. + # MultipleChoiceField can also be used with the CheckboxSelectMultiple + # widget. f = SongForm(auto_id=False) self.assertHTMLEqual( str(f["composers"]), @@ -1299,9 +1305,9 @@ aria-describedby="id_birthday_error"> ) def test_checkbox_auto_id(self): - # Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox - # gets a distinct ID, formed by appending an underscore plus the checkbox's - # zero-based index. + # Regarding auto_id, CheckboxSelectMultiple is a special case. Each + # checkbox gets a distinct ID, formed by appending an underscore plus + # the checkbox's zero-based index. class SongForm(Form): name = CharField() composers = MultipleChoiceField( @@ -1360,7 +1366,8 @@ aria-describedby="id_birthday_error"> widget=CheckboxSelectMultiple, ) - # The MultipleHiddenInput widget renders multiple values as hidden fields. + # The MultipleHiddenInput widget renders multiple values as hidden + # fields. class SongFormHidden(Form): name = CharField() composers = MultipleChoiceField( @@ -1379,8 +1386,8 @@ aria-describedby="id_birthday_error"> <input type="hidden" name="composers" value="P"></li>""", ) - # When using CheckboxSelectMultiple, the framework expects a list of input and - # returns a list of input. + # When using CheckboxSelectMultiple, the framework expects a list of + # input and returns a list of input. 
f = SongForm({"name": "Yesterday"}, auto_id=False) self.assertEqual(f.errors["composers"], ["This field is required."]) f = SongForm({"name": "Yesterday", "composers": ["J"]}, auto_id=False) @@ -1780,9 +1787,9 @@ aria-describedby="id_birthday_error"> ) def test_dynamic_construction(self): - # It's possible to construct a Form dynamically by adding to the self.fields - # dictionary in __init__(). Don't forget to call Form.__init__() within the - # subclass' __init__(). + # It's possible to construct a Form dynamically by adding to the + # self.fields dictionary in __init__(). Don't forget to call + # Form.__init__() within the subclass' __init__(). class Person(Form): first_name = CharField() last_name = CharField() @@ -1804,8 +1811,8 @@ aria-describedby="id_birthday_error"> """, ) - # Instances of a dynamic Form do not persist fields from one Form instance to - # the next. + # Instances of a dynamic Form do not persist fields from one Form + # instance to the next. class MyForm(Form): def __init__(self, data=None, auto_id=False, field_list=[]): Form.__init__(self, data, auto_id=auto_id) @@ -1869,8 +1876,8 @@ aria-describedby="id_birthday_error"> """, ) - # Similarly, changes to field attributes do not persist from one Form instance - # to the next. + # Similarly, changes to field attributes do not persist from one Form + # instance to the next. class Person(Form): first_name = CharField(required=False) last_name = CharField(required=False) @@ -1981,10 +1988,10 @@ aria-describedby="id_birthday_error"> ) def test_hidden_widget(self): - # HiddenInput widgets are displayed differently in the as_table(), as_ul()) - # and as_p() output of a Form -- their verbose names are not displayed, and a - # separate row is not displayed. They're displayed in the last row of the - # form, directly after that row's form element. 
+ # HiddenInput widgets are displayed differently in the as_table(), + # as_ul()) and as_p() output of a Form -- their verbose names are not + # displayed, and a separate row is not displayed. They're displayed in + # the last row of the form, directly after that row's form element. class Person(Form): first_name = CharField() last_name = CharField() @@ -2030,7 +2037,8 @@ aria-describedby="id_birthday_error"> 'type="hidden" name="hidden_text"></div>', ) - # With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label. + # With auto_id set, a HiddenInput still gets an ID, but it doesn't get + # a label. p = Person(auto_id="id_%s") self.assertHTMLEqual( p.as_table(), @@ -2072,10 +2080,10 @@ aria-describedby="id_birthday_error"> '<input type="hidden" name="hidden_text" id="id_hidden_text"></div>', ) - # If a field with a HiddenInput has errors, the as_table() and as_ul() output - # will include the error message(s) with the text "(Hidden field [fieldname]) " - # prepended. This message is displayed at the top of the output, regardless of - # its field's order in the form. + # If a field with a HiddenInput has errors, the as_table() and as_ul() + # output will include the error message(s) with the text "(Hidden field + # [fieldname]) " prepended. This message is displayed at the top of the + # output, regardless of its field's order in the form. p = Person( {"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"}, auto_id=False, @@ -2159,7 +2167,8 @@ aria-describedby="id_birthday_error"> ) def test_field_order(self): - # A Form's fields are displayed in the same order in which they were defined. + # A Form's fields are displayed in the same order in which they were + # defined. class TestForm(Form): field1 = CharField() field2 = CharField() @@ -2232,10 +2241,10 @@ aria-describedby="id_birthday_error"> ) def test_form_html_attributes(self): - # Some Field classes have an effect on the HTML attributes of their associated - # Widget. 
If you set max_length in a CharField and its associated widget is - # either a TextInput or PasswordInput, then the widget's rendered HTML will - # include the "maxlength" attribute. + # Some Field classes have an effect on the HTML attributes of their + # associated Widget. If you set max_length in a CharField and its + # associated widget is either a TextInput or PasswordInput, then the + # widget's rendered HTML will include the "maxlength" attribute. class UserRegistration(Form): username = CharField(max_length=10) # uses TextInput by default password = CharField(max_length=10, widget=PasswordInput) @@ -2277,9 +2286,10 @@ aria-describedby="id_birthday_error"> ) def test_specifying_labels(self): - # You can specify the label for a field by using the 'label' argument to a Field - # class. If you don't specify 'label', Django will use the field name with - # underscores converted to spaces, and the initial letter capitalized. + # You can specify the label for a field by using the 'label' argument + # to a Field class. If you don't specify 'label', Django will use the + # field name with underscores converted to spaces, and the initial + # letter capitalized. class UserRegistration(Form): username = CharField(max_length=10, label="Your username") password1 = CharField(widget=PasswordInput) @@ -2297,8 +2307,8 @@ aria-describedby="id_birthday_error"> """, ) - # Labels for as_* methods will only end in a colon if they don't end in other - # punctuation already. + # Labels for as_* methods will only end in a colon if they don't end in + # other punctuation already. class Questions(Form): q1 = CharField(label="The first question") q2 = CharField(label="What is your name?") @@ -2354,8 +2364,8 @@ aria-describedby="id_birthday_error"> """, ) - # If label is None, Django will auto-create the label from the field name. This - # is default behavior. + # If label is None, Django will auto-create the label from the field + # name. This is default behavior. 
class UserRegistration(Form): username = CharField(max_length=10, label=None) password = CharField(widget=PasswordInput) @@ -2422,16 +2432,18 @@ aria-describedby="id_birthday_error"> ) def test_initial_data(self): - # You can specify initial data for a field by using the 'initial' argument to a - # Field class. This initial data is displayed when a Form is rendered with *no* - # data. It is not displayed when a Form is rendered with any data (including an - # empty dictionary). Also, the initial value is *not* used if data for a - # particular required field isn't provided. + # You can specify initial data for a field by using the 'initial' + # argument to a Field class. This initial data is displayed when a Form + # is rendered with *no* data. It is not displayed when a Form is + # rendered with any data (including an empty dictionary). Also, the + # initial value is *not* used if data for a particular required field + # isn't provided. class UserRegistration(Form): username = CharField(max_length=10, initial="django") password = CharField(widget=PasswordInput) - # Here, we're not submitting any data, so the initial value will be displayed.) + # Here, we're not submitting any data, so the initial value will be + # displayed.) p = UserRegistration(auto_id=False) self.assertHTMLEqual( p.as_ul(), @@ -2442,7 +2454,8 @@ aria-describedby="id_birthday_error"> """, ) - # Here, we're submitting data, so the initial value will *not* be displayed. + # Here, we're submitting data, so the initial value will *not* be + # displayed. p = UserRegistration({}, auto_id=False) self.assertHTMLEqual( p.as_ul(), @@ -2480,16 +2493,17 @@ Password: <input type="password" name="password" aria-invalid="true" required></ self.assertFalse(p.is_valid()) def test_dynamic_initial_data(self): - # The previous technique dealt with "hard-coded" initial data, but it's also - # possible to specify initial data after you've already created the Form class - # (i.e., at runtime). 
Use the 'initial' parameter to the Form constructor. This - # should be a dictionary containing initial values for one or more fields in the - # form, keyed by field name. + # The previous technique dealt with "hard-coded" initial data, but it's + # also possible to specify initial data after you've already created + # the Form class (i.e., at runtime). Use the 'initial' parameter to the + # Form constructor. This should be a dictionary containing initial + # values for one or more fields in the form, keyed by field name. class UserRegistration(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) - # Here, we're not submitting any data, so the initial value will be displayed.) + # Here, we're not submitting any data, so the initial value will be + # displayed.) p = UserRegistration(initial={"username": "django"}, auto_id=False) self.assertHTMLEqual( p.as_ul(), @@ -2542,10 +2556,10 @@ Password: <input type="password" name="password" aria-invalid="true" required></ """, ) - # A dynamic 'initial' value is *not* used as a fallback if data is not provided. - # In this example, we don't provide a value for 'username', and the - # form raises a validation error rather than using the initial value - # for 'username'. + # A dynamic 'initial' value is *not* used as a fallback if data is not + # provided. In this example, we don't provide a value for 'username', + # and the form raises a validation error rather than using the initial + # value for 'username'. p = UserRegistration({"password": "secret"}, initial={"username": "django"}) self.assertEqual(p.errors["username"], ["This field is required."]) self.assertFalse(p.is_valid()) @@ -2567,8 +2581,8 @@ Password: <input type="password" name="password" aria-invalid="true" required></ ) def test_callable_initial_data(self): - # The previous technique dealt with raw values as initial data, but it's also - # possible to specify callable data. 
+ # The previous technique dealt with raw values as initial data, but + # it's also possible to specify callable data. class UserRegistration(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) @@ -2589,7 +2603,8 @@ Password: <input type="password" name="password" aria-invalid="true" required></ def initial_other_options(): return ["b", "w"] - # Here, we're not submitting any data, so the initial value will be displayed.) + # Here, we're not submitting any data, so the initial value will be + # displayed.) p = UserRegistration( initial={"username": initial_django, "options": initial_options}, auto_id=False, @@ -2987,7 +3002,8 @@ Options: <select multiple name="options" aria-invalid="true" required> '<input type="password" name="password" required></div>', ) - # The help text is displayed whether or not data is provided for the form. + # The help text is displayed whether or not data is provided for the + # form. p = UserRegistration({"username": "foo"}, auto_id=False) self.assertHTMLEqual( p.as_ul(), @@ -2999,8 +3015,8 @@ Options: <select multiple name="options" aria-invalid="true" required> 'required><span class="helptext">Wählen Sie mit Bedacht.</span></li>', ) - # help_text is not displayed for hidden fields. It can be used for documentation - # purposes, though. + # help_text is not displayed for hidden fields. It can be used for + # documentation purposes, though. class UserRegistration(Form): username = CharField(max_length=10, help_text="e.g., user@example.com") password = CharField(widget=PasswordInput) @@ -3299,9 +3315,9 @@ Options: <select multiple name="options" aria-invalid="true" required> ) def test_subclassing_forms(self): - # You can subclass a Form to add fields. The resulting form subclass will have - # all of the fields of the parent Form, plus whichever fields you define in the - # subclass. + # You can subclass a Form to add fields. 
The resulting form subclass + # will have all of the fields of the parent Form, plus whichever fields + # you define in the subclass. class Person(Form): first_name = CharField() last_name = CharField() @@ -3326,8 +3342,8 @@ Options: <select multiple name="options" aria-invalid="true" required> <li>Instrument: <input type="text" name="instrument" required></li>""", ) - # Yes, you can subclass multiple forms. The fields are added in the order in - # which the parent classes are listed. + # Yes, you can subclass multiple forms. The fields are added in the + # order in which the parent classes are listed. class Person(Form): first_name = CharField() last_name = CharField() @@ -3404,8 +3420,8 @@ Options: <select multiple name="options" aria-invalid="true" required> self.assertEqual(p.cleaned_data["last_name"], "Lennon") self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9)) - # Let's try submitting some bad data to make sure form.errors and field.errors - # work as expected. + # Let's try submitting some bad data to make sure form.errors and + # field.errors work as expected. data = { "person1-first_name": "", "person1-last_name": "", @@ -3420,16 +3436,16 @@ Options: <select multiple name="options" aria-invalid="true" required> with self.assertRaises(KeyError): p["person1-first_name"].errors - # In this example, the data doesn't have a prefix, but the form requires it, so - # the form doesn't "see" the fields. + # In this example, the data doesn't have a prefix, but the form + # requires it, so the form doesn't "see" the fields. data = {"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"} p = Person(data, prefix="person1") self.assertEqual(p.errors["first_name"], ["This field is required."]) self.assertEqual(p.errors["last_name"], ["This field is required."]) self.assertEqual(p.errors["birthday"], ["This field is required."]) - # With prefixes, a single data dictionary can hold data for multiple instances - # of the same form. 
+ # With prefixes, a single data dictionary can hold data for multiple + # instances of the same form. data = { "person1-first_name": "John", "person1-last_name": "Lennon", @@ -3449,10 +3465,10 @@ Options: <select multiple name="options" aria-invalid="true" required> self.assertEqual(p2.cleaned_data["last_name"], "Morrison") self.assertEqual(p2.cleaned_data["birthday"], datetime.date(1943, 12, 8)) - # By default, forms append a hyphen between the prefix and the field name, but a - # form can alter that behavior by implementing the add_prefix() method. This - # method takes a field name and returns the prefixed field, according to - # self.prefix. + # By default, forms append a hyphen between the prefix and the field + # name, but a form can alter that behavior by implementing the + # add_prefix() method. This method takes a field name and returns the + # prefixed field, according to self.prefix. class Person(Form): first_name = CharField() last_name = CharField() @@ -3504,8 +3520,9 @@ Options: <select multiple name="options" aria-invalid="true" required> self.assertEqual(p.prefix, "bar") def test_forms_with_null_boolean(self): - # NullBooleanField is a bit of a special case because its presentation (widget) - # is different than its data. This is handled transparently, though. + # NullBooleanField is a bit of a special case because its presentation + # (widget) is different than its data. This is handled transparently, + # though. class Person(Form): name = CharField() is_cool = NullBooleanField() @@ -3693,9 +3710,9 @@ Options: <select multiple name="options" aria-invalid="true" required> ) def test_empty_permitted(self): - # Sometimes (pretty much in formsets) we want to allow a form to pass validation - # if it is completely empty. We can accomplish this by using the empty_permitted - # argument to a form constructor. + # Sometimes (pretty much in formsets) we want to allow a form to pass + # validation if it is completely empty. 
We can accomplish this by using + # the empty_permitted argument to a form constructor. class SongForm(Form): artist = CharField() name = CharField() @@ -3713,29 +3730,31 @@ Options: <select multiple name="options" aria-invalid="true" required> ) self.assertEqual(form.cleaned_data, {}) - # Now let's show what happens when empty_permitted=True and the form is empty. + # Now let's show what happens when empty_permitted=True and the form is + # empty. form = SongForm(data, empty_permitted=True, use_required_attribute=False) self.assertTrue(form.is_valid()) self.assertEqual(form.errors, {}) self.assertEqual(form.cleaned_data, {}) - # But if we fill in data for one of the fields, the form is no longer empty and - # the whole thing must pass validation. + # But if we fill in data for one of the fields, the form is no longer + # empty and the whole thing must pass validation. data = {"artist": "The Doors", "song": ""} form = SongForm(data, empty_permitted=False) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {"name": ["This field is required."]}) self.assertEqual(form.cleaned_data, {"artist": "The Doors"}) - # If a field is not given in the data then None is returned for its data. Lets - # make sure that when checking for empty_permitted that None is treated - # accordingly. + # If a field is not given in the data then None is returned for its + # data. Lets make sure that when checking for empty_permitted that None + # is treated accordingly. data = {"artist": None, "song": ""} form = SongForm(data, empty_permitted=True, use_required_attribute=False) self.assertTrue(form.is_valid()) - # However, we *really* need to be sure we are checking for None as any data in - # initial that returns False on a boolean call needs to be treated literally. + # However, we *really* need to be sure we are checking for None as any + # data in initial that returns False on a boolean call needs to be + # treated literally. 
class PriceForm(Form): amount = FloatField() qty = IntegerField() @@ -4647,7 +4666,8 @@ aria-describedby="id_age_error"></td></tr>""", def clean(self): data = self.cleaned_data - # Return a different dict. We have not changed self.cleaned_data. + # Return a different dict. We have not changed + # self.cleaned_data. return { "username": data["username"].lower(), "password": "this_is_not_a_secret", @@ -4808,7 +4828,8 @@ aria-describedby="id_age_error"></td></tr>""", ) def test_only_hidden_fields(self): - # A form with *only* hidden fields that has errors is going to be very unusual. + # A form with *only* hidden fields that has errors is going to be very + # unusual. class HiddenForm(Form): data = IntegerField(widget=HiddenInput) diff --git a/tests/forms_tests/tests/test_formsets.py b/tests/forms_tests/tests/test_formsets.py index 9f7012a11f..24068e79cb 100644 --- a/tests/forms_tests/tests/test_formsets.py +++ b/tests/forms_tests/tests/test_formsets.py @@ -192,8 +192,8 @@ class FormsFormsetTestCase(SimpleTestCase): self.assertIs(formset.empty_form.empty_permitted, True) def test_formset_validation(self): - # FormSet instances can also have an error attribute if validation failed for - # any of the forms. + # FormSet instances can also have an error attribute if validation + # failed for any of the forms. 
formset = self.make_choiceformset([("Calexico", "")]) self.assertFalse(formset.is_valid()) self.assertEqual(formset.errors, [{"votes": ["This field is required."]}]) diff --git a/tests/forms_tests/tests/test_input_formats.py b/tests/forms_tests/tests/test_input_formats.py index 30ca53ba4b..d1ffc296b1 100644 --- a/tests/forms_tests/tests/test_input_formats.py +++ b/tests/forms_tests/tests/test_input_formats.py @@ -65,7 +65,10 @@ class LocalizedTimeTests(SimpleTestCase): self.assertEqual(text, "13:30:00") def test_timeField_with_inputformat(self): - "TimeFields with manually specified input formats can accept those formats" + """ + TimeFields with manually specified input formats can accept those + formats + """ f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"]) # Parse a time in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -172,7 +175,10 @@ class CustomTimeInputFormatsTests(SimpleTestCase): self.assertEqual(text, "01:30:00 PM") def test_timeField_with_inputformat(self): - "TimeFields with manually specified input formats can accept those formats" + """ + TimeFields with manually specified input formats can accept those + formats + """ f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"]) # Parse a time in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -250,7 +256,10 @@ class SimpleTimeFormatTests(SimpleTestCase): self.assertEqual(text, "13:30:00") def test_localized_timeField(self): - "Localized TimeFields in a non-localized environment act as unlocalized widgets" + """ + Localized TimeFields in a non-localized environment act as unlocalized + widgets + """ f = forms.TimeField() # Parse a time in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -273,7 +282,10 @@ class SimpleTimeFormatTests(SimpleTestCase): self.assertEqual(text, "13:30:00") def test_timeField_with_inputformat(self): - "TimeFields with manually specified input formats can accept those formats" + 
""" + TimeFields with manually specified input formats can accept those + formats + """ f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"]) # Parse a time in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -378,7 +390,10 @@ class LocalizedDateTests(SimpleTestCase): self.assertEqual(text, "21.12.2010") def test_dateField_with_inputformat(self): - "DateFields with manually specified input formats can accept those formats" + """ + DateFields with manually specified input formats can accept those + formats + """ f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"]) # Parse a date in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -489,7 +504,10 @@ class CustomDateInputFormatsTests(SimpleTestCase): self.assertEqual(text, "21.12.2010") def test_dateField_with_inputformat(self): - "DateFields with manually specified input formats can accept those formats" + """ + DateFields with manually specified input formats can accept those + formats + """ f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"]) # Parse a date in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -567,7 +585,10 @@ class SimpleDateFormatTests(SimpleTestCase): self.assertEqual(text, "2010-12-21") def test_localized_dateField(self): - "Localized DateFields in a non-localized environment act as unlocalized widgets" + """ + Localized DateFields in a non-localized environment act as unlocalized + widgets + """ f = forms.DateField() # Parse a date in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -590,7 +611,10 @@ class SimpleDateFormatTests(SimpleTestCase): self.assertEqual(text, "2010-12-21") def test_dateField_with_inputformat(self): - "DateFields with manually specified input formats can accept those formats" + """ + DateFields with manually specified input formats can accept those + formats + """ f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"]) # Parse 
a date in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -697,7 +721,10 @@ class LocalizedDateTimeTests(SimpleTestCase): self.assertEqual(text, "21.12.2010 13:30:00") def test_dateTimeField_with_inputformat(self): - "DateTimeFields with manually specified input formats can accept those formats" + """ + DateTimeFields with manually specified input formats can accept those + formats + """ f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"]) # Parse a date in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -814,7 +841,10 @@ class CustomDateTimeInputFormatsTests(SimpleTestCase): self.assertEqual(text, "01:30:00 PM 21/12/2010") def test_dateTimeField_with_inputformat(self): - "DateTimeFields with manually specified input formats can accept those formats" + """ + DateTimeFields with manually specified input formats can accept those + formats + """ f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"]) # Parse a date in an unaccepted format; get an error with self.assertRaises(ValidationError): @@ -920,7 +950,10 @@ class SimpleDateTimeFormatTests(SimpleTestCase): self.assertEqual(text, "2010-12-21 13:30:05") def test_dateTimeField_with_inputformat(self): - "DateTimeFields with manually specified input formats can accept those formats" + """ + DateTimeFields with manually specified input formats can accept those + formats + """ f = forms.DateTimeField( input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"] ) diff --git a/tests/forms_tests/tests/test_media.py b/tests/forms_tests/tests/test_media.py index 9f6b15074c..c1d0c0a568 100644 --- a/tests/forms_tests/tests/test_media.py +++ b/tests/forms_tests/tests/test_media.py @@ -193,8 +193,8 @@ class FormsMediaTestCase(SimpleTestCase): ) def test_combine_media(self): - # Media objects can be combined. Any given media resource will appear only - # once. Duplicated media definitions are ignored. 
+ # Media objects can be combined. Any given media resource will appear + # only once. Duplicated media definitions are ignored. class MyWidget1(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css2")} diff --git a/tests/forms_tests/tests/tests.py b/tests/forms_tests/tests/tests.py index 086627c9e7..866faaac4d 100644 --- a/tests/forms_tests/tests/tests.py +++ b/tests/forms_tests/tests/tests.py @@ -79,7 +79,8 @@ class TestTicket14567(TestCase): self.assertIsInstance( form.cleaned_data["multi_choice_optional"], models.query.QuerySet ) - # While we're at it, test whether a QuerySet is returned if there *is* a value. + # While we're at it, test whether a QuerySet is returned if there *is* + # a value. self.assertIsInstance(form.cleaned_data["multi_choice"], models.query.QuerySet) @@ -293,8 +294,8 @@ class FormsModelTestCase(TestCase): r2 = DefaultsForm()["callable_default"].as_widget() self.assertNotEqual(r1, r2) - # In a ModelForm that is passed an instance, the initial values come from the - # instance's values, not the model's defaults. + # In a ModelForm that is passed an instance, the initial values come + # from the instance's values, not the model's defaults. foo_instance = Defaults( name="instance value", def_date=datetime.date(1969, 4, 4), value=12 ) @@ -370,7 +371,8 @@ class RelatedModelFormTests(SimpleTestCase): class ManyToManyExclusionTestCase(TestCase): def test_m2m_field_exclusion(self): - # Issue 12337. save_instance should honor the passed-in exclude keyword. + # Issue 12337. save_instance should honor the passed-in exclude + # keyword. 
opt1 = ChoiceOptionModel.objects.create(id=1, name="default") opt2 = ChoiceOptionModel.objects.create(id=2, name="option 2") opt3 = ChoiceOptionModel.objects.create(id=3, name="option 3") diff --git a/tests/forms_tests/widget_tests/base.py b/tests/forms_tests/widget_tests/base.py index c29099abf2..e30b2237c6 100644 --- a/tests/forms_tests/widget_tests/base.py +++ b/tests/forms_tests/widget_tests/base.py @@ -29,7 +29,8 @@ class WidgetTest(SimpleTestCase): ) # Django escapes quotes with '"' while Jinja2 uses '"'. output = output.replace(""", """) - # Django escapes single quotes with ''' while Jinja2 uses '''. + # Django escapes single quotes with ''' while Jinja2 uses + # '''. output = output.replace("'", "'") assertEqual(output, html) diff --git a/tests/generic_relations/tests.py b/tests/generic_relations/tests.py index e0c6fe2db7..1b53dbd8f4 100644 --- a/tests/generic_relations/tests.py +++ b/tests/generic_relations/tests.py @@ -197,10 +197,10 @@ class GenericRelationsTests(TestCase): """ Test lookups over an object without GenericRelations. """ - # Recall that the Mineral class doesn't have an explicit GenericRelation - # defined. That's OK, because you can create TaggedItems explicitly. - # However, excluding GenericRelations means your lookups have to be a - # bit more explicit. + # Recall that the Mineral class doesn't have an explicit + # GenericRelation defined. That's OK, because you can create + # TaggedItems explicitly. However, excluding GenericRelations means + # your lookups have to be a bit more explicit. 
shiny = TaggedItem.objects.create(content_object=self.quartz, tag="shiny") clearish = TaggedItem.objects.create(content_object=self.quartz, tag="clearish") diff --git a/tests/generic_relations_regress/tests.py b/tests/generic_relations_regress/tests.py index c670f44680..f10407b060 100644 --- a/tests/generic_relations_regress/tests.py +++ b/tests/generic_relations_regress/tests.py @@ -92,7 +92,8 @@ class GenericRelationTests(TestCase): CharLink.objects.create(content_object=restaurant) charlink = CharLink.objects.latest("pk") self.assertIs(charlink.content_object, charlink.content_object) - # If the model (Cafe) uses more than one level of multi-table inheritance. + # If the model (Cafe) uses more than one level of multi-table + # inheritance. cafe = Cafe.objects.create() CharLink.objects.create(content_object=cafe) charlink = CharLink.objects.latest("pk") diff --git a/tests/generic_views/test_base.py b/tests/generic_views/test_base.py index acd938935a..cc5dcf4e39 100644 --- a/tests/generic_views/test_base.py +++ b/tests/generic_views/test_base.py @@ -114,7 +114,8 @@ class ViewTest(LoggingAssertionMixin, SimpleTestCase): def test_get_and_head(self): """ - Test a view which supplies a GET method also responds correctly to HEAD. + Test a view which supplies a GET method also responds correctly to + HEAD. """ self._assert_simple(SimpleView.as_view()(self.rf.get("/"))) response = SimpleView.as_view()(self.rf.head("/")) @@ -129,7 +130,8 @@ class ViewTest(LoggingAssertionMixin, SimpleTestCase): def test_head_no_get(self): """ - Test a view which supplies no GET method responds to HEAD with HTTP 405. + Test a view which supplies no GET method responds to HEAD with HTTP + 405. 
""" response = PostOnlyView.as_view()(self.rf.head("/")) self.assertEqual(response.status_code, 405) @@ -608,7 +610,8 @@ class GetContextDataTest(SimpleTestCase): self.assertEqual(context["pony"], test_view.object) def test_object_in_get_context_data(self): - # Checks 'object' key presence in dict returned by get_context_date #20234 + # Checks 'object' key presence in dict returned by get_context_date + # #20234 test_view = views.CustomSingleObjectView() context = test_view.get_context_data() self.assertEqual(context["object"], test_view.object) diff --git a/tests/generic_views/test_dates.py b/tests/generic_views/test_dates.py index cfed82a586..140083d315 100644 --- a/tests/generic_views/test_dates.py +++ b/tests/generic_views/test_dates.py @@ -137,7 +137,8 @@ class ArchiveIndexViewTests(TestDataMixin, TestCase): # 1 query for years list + 1 query for books with self.assertNumQueries(2): self.client.get("/dates/books/") - # same as above + 1 query to test if books exist + 1 query to count them + # same as above + 1 query to test if books exist + 1 query to count + # them with self.assertNumQueries(4): self.client.get("/dates/books/paginated/") @@ -242,7 +243,8 @@ class YearArchiveViewTests(TestDataMixin, TestCase): self.assertEqual(list(res.context["date_list"]), []) self.assertEqual(list(res.context["book_list"]), []) - # Since allow_empty=True, next/prev are allowed to be empty years (#7164) + # Since allow_empty=True, next/prev are allowed to be empty years + # (#7164) self.assertEqual(res.context["next_year"], datetime.date(2000, 1, 1)) self.assertEqual(res.context["previous_year"], datetime.date(1998, 1, 1)) @@ -409,7 +411,8 @@ class MonthArchiveViewTests(TestDataMixin, TestCase): self.assertEqual(list(res.context["book_list"]), []) self.assertEqual(res.context["month"], datetime.date(2000, 1, 1)) - # Since allow_empty=True, next/prev are allowed to be empty months (#7164) + # Since allow_empty=True, next/prev are allowed to be empty months + # (#7164) 
self.assertEqual(res.context["next_month"], datetime.date(2000, 2, 1)) self.assertEqual(res.context["previous_month"], datetime.date(1999, 12, 1)) @@ -484,7 +487,8 @@ class MonthArchiveViewTests(TestDataMixin, TestCase): res = self.client.get("/dates/books/2010/nov/") self.assertEqual(res.status_code, 200) self.assertEqual(res.context["previous_month"], datetime.date(2010, 10, 1)) - # The bug does not occur here because a Book with pubdate of Sep 1 exists + # The bug does not occur here because a Book with pubdate of Sep 1 + # exists res = self.client.get("/dates/books/2010/oct/") self.assertEqual(res.status_code, 200) self.assertEqual(res.context["previous_month"], datetime.date(2010, 9, 1)) @@ -578,7 +582,8 @@ class WeekArchiveViewTests(TestDataMixin, TestCase): self.assertIsNone(res.context["next_week"]) def test_week_view_allow_future(self): - # January 7th always falls in week 1, given Python's definition of week numbers + # January 7th always falls in week 1, given Python's definition of week + # numbers future = datetime.date(datetime.date.today().year + 1, 1, 7) future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7) b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future) @@ -696,7 +701,8 @@ class DayArchiveViewTests(TestDataMixin, TestCase): self.assertEqual(list(res.context["book_list"]), []) self.assertEqual(res.context["day"], datetime.date(2000, 1, 1)) - # Since it's allow empty, next/prev are allowed to be empty months (#7164) + # Since it's allow empty, next/prev are allowed to be empty months + # (#7164) self.assertEqual(res.context["next_day"], datetime.date(2000, 1, 2)) self.assertEqual(res.context["previous_day"], datetime.date(1999, 12, 31)) diff --git a/tests/generic_views/test_list.py b/tests/generic_views/test_list.py index 25f6553a8a..421609d76d 100644 --- a/tests/generic_views/test_list.py +++ b/tests/generic_views/test_list.py @@ -229,7 +229,8 @@ class ListViewTests(TestCase): # 1 query for 
authors with self.assertNumQueries(1): self.client.get("/list/authors/notempty/") - # same as above + 1 query to test if authors exist + 1 query for pagination + # same as above + 1 query to test if authors exist + 1 query for + # pagination with self.assertNumQueries(3): self.client.get("/list/authors/notempty/paginated/") diff --git a/tests/get_earliest_or_latest/tests.py b/tests/get_earliest_or_latest/tests.py index 21692590cc..49c803b73a 100644 --- a/tests/get_earliest_or_latest/tests.py +++ b/tests/get_earliest_or_latest/tests.py @@ -50,8 +50,8 @@ class EarliestOrLatestTests(TestCase): Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).earliest(), a2 ) - # Pass a custom field name to earliest() to change the field that's used - # to determine the earliest object. + # Pass a custom field name to earliest() to change the field that's + # used to determine the earliest object. self.assertEqual(Article.objects.earliest("expire_date"), a2) self.assertEqual( Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).earliest( @@ -143,7 +143,8 @@ class EarliestOrLatestTests(TestCase): a3, ) - # latest() overrides any other ordering specified on the query (#11283). + # latest() overrides any other ordering specified on the query + # (#11283). self.assertEqual(Article.objects.order_by("id").latest(), a4) # Error is raised if get_latest_by isn't in Model.Meta. diff --git a/tests/get_or_create/tests.py b/tests/get_or_create/tests.py index 59f84be221..d4e03d4461 100644 --- a/tests/get_or_create/tests.py +++ b/tests/get_or_create/tests.py @@ -125,7 +125,8 @@ class GetOrCreateTests(TestCase): # Create an Author not tied to any books. Author.objects.create(name="Ted") - # There should be three Authors in total. The book object should have two. + # There should be three Authors in total. The book object should have + # two. 
self.assertEqual(Author.objects.count(), 3) self.assertEqual(book.authors.count(), 2) @@ -371,9 +372,9 @@ class UpdateOrCreateTests(TestCase): def test_error_contains_full_traceback(self): """ update_or_create should raise IntegrityErrors with the full traceback. - This is tested by checking that a known method call is in the traceback. - We cannot use assertRaises/assertRaises here because we need to inspect - the actual traceback. Refs #16340. + This is tested by checking that a known method call is in the + traceback. We cannot use assertRaises/assertRaises here because we need + to inspect the actual traceback. Refs #16340. """ try: ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different") diff --git a/tests/gis_tests/distapp/tests.py b/tests/gis_tests/distapp/tests.py index f5879beeff..f8eae7ba37 100644 --- a/tests/gis_tests/distapp/tests.py +++ b/tests/gis_tests/distapp/tests.py @@ -159,7 +159,7 @@ class DistanceTest(TestCase): Test distance lookups on geodetic coordinate systems. """ # Line is from Canberra to Sydney. Query is for all other cities within - # a 100km of that line (which should exclude only Hobart & Adelaide). + # a 100km of that line (which should exclude only Hobart & # Adelaide). line = GEOSGeometry("LINESTRING(144.9630 -37.8143,151.2607 -33.8870)", 4326) dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100))) expected_cities = [ @@ -221,9 +221,9 @@ class DistanceTest(TestCase): gq2 = Q(point__distance_gte=(wollongong.point, d2)) qs1 = AustraliaCity.objects.exclude(name="Wollongong").filter(gq1 | gq2) - # Geodetic distance lookup but telling GeoDjango to use `distance_spheroid` - # instead (we should get the same results b/c accuracy variance won't matter - # in this test case). + # Geodetic distance lookup but telling GeoDjango to use + # `distance_spheroid` instead (we should get the same results b/c + # accuracy variance won't matter in this test case). 
querysets = [qs1] if connection.features.has_DistanceSpheroid_function: gq3 = Q(point__distance_lte=(wollongong.point, d1, "spheroid")) @@ -532,7 +532,8 @@ class DistanceFunctionsTests(FuncTestMixin, TestCase): ) for city, distance in zip(qs, distances): with self.subTest(city=city, distance=distance): - # Testing equivalence to within a meter (kilometer on SpatiaLite). + # Testing equivalence to within a meter (kilometer on + # SpatiaLite). tol = -3 if connection.ops.spatialite else 0 self.assertAlmostEqual(distance, city.distance.m, tol) @@ -588,7 +589,8 @@ class DistanceFunctionsTests(FuncTestMixin, TestCase): with self.subTest(c=c): self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol) if connection.ops.postgis or connection.ops.spatialite: - # PostGIS uses sphere-only distances by default, testing these as well. + # PostGIS uses sphere-only distances by default, testing these as + # well. qs = ( AustraliaCity.objects.exclude(id=hillsdale.id) .annotate(distance=Distance("point", hillsdale.point)) @@ -663,7 +665,8 @@ class DistanceFunctionsTests(FuncTestMixin, TestCase): @skipUnlessDBFeature("has_Distance_function", "has_Transform_function") def test_distance_transform(self): """ - Test the `Distance` function used with `Transform` on a geographic field. + Test the `Distance` function used with `Transform` on a geographic + field. """ # We'll be using a Polygon (created by buffering the centroid # of 77005 to 100m) -- which aren't allowed in geographic distance diff --git a/tests/gis_tests/gdal_tests/test_ds.py b/tests/gis_tests/gdal_tests/test_ds.py index ca7295be00..5bc71acf3e 100644 --- a/tests/gis_tests/gdal_tests/test_ds.py +++ b/tests/gis_tests/gdal_tests/test_ds.py @@ -264,8 +264,8 @@ class DataSourceTest(SimpleTestCase): # Making sure the fields match to an appropriate OFT type. for k, v in source.fields.items(): - # Making sure we get the proper OGR Field instance, using - # a string value index for the feature. 
+ # Making sure we get the proper OGR Field instance, + # using a string value index for the feature. self.assertIsInstance(feat[k], v) self.assertIsInstance(feat.fields[0], str) diff --git a/tests/gis_tests/gdal_tests/test_envelope.py b/tests/gis_tests/gdal_tests/test_envelope.py index d55aae168b..03882f7a0a 100644 --- a/tests/gis_tests/gdal_tests/test_envelope.py +++ b/tests/gis_tests/gdal_tests/test_envelope.py @@ -64,7 +64,10 @@ class EnvelopeTest(unittest.TestCase): self.assertEqual((-1, -1, 5, 6), self.e) def test05_expand_to_include_pt_2_tuple(self): - "Testing Envelope expand_to_include -- point as a single 2-tuple parameter." + """ + Testing Envelope expand_to_include -- point as a single 2-tuple + parameter. + """ self.e.expand_to_include((10, 10)) self.assertEqual((0, 0, 10, 10), self.e) self.e.expand_to_include((-10, -10)) @@ -76,7 +79,10 @@ class EnvelopeTest(unittest.TestCase): self.assertEqual((-1, 0, 5, 7), self.e) def test06_expand_to_include_extent_4_tuple(self): - "Testing Envelope expand_to_include -- extent as a single 4-tuple parameter." + """ + Testing Envelope expand_to_include -- extent as a single 4-tuple + parameter. + """ self.e.expand_to_include((-1, 1, 3, 7)) self.assertEqual((-1, 0, 5, 7), self.e) diff --git a/tests/gis_tests/gdal_tests/test_geom.py b/tests/gis_tests/gdal_tests/test_geom.py index 919e547511..6951f2f354 100644 --- a/tests/gis_tests/gdal_tests/test_geom.py +++ b/tests/gis_tests/gdal_tests/test_geom.py @@ -521,7 +521,8 @@ class OGRGeomTest(SimpleTestCase, TestDataMixin): self.assertEqual(3, geom[0].coord_dim) self.assertEqual(wkt_3d, geom.wkt) - # Testing binary predicates, `assertIs` is used to check that bool is returned. + # Testing binary predicates, `assertIs` is used to check that bool is + # returned. def test_equivalence_regression(self): "Testing equivalence methods with non-OGRGeometry instances." 
diff --git a/tests/gis_tests/gdal_tests/test_raster.py b/tests/gis_tests/gdal_tests/test_raster.py index a5f1edef0b..2c2b77d5cd 100644 --- a/tests/gis_tests/gdal_tests/test_raster.py +++ b/tests/gis_tests/gdal_tests/test_raster.py @@ -334,7 +334,8 @@ class GDALRasterTests(SimpleTestCase): result = rast.bands[0].data() if numpy: result = result.flatten().tolist() - # Band data is equal to zero because no nodata value has been specified. + # Band data is equal to zero because no nodata value has been + # specified. self.assertEqual(result, [0] * 4) def test_raster_metadata_property(self): @@ -556,7 +557,8 @@ class GDALRasterTests(SimpleTestCase): ], } ) - # Warp raster onto a location that does not cover any pixels of the original. + # Warp raster onto a location that does not cover any pixels of the + # original. result = source.warp({"origin": (200000, 200000)}).bands[0].data() if numpy: result = result.flatten().tolist() @@ -828,7 +830,8 @@ class GDALBandTests(SimpleTestCase): band = rs.bands[0] self.addCleanup(self._remove_aux_file) - # Setting attributes in write mode raises exception in the _flush method + # Setting attributes in write mode raises exception in the _flush + # method with self.assertRaises(GDALException): setattr(band, "nodata_value", 10) diff --git a/tests/gis_tests/gdal_tests/test_srs.py b/tests/gis_tests/gdal_tests/test_srs.py index 62ebc63889..20ad834338 100644 --- a/tests/gis_tests/gdal_tests/test_srs.py +++ b/tests/gis_tests/gdal_tests/test_srs.py @@ -110,7 +110,8 @@ srlist = ( (("projcs", 11), "AXIS"), ), ), - # This is really ESRI format, not WKT -- but the import should work the same + # This is really ESRI format, not WKT -- but the import should work the + # same TestSRS( 'LOCAL_CS["Non-Earth (Meter)",LOCAL_DATUM["Local Datum",32767],' 'UNIT["Meter",1],AXIS["X",EAST],AXIS["Y",NORTH]]', diff --git a/tests/gis_tests/geo3d/tests.py b/tests/gis_tests/geo3d/tests.py index 7b7e6fea5a..6871a7ba93 100644 --- a/tests/gis_tests/geo3d/tests.py 
+++ b/tests/gis_tests/geo3d/tests.py @@ -226,7 +226,8 @@ class Geo3DTest(Geo3DLoadingHelper, TestCase): ref_union = GEOSGeometry(ref_ewkt) union = City3D.objects.aggregate(Union("point"))["point__union"] self.assertTrue(union.hasz) - # Ordering of points in the resulting geometry may vary between implementations + # Ordering of points in the resulting geometry may vary between + # implementations self.assertEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union}) @skipUnlessDBFeature("supports_3d_functions") diff --git a/tests/gis_tests/geoapp/test_feeds.py b/tests/gis_tests/geoapp/test_feeds.py index e247d88535..b927571a29 100644 --- a/tests/gis_tests/geoapp/test_feeds.py +++ b/tests/gis_tests/geoapp/test_feeds.py @@ -98,7 +98,8 @@ class GeoFeedTest(TestCase): items = chan.getElementsByTagName("item") self.assertEqual(len(items), City.objects.count()) - # Ensuring the geo:lat and geo:lon element was added to each item in the feed. + # Ensuring the geo:lat and geo:lon element was added to each item in + # the feed. for item in items: self.assertChildNodes( item, ["title", "link", "description", "guid", "geo:lat", "geo:lon"] diff --git a/tests/gis_tests/geoapp/test_functions.py b/tests/gis_tests/geoapp/test_functions.py index 047d6948b1..70c462a78e 100644 --- a/tests/gis_tests/geoapp/test_functions.py +++ b/tests/gis_tests/geoapp/test_functions.py @@ -210,8 +210,8 @@ class GISFunctionsTests(FuncTestMixin, TestCase): # SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city # WHERE name = 'Pueblo'; svg1 = 'cx="-104.609252" cy="-38.255001"' - # Even though relative, only one point so it's practically the same except for - # the 'c' letter prefix on the x,y values. + # Even though relative, only one point so it's practically the same + # except for the 'c' letter prefix on the x,y values. 
svg2 = svg1.replace("c", "") self.assertEqual( svg1, @@ -463,7 +463,8 @@ class GISFunctionsTests(FuncTestMixin, TestCase): @skipUnlessDBFeature("has_Area_function") def test_area_with_regular_aggregate(self): - # Create projected country objects, for this test to work on all backends. + # Create projected country objects, for this test to work on all + # backends. for c in Country.objects.all(): CountryWebMercator.objects.create( name=c.name, mpoly=c.mpoly.transform(3857, clone=True) @@ -684,7 +685,8 @@ class GISFunctionsTests(FuncTestMixin, TestCase): @skipUnlessDBFeature("has_SnapToGrid_function") def test_snap_to_grid(self): - # Let's try and break snap_to_grid() with bad combinations of arguments. + # Let's try and break snap_to_grid() with bad combinations of + # arguments. for bad_args in ((), range(3), range(5)): with self.assertRaises(ValueError): Country.objects.annotate(snap=functions.SnapToGrid("mpoly", *bad_args)) @@ -692,8 +694,8 @@ class GISFunctionsTests(FuncTestMixin, TestCase): with self.assertRaises(TypeError): Country.objects.annotate(snap=functions.SnapToGrid("mpoly", *bad_args)) - # Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org - # from the world borders dataset he provides. + # Boundary for San Marino, courtesy of Bjorn Sandvik of + # thematicmapping.org from the world borders dataset he provides. wkt = ( "MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167," "12.46250 43.98472,12.47167 43.98694,12.49278 43.98917," @@ -817,9 +819,9 @@ class GISFunctionsTests(FuncTestMixin, TestCase): ) if connection.ops.oracle: - # Should be able to execute the queries; however, they won't be the same - # as GEOS (because Oracle doesn't use GEOS internally like PostGIS or - # SpatiaLite). + # Should be able to execute the queries; however, they won't be the + # same as GEOS (because Oracle doesn't use GEOS internally like + # PostGIS or SpatiaLite). 
return for c in qs: self.assertTrue(c.mpoly.difference(geom).equals(c.difference)) diff --git a/tests/gis_tests/geoapp/test_regress.py b/tests/gis_tests/geoapp/test_regress.py index 9a9226f341..8ac0ed3049 100644 --- a/tests/gis_tests/geoapp/test_regress.py +++ b/tests/gis_tests/geoapp/test_regress.py @@ -67,7 +67,10 @@ class GeoRegressionTests(TestCase): ) def test_empty_count(self): - "Testing that PostGISAdapter.__eq__ does check empty strings. See #13670." + """ + Testing that PostGISAdapter.__eq__ does check empty strings. See + #13670. + """ # contrived example, but need a geo lookup paired with an id__in lookup pueblo = City.objects.get(name="Pueblo") state = State.objects.filter(poly__contains=pueblo.point) @@ -78,7 +81,10 @@ class GeoRegressionTests(TestCase): @skipUnlessDBFeature("allows_group_by_lob") def test_defer_or_only_with_annotate(self): - "Regression for #16409. Make sure defer() and only() work with annotate()" + """ + Regression for #16409. Make sure defer() and only() work with + annotate() + """ self.assertIsInstance( list(City.objects.annotate(Count("point")).defer("name")), list ) @@ -87,7 +93,9 @@ class GeoRegressionTests(TestCase): ) def test_boolean_conversion(self): - "Testing Boolean value conversion with the spatial backend, see #15169." + """ + Testing Boolean value conversion with the spatial backend, see #15169. + """ t1 = Truth.objects.create(val=True) t2 = Truth.objects.create(val=False) diff --git a/tests/gis_tests/geoapp/tests.py b/tests/gis_tests/geoapp/tests.py index 395043e832..84138eb431 100644 --- a/tests/gis_tests/geoapp/tests.py +++ b/tests/gis_tests/geoapp/tests.py @@ -367,14 +367,15 @@ class GeoLookupTest(TestCase): "Testing the 'left' and 'right' lookup types." # Left: A << B => true if xmax(A) < xmin(B) # Right: A >> B => true if xmin(A) > xmax(B) - # See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source. + # See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in + # PostGIS source. 
# Getting the borders for Colorado & Kansas co_border = State.objects.get(name="Colorado").poly ks_border = State.objects.get(name="Kansas").poly - # Note: Wellington has an 'X' value of 174, so it will not be considered - # to the left of CO. + # Note: Wellington has an 'X' value of 174, so it will not be + # considered to the left of CO. # These cities should be strictly to the right of the CO border. cities = [ @@ -397,7 +398,8 @@ class GeoLookupTest(TestCase): for c in qs: self.assertIn(c.name, cities) - # Note: Wellington has an 'X' value of 174, so it will not be considered + # Note: Wellington has an 'X' value of 174, so it will not be + # considered # to the left of CO. vic = City.objects.get(point__left=co_border) self.assertEqual("Victoria", vic.name) @@ -441,7 +443,8 @@ class GeoLookupTest(TestCase): nullqs = State.objects.filter(poly__isnull=True) validqs = State.objects.filter(poly__isnull=False) - # Puerto Rico should be NULL (it's a commonwealth unincorporated territory) + # Puerto Rico should be NULL (it's a commonwealth unincorporated + # territory) self.assertEqual(1, len(nullqs)) self.assertEqual("Puerto Rico", nullqs[0].name) # GeometryField=None is an alias for __isnull=True. @@ -535,8 +538,8 @@ class GeoLookupTest(TestCase): @skipUnlessDBFeature("supports_relate_lookup") def test_relate_lookup(self): "Testing the 'relate' lookup type." - # To make things more interesting, we will have our Texas reference point in - # different SRIDs. + # To make things more interesting, we will have our Texas reference + # point in different SRIDs. 
pnt1 = fromstr("POINT (649287.0363174 4177429.4494686)", srid=2847) pnt2 = fromstr("POINT(-98.4919715741052 29.4333344025053)", srid=4326) @@ -653,7 +656,8 @@ class GeoQuerySetTest(TestCase): # SELECT ST_extent(point) # FROM geoapp_city # WHERE (name='Houston' or name='Dallas');` - # => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203) + # => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 + # 32.7820587158203) expected = ( -96.8016128540039, 29.7633724212646, @@ -710,7 +714,8 @@ class GeoQuerySetTest(TestCase): Testing the `Union` aggregate. """ tx = Country.objects.get(name="Texas").mpoly - # Houston, Dallas -- Ordering may differ depending on backend or GEOS version. + # Houston, Dallas -- Ordering may differ depending on backend or GEOS + # version. union = GEOSGeometry("MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)") qs = City.objects.filter(point__within=tx) with self.assertRaises(ValueError): diff --git a/tests/gis_tests/geogapp/tests.py b/tests/gis_tests/geogapp/tests.py index cb783b9465..229ce88586 100644 --- a/tests/gis_tests/geogapp/tests.py +++ b/tests/gis_tests/geogapp/tests.py @@ -147,8 +147,8 @@ class GeographyFunctionTests(FuncTestMixin, TestCase): ref_dists = [0, 4899.68, 8081.30, 9115.15] elif connection.ops.spatialite: if connection.ops.spatial_version < (5,): - # SpatiaLite < 5 returns non-zero distance for polygons and points - # covered by that polygon. + # SpatiaLite < 5 returns non-zero distance for polygons and + # points covered by that polygon. 
ref_dists = [326.61, 4899.68, 8081.30, 9115.15] else: ref_dists = [0, 4899.68, 8081.30, 9115.15] diff --git a/tests/gis_tests/geos_tests/test_geos.py b/tests/gis_tests/geos_tests/test_geos.py index ac67d6ccad..5ec997556b 100644 --- a/tests/gis_tests/geos_tests/test_geos.py +++ b/tests/gis_tests/geos_tests/test_geos.py @@ -280,7 +280,8 @@ class GEOSTest(SimpleTestCase, TestDataMixin): ("POINT EMPTY", "LINESTRING EMPTY", False), # Empty inputs of different dimensions are not equals_identical. ("POINT EMPTY", "POINT Z EMPTY", False), - # Non-empty inputs of different dimensions are not equals_identical. + # Non-empty inputs of different dimensions are not + # equals_identical. ("POINT Z (1 2 3)", "POINT M (1 2 3)", False), ("POINT ZM (1 2 3 4)", "POINT Z (1 2 3)", False), # Inputs with different structure are not equals_identical. @@ -371,7 +372,8 @@ class GEOSTest(SimpleTestCase, TestDataMixin): self.assertEqual(pnt, fromstr(p.wkt)) self.assertIs(pnt == prev, False) # Use assertIs() to test __eq__. - # Making sure that the point's X, Y components are what we expect + # Making sure that the point's X, Y components are what we + # expect self.assertAlmostEqual(p.x, pnt.tuple[0], 9) self.assertAlmostEqual(p.y, pnt.tuple[1], 9) @@ -753,12 +755,14 @@ class GEOSTest(SimpleTestCase, TestDataMixin): # These tests are needed to ensure sanity with writable geometries. 
- # Getting a polygon with interior rings, and pulling out the interior rings + # Getting a polygon with interior rings, and pulling out the interior + # rings poly = fromstr(self.geometries.polygons[1].wkt) ring1 = poly[0] ring2 = poly[1] - # These deletes should be 'harmless' since they are done on child geometries + # These deletes should be 'harmless' since they are done on child + # geometries del ring1 del ring2 ring1 = poly[0] @@ -776,7 +780,8 @@ class GEOSTest(SimpleTestCase, TestDataMixin): for p in self.geometries.polygons: with self.subTest(p=p): if p.ext_ring_cs: - # Constructing the polygon and getting the coordinate sequence + # Constructing the polygon and getting the coordinate + # sequence poly = fromstr(p.wkt) cs = poly.exterior_ring.coord_seq @@ -791,7 +796,8 @@ class GEOSTest(SimpleTestCase, TestDataMixin): for expected_value, coord_sequence in zip(p.ext_ring_cs, cs): self.assertEqual(expected_value, coord_sequence) - # Construct the test value to set the coordinate sequence with + # Construct the test value to set the coordinate + # sequence with if len(expected_value) == 2: tset = (5, 23) else: @@ -956,8 +962,8 @@ class GEOSTest(SimpleTestCase, TestDataMixin): # Now assuring that each point in the buffer is almost equal for exp_ring, buf_ring in zip(exp_buf, buf, strict=True): for exp_point, buf_point in zip(exp_ring, buf_ring, strict=True): - # Asserting the X, Y of each point are almost equal (due to - # floating point imprecision). + # Asserting the X, Y of each point are almost equal + # (due to floating point imprecision). self.assertAlmostEqual(exp_point[0], buf_point[0], 9) self.assertAlmostEqual(exp_point[1], buf_point[1], 9) @@ -1064,11 +1070,13 @@ class GEOSTest(SimpleTestCase, TestDataMixin): "initialize to LinearRings" ) with self.subTest(p=p): - # Should only be able to use __setitem__ with LinearRing geometries. + # Should only be able to use __setitem__ with LinearRing + # geometries. 
with self.assertRaisesMessage(TypeError, msg): poly.__setitem__(0, LineString((1, 1), (2, 2))) - # Construct the new shell by adding 500 to every point in the old shell. + # Construct the new shell by adding 500 to every point in the + # old shell. shell_tup = poly.shell.tuple new_coords = [] for point in shell_tup: @@ -1615,8 +1623,8 @@ class GEOSTest(SimpleTestCase, TestDataMixin): def test_subclassing(self): """ - GEOSGeometry subclass may itself be subclassed without being forced-cast - to the parent class during `__init__`. + GEOSGeometry subclass may itself be subclassed without being + forced-cast to the parent class during `__init__`. """ class ExtendedPolygon(Polygon): @@ -1629,7 +1637,8 @@ class GEOSTest(SimpleTestCase, TestDataMixin): ext_poly = ExtendedPolygon(((0, 0), (0, 1), (1, 1), (0, 0)), data=3) self.assertEqual(type(ext_poly), ExtendedPolygon) - # ExtendedPolygon.__str__ should be called (instead of Polygon.__str__). + # ExtendedPolygon.__str__ should be called (instead of + # Polygon.__str__). self.assertEqual( str(ext_poly), "EXT_POLYGON - data: 3 - POLYGON ((0 0, 0 1, 1 1, 0 0))" ) diff --git a/tests/gis_tests/inspectapp/tests.py b/tests/gis_tests/inspectapp/tests.py index ca5d5214dd..00a3507b59 100644 --- a/tests/gis_tests/inspectapp/tests.py +++ b/tests/gis_tests/inspectapp/tests.py @@ -118,8 +118,8 @@ class OGRInspectTest(SimpleTestCase): self.skipTest("Unable to setup an OGR connection to your database") try: - # Writing shapefiles via GDAL currently does not support writing OGRTime - # fields, so we need to actually use a database + # Writing shapefiles via GDAL currently does not support writing + # OGRTime fields, so we need to actually use a database model_def = ogrinspect( ogr_db, "Measurement", @@ -143,7 +143,8 @@ class OGRInspectTest(SimpleTestCase): # The ordering of model fields might vary depending on several factors # (version of GDAL, etc.). 
if connection.vendor == "sqlite" and GDAL_VERSION < (3, 4): - # SpatiaLite introspection is somewhat lacking on GDAL < 3.4 (#29461). + # SpatiaLite introspection is somewhat lacking on GDAL < 3.4 + # (#29461). self.assertIn(" f_decimal = models.CharField(max_length=0)", model_def) else: self.assertIn( @@ -205,8 +206,8 @@ def get_ogr_db_string(): """ db = connections.settings["default"] - # Map from the django backend into the OGR driver name and database identifier - # https://gdal.org/drivers/vector/ + # Map from the django backend into the OGR driver name and database + # identifier https://gdal.org/drivers/vector/ # # TODO: Support Oracle (OCI). drivers = { diff --git a/tests/gis_tests/layermap/tests.py b/tests/gis_tests/layermap/tests.py index c5ed84d39c..c590defbd0 100644 --- a/tests/gis_tests/layermap/tests.py +++ b/tests/gis_tests/layermap/tests.py @@ -130,7 +130,8 @@ class LayerMapTest(TestCase): self.assertEqual(Decimal(str(feat["Length"])), istate.length) elif feat.fid == 1: # Everything but the first two decimal digits were truncated, - # because the Interstate model's `length` field has decimal_places=2. + # because the Interstate model's `length` field has + # decimal_places=2. self.assertAlmostEqual(feat.get("Length"), float(istate.length), 2) for p1, p2 in zip(feat.geom, istate.path): @@ -138,7 +139,9 @@ class LayerMapTest(TestCase): self.assertAlmostEqual(p1[1], p2[1], 6) def county_helper(self, county_feat=True): - "Helper function for ensuring the integrity of the mapped County models." + """ + Helper function for ensuring the integrity of the mapped County models. + """ for name, n, st in zip(NAMES, NUMS, STATES): # Should only be one record b/c of `unique` keyword. c = County.objects.get(name=name) @@ -157,10 +160,12 @@ class LayerMapTest(TestCase): """ # All the following should work. - # Telling LayerMapping that we want no transformations performed on the data. 
+ # Telling LayerMapping that we want no transformations performed on the + # data. lm = LayerMapping(County, co_shp, co_mapping, transform=False) - # Specifying the source spatial reference system via the `source_srs` keyword. + # Specifying the source spatial reference system via the `source_srs` + # keyword. lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269) lm = LayerMapping(County, co_shp, co_mapping, source_srs="NAD83") @@ -179,13 +184,14 @@ class LayerMapTest(TestCase): with self.assertRaises(e): LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg) - # No source reference system defined in the shapefile, should raise an error. + # No source reference system defined in the shapefile, should raise an + # error. if connection.features.supports_transform: with self.assertRaises(LayerMapError): LayerMapping(County, co_shp, co_mapping) - # Passing in invalid ForeignKey mapping parameters -- must be a dictionary - # mapping for the model the ForeignKey points to. + # Passing in invalid ForeignKey mapping parameters -- must be a + # dictionary mapping for the model the ForeignKey points to. bad_fk_map1 = copy(co_mapping) bad_fk_map1["state"] = "name" bad_fk_map2 = copy(co_mapping) @@ -195,9 +201,9 @@ class LayerMapTest(TestCase): with self.assertRaises(LayerMapError): LayerMapping(County, co_shp, bad_fk_map2, transform=False) - # There exist no State models for the ForeignKey mapping to work -- should raise - # a MissingForeignKey exception (this error would be ignored if the `strict` - # keyword is not set). + # There exist no State models for the ForeignKey mapping to work -- + # should raise a MissingForeignKey exception (this error would be + # ignored if the `strict` keyword is not set). 
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique="name") with self.assertRaises(MissingForeignKey): lm.save(silent=True, strict=True) @@ -208,12 +214,13 @@ class LayerMapTest(TestCase): ) # If a mapping is specified as a collection, all OGR fields that - # are not collections will be converted into them. For example, - # a Point column would be converted to MultiPoint. Other things being done + # are not collections will be converted into them. For example, a Point + # column would be converted to MultiPoint. Other things being done # w/the keyword args: # `transform=False`: Specifies that no transform is to be done; this - # has the effect of ignoring the spatial reference check (because the - # county shapefile does not have implicit spatial reference info). + # has the effect of ignoring the spatial reference check (because + # the county shapefile does not have implicit spatial reference + # info). # # `unique='name'`: Creates models on the condition that they have # unique county names; geometries from each feature however will be @@ -223,8 +230,8 @@ class LayerMapTest(TestCase): lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique="name") lm.save(silent=True, strict=True) - # A reference that doesn't use the unique keyword; a new database record will - # created for each polygon. + # A reference that doesn't use the unique keyword; a new database + # record will created for each polygon. 
lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False) lm.save(silent=True, strict=True) diff --git a/tests/gis_tests/relatedapp/tests.py b/tests/gis_tests/relatedapp/tests.py index 86d7488341..34dc2bba15 100644 --- a/tests/gis_tests/relatedapp/tests.py +++ b/tests/gis_tests/relatedapp/tests.py @@ -51,7 +51,8 @@ class RelatedGeoModelTest(TestCase): e3 = aggs["location__point__extent"] # The tolerance value is to four decimal places because of differences - # between the Oracle and PostGIS spatial backends on the extent calculation. + # between the Oracle and PostGIS spatial backends on the extent + # calculation. tol = 4 for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]: for ref_val, e_val in zip(ref, e): @@ -85,8 +86,8 @@ class RelatedGeoModelTest(TestCase): p5 = Point(-95.363151, 29.763374) # The second union aggregate is for a union - # query that includes limiting information in the WHERE clause (in other - # words a `.filter()` precedes the call to `.aggregate(Union()`). + # query that includes limiting information in the WHERE clause (in + # other words a `.filter()` precedes the call to `.aggregate(Union()`). ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326) ref_u2 = MultiPoint(p2, p3, srid=4326) @@ -187,8 +188,8 @@ class RelatedGeoModelTest(TestCase): # Incrementing through each of the models, dictionaries, and tuples # returned by each QuerySet. for m, d, t in zip(gqs, gvqs, gvlqs): - # The values should be Geometry objects and not raw strings returned - # by the spatial database. + # The values should be Geometry objects and not raw strings + # returned by the spatial database. self.assertIsInstance(d["point"], GEOSGeometry) self.assertIsInstance(t[1], GEOSGeometry) self.assertEqual(m.point, d["point"]) @@ -208,7 +209,10 @@ class RelatedGeoModelTest(TestCase): self.assertEqual(loc.point, def_loc.point) def test09_pk_relations(self): - "Ensuring correct primary key column is selected across relations. See #10757." 
+ """ + Ensuring correct primary key column is selected across relations. See + #10757. + """ # The expected ID values -- notice the last two location IDs # are out of order. Dallas and Houston have location IDs that differ # from their PKs -- this is done to ensure that the related location @@ -426,13 +430,16 @@ class RelatedGeoModelTest(TestCase): select_related on the related name manager of a unique FK. """ qs = Article.objects.select_related("author__article") - # This triggers TypeError when `get_default_columns` has no `local_only` - # keyword. The TypeError is swallowed if QuerySet is actually - # evaluated as list generation swallows TypeError in CPython. + # This triggers TypeError when `get_default_columns` has no + # `local_only` keyword. The TypeError is swallowed if QuerySet is + # actually evaluated as list generation swallows TypeError in CPython. str(qs.query) def test16_annotated_date_queryset(self): - "Ensure annotated date querysets work if spatial backend is used. See #14648." + """ + Ensure annotated date querysets work if spatial backend is used. See + #14648. + """ birth_years = [ dt.year for dt in list( diff --git a/tests/gis_tests/test_geoforms.py b/tests/gis_tests/test_geoforms.py index 23f94edd0e..2a1f585906 100644 --- a/tests/gis_tests/test_geoforms.py +++ b/tests/gis_tests/test_geoforms.py @@ -34,7 +34,8 @@ class GeometryFieldTest(SimpleTestCase): xform_geom = GEOSGeometry( "POINT (951640.547328465 4219369.26171664)", srid=32140 ) - # The cleaned geometry is transformed to 32140 (the widget map_srid is 3857). + # The cleaned geometry is transformed to 32140 (the widget map_srid is + # 3857). 
cleaned_geom = fld.clean( "SRID=3857;POINT (-10615777.40976205 3473169.895707852)" ) @@ -73,7 +74,8 @@ class GeometryFieldTest(SimpleTestCase): GEOSGeometry("POINT(5 23)", srid=pnt_fld.widget.map_srid), pnt_fld.clean("POINT(5 23)"), ) - # a WKT for any other geom_type will be properly transformed by `to_python` + # a WKT for any other geom_type will be properly transformed by + # `to_python` self.assertEqual( GEOSGeometry("LINESTRING(0 0, 1 1)", srid=pnt_fld.widget.map_srid), pnt_fld.to_python("LINESTRING(0 0, 1 1)"), diff --git a/tests/gis_tests/test_ptr.py b/tests/gis_tests/test_ptr.py index a09679f5b9..cfe58ae1a8 100644 --- a/tests/gis_tests/test_ptr.py +++ b/tests/gis_tests/test_ptr.py @@ -37,9 +37,9 @@ class CPointerBaseTests(SimpleTestCase): fg.ptr # Anything that's either not None or the acceptable pointer type - # results in a TypeError when trying to assign it to the `ptr` property. - # Thus, memory addresses (integers) and pointers of the incorrect type - # (in `bad_ptrs`) aren't allowed. + # results in a TypeError when trying to assign it to the `ptr` + # property. Thus, memory addresses (integers) and pointers of the + # incorrect type (in `bad_ptrs`) aren't allowed. bad_ptrs = (5, ctypes.c_char_p(b"foobar")) for bad_ptr in bad_ptrs: for fg in (fg1, fg2): diff --git a/tests/gis_tests/test_spatialrefsys.py b/tests/gis_tests/test_spatialrefsys.py index d936ac8c89..e49db86d50 100644 --- a/tests/gis_tests/test_spatialrefsys.py +++ b/tests/gis_tests/test_spatialrefsys.py @@ -10,7 +10,8 @@ test_srs = ( "srid": 4326, "auth_name": ("EPSG", True), "auth_srid": 4326, - # Only the beginning, because there are differences depending on installed libs + # Only the beginning, because there are differences depending on + # installed libs "srtext": 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"', "proj_re": ( r"\+proj=longlat (\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ?" 
diff --git a/tests/handlers/tests.py b/tests/handlers/tests.py index e73fc15195..83dfd95713 100644 --- a/tests/handlers/tests.py +++ b/tests/handlers/tests.py @@ -58,7 +58,9 @@ class HandlerTests(SimpleTestCase): self.assertEqual(got, ["café", "café", "caf\ufffd", "café"]) def test_non_ascii_cookie(self): - """Non-ASCII cookies set in JavaScript are properly decoded (#20557).""" + """ + Non-ASCII cookies set in JavaScript are properly decoded (#20557). + """ environ = self.request_factory.get("/").environ raw_cookie = 'want="café"'.encode("utf-8").decode("iso-8859-1") environ["HTTP_COOKIE"] = raw_cookie diff --git a/tests/httpwrappers/tests.py b/tests/httpwrappers/tests.py index f85d33e823..f1caec6b71 100644 --- a/tests/httpwrappers/tests.py +++ b/tests/httpwrappers/tests.py @@ -320,8 +320,8 @@ class HttpResponseTests(SimpleTestCase): self.assertEqual(r.headers["key"], "=?utf-8?b?4oCg?=") self.assertIn(b"=?utf-8?b?4oCg?=", r.serialize_headers()) - # The response also converts string or bytes keys to strings, but requires - # them to contain ASCII + # The response also converts string or bytes keys to strings, but + # requires them to contain ASCII r = HttpResponse() del r.headers["Content-Type"] r.headers["foo"] = "bar" @@ -350,8 +350,8 @@ class HttpResponseTests(SimpleTestCase): f = b"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88" f = f.decode("utf-8") h.headers["Content-Disposition"] = 'attachment; filename="%s"' % f - # This one is triggering https://bugs.python.org/issue20747, that is Python - # will itself insert a newline in the header + # This one is triggering https://bugs.python.org/issue20747, that is + # Python will itself insert a newline in the header h.headers["Content-Disposition"] = ( 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"' ) diff --git a/tests/humanize_tests/tests.py b/tests/humanize_tests/tests.py index 7a44c6d89b..b9f1e58ef7 100644 --- a/tests/humanize_tests/tests.py +++ b/tests/humanize_tests/tests.py @@ -610,8 +610,8 @@ class 
HumanizeTests(SimpleTestCase): def test_inflection_for_timedelta(self): """ - Translation of '%d day'/'%d month'/… may differ depending on the context - of the string it is inserted in. + Translation of '%d day'/'%d month'/… may differ depending on the + context of the string it is inserted in. """ test_list = [ # "%(delta)s ago" translations diff --git a/tests/i18n/sampleproject/update_catalogs.py b/tests/i18n/sampleproject/update_catalogs.py index 8780f629e4..af8953c312 100755 --- a/tests/i18n/sampleproject/update_catalogs.py +++ b/tests/i18n/sampleproject/update_catalogs.py @@ -13,7 +13,8 @@ blocktranslate. This issue is difficult to debug, it could be a problem with extraction, interpolation, or both. How this script helps: - * Add {% translate "Two %% Three %%%" %} and blocktranslate equivalent to templates. + * Add {% translate "Two %% Three %%%" %} and blocktranslate equivalent to + templates. * Run this script. * Test extraction - verify the new msgid in sampleproject's django.po. * Add a translation to sampleproject's django.po. diff --git a/tests/i18n/test_compilation.py b/tests/i18n/test_compilation.py index 3a57dbf076..e99e3e4acc 100644 --- a/tests/i18n/test_compilation.py +++ b/tests/i18n/test_compilation.py @@ -199,18 +199,18 @@ class IgnoreDirectoryCompilationTests(MessageCompilationTests): def test_no_dirs_accidentally_skipped(self): os_walk_results = [ - # To discover .po filepaths, compilemessages uses with a starting list of - # basedirs to inspect, which in this scenario are: + # To discover .po filepaths, compilemessages uses with a starting + # list of basedirs to inspect, which in this scenario are: # ["conf/locale", "locale"] - # Then os.walk is used to discover other locale dirs, ignoring dirs matching - # `ignore_patterns`. Mock the results to place an ignored directory directly - # before and after a directory named "locale". + # Then os.walk is used to discover other locale dirs, ignoring dirs + # matching `ignore_patterns`. 
Mock the results to place an ignored + # directory directly before and after a directory named "locale". [("somedir", ["ignore", "locale", "ignore"], [])], # This will result in three basedirs discovered: # ["conf/locale", "locale", "somedir/locale"] - # os.walk is called for each locale in each basedir looking for .po files. - # In this scenario, we need to mock os.walk results for "en", "fr", and "it" - # locales for each basedir: + # os.walk is called for each locale in each basedir looking for .po + # files. In this scenario, we need to mock os.walk results for + # "en", "fr", and "it" locales for each basedir: [("exclude/locale/LC_MESSAGES", [], ["en.po"])], [("exclude/locale/LC_MESSAGES", [], ["fr.po"])], [("exclude/locale/LC_MESSAGES", [], ["it.po"])], @@ -277,8 +277,9 @@ class CompilationErrorHandling(MessageCompilationTests): call_command("compilemessages", locale=["ja"], verbosity=0) def test_msgfmt_error_including_non_ascii(self): - # po file contains invalid msgstr content (triggers non-ascii error content). - # Make sure the output of msgfmt is unaffected by the current locale. + # po file contains invalid msgstr content (triggers non-ascii error + # content). Make sure the output of msgfmt is unaffected by the current + # locale. 
env = os.environ.copy() env.update({"LC_ALL": "C"}) with mock.patch( diff --git a/tests/i18n/test_extraction.py b/tests/i18n/test_extraction.py index e4a6260c33..b02b24ab5b 100644 --- a/tests/i18n/test_extraction.py +++ b/tests/i18n/test_extraction.py @@ -101,7 +101,8 @@ class ExtractorTests(POFileAssertionMixin, RunInTmpDirMixin, SimpleTestCase): def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts): r""" - self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', 'foo.py') + self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', + 'foo.py') verifies that the django.po file has a gettext-style location comment of the form @@ -368,7 +369,9 @@ class BasicExtractorTests(ExtractorTests): management.call_command("makemessages", locale=[LOCALE], verbosity=0) def test_extraction_warning(self): - """test xgettext warning about multiple bare interpolation placeholders""" + """ + test xgettext warning about multiple bare interpolation placeholders + """ shutil.copyfile("./code.sample", "./code_sample.py") out = StringIO() management.call_command("makemessages", locale=[LOCALE], stdout=out) @@ -438,7 +441,9 @@ class BasicExtractorTests(ExtractorTests): ) def test_template_comments(self): - """Template comment tags on the same line of other constructs (#19552)""" + """ + Template comment tags on the same line of other constructs (#19552) + """ # Test detection/end user reporting of old, incorrect templates # translator comments syntax with warnings.catch_warnings(record=True) as ws: @@ -656,7 +661,8 @@ class JavaScriptExtractorTests(ExtractorTests): def test_i18n_catalog_not_ignored_when_not_invoked_for_django(self): # Create target file so it exists in the filesystem but is NOT ignored. - # "invoked_for_django" is False when "conf/locale" folder does not exist. + # "invoked_for_django" is False when "conf/locale" folder does not + # exist. 
self.assertIs(os.path.exists(os.path.join("conf", "locale")), False) i18n_catalog_js = os.path.join("views", "templates", "i18n_catalog.js") os.makedirs(os.path.dirname(i18n_catalog_js)) @@ -757,9 +763,9 @@ class CopyPluralFormsExtractorTests(ExtractorTests): def test_translate_and_plural_blocktranslate_collision(self): """ - Ensures a correct workaround for the gettext bug when handling a literal - found inside a {% translate %} tag and also in another file inside a - {% blocktranslate %} with a plural (#17375). + Ensures a correct workaround for the gettext bug when handling a + literal found inside a {% translate %} tag and also in another file + inside a {% blocktranslate %} with a plural (#17375). """ management.call_command( "makemessages", locale=[LOCALE], extensions=["html", "djtpl"], verbosity=0 @@ -810,7 +816,9 @@ class NoWrapExtractorTests(ExtractorTests): class LocationCommentsTests(ExtractorTests): def test_no_location_enabled(self): - """Behavior is correct if --no-location switch is specified. See #16903.""" + """ + Behavior is correct if --no-location switch is specified. See #16903. + """ management.call_command( "makemessages", locale=[LOCALE], verbosity=0, no_location=True ) @@ -823,7 +831,8 @@ class LocationCommentsTests(ExtractorTests): "makemessages", locale=[LOCALE], verbosity=0, no_location=False ) self.assertTrue(os.path.exists(self.PO_FILE)) - # #16903 -- Standard comment with source file relative path should be present + # #16903 -- Standard comment with source file relative path should be + # present self.assertLocationCommentPresent( self.PO_FILE, "Translatable literal #6b", "templates", "test.html" ) @@ -942,7 +951,8 @@ class ExcludedLocaleExtractionTests(ExtractorTests): def _set_times_for_all_po_files(self): """ - Set access and modification times to the Unix epoch time for all the .po files. + Set access and modification times to the Unix epoch time for all the + .po files. 
""" for locale in self.LOCALES: os.utime(self.PO_FILE % locale, (0, 0)) @@ -1010,7 +1020,8 @@ class CustomLayoutExtractionTests(ExtractorTests): def _test_project_locale_paths(self, locale_path): """ - * translations for an app containing a locale folder are stored in that folder + * translations for an app containing a locale folder are stored in that + folder * translations outside of that app are in LOCALE_PATHS[0] """ with override_settings(LOCALE_PATHS=[locale_path]): diff --git a/tests/i18n/tests.py b/tests/i18n/tests.py index 1f50ba1112..b4bdf160d6 100644 --- a/tests/i18n/tests.py +++ b/tests/i18n/tests.py @@ -92,7 +92,8 @@ class TranslationTests(SimpleTestCase): @translation.override("fr") def test_plural(self): """ - Test plurals with ngettext. French differs from English in that 0 is singular. + Test plurals with ngettext. French differs from English in that 0 is + singular. """ self.assertEqual( ngettext("%(num)d year", "%(num)d years", 0) % {"num": 0}, @@ -119,9 +120,9 @@ class TranslationTests(SimpleTestCase): @translation.override("fr") def test_multiple_plurals_per_language(self): """ - Normally, French has 2 plurals. As other/locale/fr/LC_MESSAGES/django.po - has a different plural equation with 3 plurals, this tests if those - plural are honored. + Normally, French has 2 plurals. As + other/locale/fr/LC_MESSAGES/django.po has a different plural equation + with 3 plurals, this tests if those plural are honored. """ self.assertEqual(ngettext("%d singular", "%d plural", 0) % 0, "0 pluriel1") self.assertEqual(ngettext("%d singular", "%d plural", 1) % 1, "1 singulier") @@ -415,8 +416,8 @@ class TranslationTests(SimpleTestCase): @override_settings(LOCALE_PATHS=extended_locale_paths) def test_safe_status(self): """ - Translating a string requiring no auto-escaping with gettext or pgettext - shouldn't change the "safe" status. + Translating a string requiring no auto-escaping with gettext or + pgettext shouldn't change the "safe" status. 
""" trans_real._active = Local() trans_real._translations = {} @@ -1278,8 +1279,8 @@ class FormattingTests(SimpleTestCase): self.assertEqual(sanitize_separators(123), 123) with translation.override("ru", deactivate=True): - # Russian locale has non-breaking space (\xa0) as thousand separator - # Usual space is accepted too when sanitizing inputs + # Russian locale has non-breaking space (\xa0) as thousand + # separator Usual space is accepted too when sanitizing inputs with self.settings(USE_THOUSAND_SEPARATOR=True): self.assertEqual(sanitize_separators("1\xa0234\xa0567"), "1234567") self.assertEqual(sanitize_separators("77\xa0777,777"), "77777.777") @@ -1347,7 +1348,8 @@ class FormattingTests(SimpleTestCase): def test_iter_format_modules_stability(self): """ Tests the iter_format_modules function always yields format modules in - a stable and correct order in presence of both base ll and ll_CC formats. + a stable and correct order in presence of both base ll and ll_CC + formats. """ en_format_mod = import_module("django.conf.locale.en.formats") en_gb_format_mod = import_module("django.conf.locale.en_GB.formats") @@ -1364,7 +1366,8 @@ class FormattingTests(SimpleTestCase): def test_localize_templatetag_and_filter(self): """ - Test the {% localize %} templatetag and the localize/unlocalize filters. + Test the {% localize %} templatetag and the localize/unlocalize + filters. """ context = Context( {"int": 1455, "float": 3.14, "date": datetime.date(2016, 12, 31)} @@ -1618,11 +1621,11 @@ class MiscTests(SimpleTestCase): ) def test_support_for_deprecated_chinese_language_codes(self): """ - Some browsers (Firefox, IE, etc.) use deprecated language codes. As these - language codes will be removed in Django 1.9, these will be incorrectly - matched. For example zh-tw (traditional) will be interpreted as zh-hans - (simplified), which is wrong. So we should also accept these deprecated - language codes. + Some browsers (Firefox, IE, etc.) use deprecated language codes. 
As + these language codes will be removed in Django 1.9, these will be + incorrectly matched. For example zh-tw (traditional) will be + interpreted as zh-hans (simplified), which is wrong. So we should also + accept these deprecated language codes. refs #18419 -- this is explicitly for browser compatibility """ @@ -1915,8 +1918,8 @@ class TestLanguageInfo(SimpleTestCase): def test_fallback_language_code(self): """ - get_language_info return the first fallback language info if the lang_info - struct does not contain the 'name' key. + get_language_info return the first fallback language info if the + lang_info struct does not contain the 'name' key. """ li = get_language_info("zh-my") self.assertEqual(li["code"], "zh-hans") @@ -1963,7 +1966,8 @@ class UnprefixedDefaultLanguageTests(SimpleTestCase): def test_default_lang_without_prefix(self): """ With i18n_patterns(..., prefix_default_language=False), the default - language (settings.LANGUAGE_CODE) should be accessible without a prefix. + language (settings.LANGUAGE_CODE) should be accessible without a + prefix. """ response = self.client.get("/simple/") self.assertEqual(response.content, b"Yes") @@ -1990,8 +1994,8 @@ class UnprefixedDefaultLanguageTests(SimpleTestCase): def test_no_redirect_on_404(self): """ A request for a nonexistent URL shouldn't cause a redirect to - /<default_language>/<request_url> when prefix_default_language=False and - /<default_language>/<request_url> has a URL match (#27402). + /<default_language>/<request_url> when prefix_default_language=False + and /<default_language>/<request_url> has a URL match (#27402). """ # A match for /group1/group2/ must exist for this to act as a # regression test. 
diff --git a/tests/inline_formsets/tests.py b/tests/inline_formsets/tests.py index 0fe9766dc6..eaabc350b4 100644 --- a/tests/inline_formsets/tests.py +++ b/tests/inline_formsets/tests.py @@ -57,8 +57,8 @@ class DeletionTests(TestCase): def test_change_form_deletion_when_invalid(self): """ - Make sure that a change form that is filled out, but marked for deletion - doesn't cause validation errors. + Make sure that a change form that is filled out, but marked for + deletion doesn't cause validation errors. """ PoemFormSet = inlineformset_factory( Poet, Poem, can_delete=True, fields="__all__" diff --git a/tests/inspectdb/tests.py b/tests/inspectdb/tests.py index 35cabd346f..c554488c10 100644 --- a/tests/inspectdb/tests.py +++ b/tests/inspectdb/tests.py @@ -300,7 +300,9 @@ class InspectDBTestCase(TestCase): ) def test_digits_column_name_introspection(self): - """Introspection of column names consist/start with digits (#16536/#17676)""" + """ + Introspection of column names consist/start with digits (#16536/#17676) + """ char_field_type = connection.features.introspected_field_types["CharField"] out = StringIO() call_command("inspectdb", "inspectdb_digitsincolumnname", stdout=out) diff --git a/tests/lookup/tests.py b/tests/lookup/tests.py index e89316a270..25336cbee7 100644 --- a/tests/lookup/tests.py +++ b/tests/lookup/tests.py @@ -174,7 +174,8 @@ class LookupTests(TestCase): ) def test_in_bulk(self): - # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects. + # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to + # objects. arts = Article.objects.in_bulk([self.a1.id, self.a2.id]) self.assertEqual(arts[self.a1.id], self.a1) self.assertEqual(arts[self.a2.id], self.a2) @@ -375,7 +376,8 @@ class LookupTests(TestCase): {"headline": "Article 1", "id": self.a1.id}, ], ) - # The values() method works with "extra" fields specified in extra(select). + # The values() method works with "extra" fields specified in + # extra(select). 
self.assertSequenceEqual( Article.objects.extra(select={"id_plus_one": "id + 1"}).values( "id", "id_plus_one" @@ -415,7 +417,8 @@ class LookupTests(TestCase): } ], ) - # You can specify fields from forward and reverse relations, just like filter(). + # You can specify fields from forward and reverse relations, just like + # filter(). self.assertSequenceEqual( Article.objects.values("headline", "author__name"), [ @@ -660,8 +663,9 @@ class LookupTests(TestCase): ) def test_escaping(self): - # Underscores, percent signs and backslashes have special meaning in the - # underlying SQL code, but Django handles the quoting of them automatically. + # Underscores, percent signs and backslashes have special meaning in + # the underlying SQL code, but Django handles the quoting of them + # automatically. a8 = Article.objects.create( headline="Article_ with underscore", pub_date=datetime(2005, 11, 20) ) diff --git a/tests/m2m_regress/models.py b/tests/m2m_regress/models.py index 0e6cec3b6a..43addfd23f 100644 --- a/tests/m2m_regress/models.py +++ b/tests/m2m_regress/models.py @@ -39,7 +39,8 @@ class Entry(models.Model): return self.name -# Two models both inheriting from a base model with a self-referential m2m field +# Two models both inheriting from a base model with a self-referential m2m +# field class SelfReferChild(SelfRefer): pass @@ -48,7 +49,8 @@ class SelfReferChildSibling(SelfRefer): pass -# Many-to-Many relation between models, where one of the PK's isn't an Autofield +# Many-to-Many relation between models, where one of the PK's isn't an +# Autofield class Line(models.Model): name = models.CharField(max_length=100) diff --git a/tests/m2m_through/tests.py b/tests/m2m_through/tests.py index 81a47a2083..d89d89ed8f 100644 --- a/tests/m2m_through/tests.py +++ b/tests/m2m_through/tests.py @@ -408,7 +408,8 @@ class M2mThroughReferentialTests(TestCase): self.assertQuerySetEqual(chris.friends.all(), []) - # Since this isn't a symmetrical relation, Tony's friend link still 
exists. + # Since this isn't a symmetrical relation, Tony's friend link still + # exists. self.assertQuerySetEqual(tony.friends.all(), ["Chris"], attrgetter("name")) def test_self_referential_non_symmetrical_both(self): diff --git a/tests/mail/tests.py b/tests/mail/tests.py index bab1b4be83..2bf3890941 100644 --- a/tests/mail/tests.py +++ b/tests/mail/tests.py @@ -167,7 +167,8 @@ class MailTestsMixin: def assertEndsWith(self, first, second): if not first.endswith(second): # Use assertEqual() for failure message with diffs. If first value - # is much longer than second, truncate start and prepend an ellipsis. + # is much longer than second, truncate start and prepend an + # ellipsis. self.longMessage = True max_len = len(second) + self.START_END_EXTRA_CONTEXT end_of_first = ( @@ -249,7 +250,8 @@ class MailTests(MailTestsMixin, SimpleTestCase): @mock.patch("django.core.mail.message.MIMEText.set_payload") def test_nonascii_as_string_with_ascii_charset(self, mock_set_payload): - """Line length check should encode the payload supporting `surrogateescape`. + """Line length check should encode the payload supporting + `surrogateescape`. Following https://github.com/python/cpython/issues/76511, newer versions of Python (3.12.3 and 3.13) ensure that a message's @@ -260,7 +262,8 @@ class MailTests(MailTestsMixin, SimpleTestCase): Line length checks in SafeMIMEText's set_payload should also use the same error handling strategy to avoid errors such as: - UnicodeEncodeError: 'utf-8' codec can't encode <...>: surrogates not allowed + UnicodeEncodeError: 'utf-8' codec can't encode <...>: surrogates not + allowed """ # This test is specific to Python's legacy MIMEText. This can be safely @@ -965,8 +968,9 @@ class MailTests(MailTestsMixin, SimpleTestCase): self.assertEqual(email.attachments[0].content, expected_content) # Check attachments in the generated message. 
- # (The actual content is not checked as variations in platform - # line endings and rfc822 refolding complicate the logic.) + # (The actual content is not checked as variations in + # platform line endings and rfc822 refolding complicate the + # logic.) attachments = self.get_decoded_attachments(email) self.assertEqual(len(attachments), 1) actual = attachments[0] @@ -979,7 +983,8 @@ class MailTests(MailTestsMixin, SimpleTestCase): if possible and changes to DEFAULT_ATTACHMENT_MIME_TYPE if not. """ email = EmailMessage() - # Mimetype guessing identifies these as text/plain from the .txt extensions. + # Mimetype guessing identifies these as text/plain from the .txt + # extensions. email.attach("utf8.txt", "ütƒ-8\n".encode()) email.attach("not-utf8.txt", b"\x86unknown-encoding\n") attachments = self.get_decoded_attachments(email) @@ -1093,8 +1098,9 @@ class MailTests(MailTestsMixin, SimpleTestCase): def test_attach_rfc822_message(self): """ - EmailMessage.attach() docs: "If you specify a mimetype of message/rfc822, - it will also accept django.core.mail.EmailMessage and email.message.Message." + EmailMessage.attach() docs: "If you specify a mimetype of + message/rfc822, it will also accept django.core.mail.EmailMessage and + email.message.Message." """ # django.core.mail.EmailMessage django_email = EmailMessage("child subject", "child body") @@ -1124,8 +1130,9 @@ class MailTests(MailTestsMixin, SimpleTestCase): self.assertIsInstance(email.attachments[0], EmailAttachment) self.assertEqual(email.attachments[0].mimetype, "message/rfc822") - # Make sure it is serialized correctly: a message/rfc822 attachment - # whose "body" content (payload) is the "encapsulated" (child) message. + # Make sure it is serialized correctly: a message/rfc822 + # attachment whose "body" content (payload) is the + # "encapsulated" (child) message. 
attachments = self.get_raw_attachments(email) self.assertEqual(len(attachments), 1) rfc822_attachment = attachments[0] @@ -1141,9 +1148,9 @@ class MailTests(MailTestsMixin, SimpleTestCase): cte = rfc822_attachment.get("Content-Transfer-Encoding", "7bit") self.assertIn(cte, ("7bit", "8bit", "binary")) - # Any properly declared CTE is allowed for the attached message itself - # (including quoted-printable or base64). For the plain ASCII content - # in this test, we'd expect 7bit. + # Any properly declared CTE is allowed for the attached message + # itself (including quoted-printable or base64). For the plain + # ASCII content in this test, we'd expect 7bit. child_cte = attached_message.get("Content-Transfer-Encoding", "7bit") self.assertEqual(child_cte, "7bit") self.assertEqual(attached_message.get_content_type(), "text/plain") @@ -1302,10 +1309,10 @@ class MailTests(MailTestsMixin, SimpleTestCase): s = msg.message().as_bytes() self.assertIn(b"Content-Transfer-Encoding: 8bit", s) - # Long body lines that require folding should use quoted-printable or base64, - # whichever is shorter. However, Python's legacy email API avoids re-folding - # non-ASCII text and just uses CTE 8bit. (The modern API would correctly choose - # base64 here. Any of these is deliverable.) + # Long body lines that require folding should use quoted-printable or + # base64, whichever is shorter. However, Python's legacy email API + # avoids re-folding non-ASCII text and just uses CTE 8bit. (The modern + # API would correctly choose base64 here. Any of these is deliverable.) msg = EmailMessage( body=( "Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.\n" @@ -1435,7 +1442,8 @@ class MailTests(MailTestsMixin, SimpleTestCase): # EmailMessage.message() will not catch these cases, as it only calls # sanitize_address() if an address also includes non-ASCII chars. # Django detects these cases in the SMTP EmailBackend during sending. 
- # See SMTPBackendTests.test_avoids_sending_to_invalid_addresses() below. + # See SMTPBackendTests.test_avoids_sending_to_invalid_addresses() + # below. for email_address in ( # Invalid address with two @ signs. "to@other.com@example.com", @@ -1562,8 +1570,9 @@ class MailTests(MailTestsMixin, SimpleTestCase): for header in headers: for email_address in cases: with self.subTest(header=header, email_address=email_address): - # Construct an EmailMessage with header set to email_address. - # Specific constructor params vary by header. + # Construct an EmailMessage with header set to + # email_address. Specific constructor params vary by + # header. if header == "From": email = EmailMessage(from_email=email_address) elif header in ("To", "Cc", "Bcc", "Reply-To"): @@ -1740,8 +1749,8 @@ class MailTests(MailTestsMixin, SimpleTestCase): def test_positional_arguments_order(self): """ - EmailMessage class docs: "… is initialized with the following parameters - (in the given order, if positional arguments are used)." + EmailMessage class docs: "… is initialized with the following + parameters (in the given order, if positional arguments are used)." """ connection = mail.get_connection() email = EmailMessage( @@ -2064,10 +2073,10 @@ class BaseEmailBackendTests(MailTestsMixin): def test_send_long_lines(self): """ - Email line length is limited to 998 chars by the RFC 5322 Section 2.1.1. - A message body containing longer lines is converted to quoted-printable - or base64 (whichever is shorter), to avoid having to insert newlines - in a way that alters the intended text. + Email line length is limited to 998 chars by the RFC 5322 Section + 2.1.1. A message body containing longer lines is converted to + quoted-printable or base64 (whichever is shorter), to avoid having to + insert newlines in a way that alters the intended text. """ # Django with Python's legacy email API uses quoted-printable for both # cases below. 
Python's modern API would prefer shorter base64 for the @@ -2270,8 +2279,9 @@ class BaseEmailBackendTests(MailTestsMixin): gettext_lazy("test@example.com"), # RemovedInDjango70Warning: uncomment these cases when support for # deprecated (name, address) tuples is removed. - # [("nobody", "nobody@example.com"), ("other", "other@example.com")], - # [["nobody", "nobody@example.com"], ["other", "other@example.com"]], + # [("nobody", "nobody@example.com"), ("other", + # "other@example.com")], [["nobody", "nobody@example.com"], + # ["other", "other@example.com"]], [("name", "test", "example.com")], [("Name <test@example.com",)], [[]], @@ -2576,7 +2586,8 @@ class SMTPHandler: data = envelope.content mail_from = envelope.mail_from - # Convert SMTP's CRNL to NL, to simplify content checks in shared test cases. + # Convert SMTP's CRNL to NL, to simplify content checks in shared test + # cases. message = message_from_bytes(data.replace(b"\r\n", b"\n")) try: header_from = message["from"].addresses[0].addr_spec @@ -2836,7 +2847,8 @@ class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase): self.assertTrue(msg) msg = msg.decode() - # The message only contains CRLF and not combinations of CRLF, LF, and CR. + # The message only contains CRLF and not combinations of CRLF, LF, + # and CR. msg = msg.replace("\r\n", "") self.assertNotIn("\r", msg) self.assertNotIn("\n", msg) @@ -2873,7 +2885,8 @@ class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase): def test_avoids_sending_to_invalid_addresses(self): """ Verify invalid addresses can't sneak into SMTP commands through - EmailMessage.all_recipients() (which is distinct from message header fields). + EmailMessage.all_recipients() (which is distinct from message header + fields). 
""" backend = smtp.EmailBackend() backend.connection = mock.Mock() @@ -2892,7 +2905,8 @@ class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase): ): with self.subTest(email_address=email_address): # Use bcc (which is only processed by SMTP backend) to ensure - # error is coming from SMTP backend, not EmailMessage.message(). + # error is coming from SMTP backend, not + # EmailMessage.message(). email = EmailMessage(bcc=[email_address]) with self.assertRaisesMessage(ValueError, "Invalid address"): backend.send_messages([email]) diff --git a/tests/many_to_many/tests.py b/tests/many_to_many/tests.py index 1535ef4105..34b7ffc67d 100644 --- a/tests/many_to_many/tests.py +++ b/tests/many_to_many/tests.py @@ -73,7 +73,8 @@ class ManyToManyTests(TestCase): with transaction.atomic(): a6.publications.add(a5) - # Add a Publication directly via publications.add by using keyword arguments. + # Add a Publication directly via publications.add by using keyword + # arguments. p5 = a6.publications.create(title="Highlights for Adults") self.assertSequenceEqual( a6.publications.all(), @@ -256,8 +257,8 @@ class ManyToManyTests(TestCase): [self.a1, self.a3, self.a2, self.a4], ) - # Excluding a related item works as you would expect, too (although the SQL - # involved is a little complex). + # Excluding a related item works as you would expect, too (although the + # SQL involved is a little complex). 
self.assertSequenceEqual( Article.objects.exclude(publications=self.p2), [self.a1], @@ -324,7 +325,8 @@ class ManyToManyTests(TestCase): ) def test_bulk_delete(self): - # Bulk delete some Publications - references to deleted publications should go + # Bulk delete some Publications - references to deleted publications + # should go Publication.objects.filter(title__startswith="Science").delete() self.assertSequenceEqual( Publication.objects.all(), diff --git a/tests/many_to_one/tests.py b/tests/many_to_one/tests.py index 5e31ea1760..ac43c0da95 100644 --- a/tests/many_to_one/tests.py +++ b/tests/many_to_one/tests.py @@ -98,7 +98,8 @@ class ManyToOneTests(TestCase): [new_article, new_article2, self.a], ) - # Add the same article to a different article set - check that it moves. + # Add the same article to a different article set - check that it + # moves. self.r2.article_set.add(new_article2) self.assertEqual(new_article2.reporter.id, self.r2.id) self.assertSequenceEqual(self.r2.article_set.all(), [new_article2]) @@ -193,7 +194,8 @@ class ManyToOneTests(TestCase): [new_article, self.a], ) self.assertSequenceEqual(self.r2.article_set.all(), [new_article2]) - # Reporter cannot be null - there should not be a clear or remove method + # Reporter cannot be null - there should not be a clear or remove + # method self.assertFalse(hasattr(self.r2.article_set, "remove")) self.assertFalse(hasattr(self.r2.article_set, "clear")) @@ -384,7 +386,8 @@ class ManyToOneTests(TestCase): john_smith, ) - # Counting in the opposite direction works in conjunction with distinct() + # Counting in the opposite direction works in conjunction with + # distinct() self.assertEqual( Reporter.objects.filter(article__headline__startswith="T").count(), 2 ) @@ -578,7 +581,8 @@ class ManyToOneTests(TestCase): ) def test_fk_assignment_and_related_object_cache(self): - # Tests of ForeignKey assignment and the related-object cache (see #6886). 
+ # Tests of ForeignKey assignment and the related-object cache (see + # #6886). p = Parent.objects.create(name="Parent") c = Child.objects.create(name="Child", parent=p) @@ -594,7 +598,8 @@ class ManyToOneTests(TestCase): del c._state.fields_cache["parent"] self.assertIsNot(c.parent, p) - # Assigning a new object results in that object getting cached immediately. + # Assigning a new object results in that object getting cached + # immediately. p2 = Parent.objects.create(name="Parent 2") c.parent = p2 self.assertIs(c.parent, p2) @@ -773,7 +778,8 @@ class ManyToOneTests(TestCase): private_school = School.objects.create(is_public=False) private_student = Student.objects.create(school=private_school) - # Only one school is available via all() due to the custom default manager. + # Only one school is available via all() due to the custom default + # manager. self.assertSequenceEqual(School.objects.all(), [public_school]) self.assertEqual(public_student.school, public_school) diff --git a/tests/many_to_one_null/tests.py b/tests/many_to_one_null/tests.py index f92d49f0a9..a6c56c9ada 100644 --- a/tests/many_to_one_null/tests.py +++ b/tests/many_to_one_null/tests.py @@ -48,7 +48,8 @@ class ManyToOneNullTests(TestCase): # Accessing an article's 'reporter' attribute returns None # if the reporter is set to None. self.assertIsNone(a3.reporter) - # To retrieve the articles with no reporters set, use "reporter__isnull=True". + # To retrieve the articles with no reporters set, use + # "reporter__isnull=True". self.assertSequenceEqual( Article.objects.filter(reporter__isnull=True), [self.a3] ) @@ -78,7 +79,8 @@ class ManyToOneNullTests(TestCase): def test_set(self): # Use manager.set() to allocate ForeignKey. Null is legal, so existing - # members of the set that are not in the assignment set are set to null. + # members of the set that are not in the assignment set are set to + # null. 
self.r2.article_set.set([self.a2, self.a3]) self.assertSequenceEqual(self.r2.article_set.all(), [self.a2, self.a3]) # Use manager.set(clear=True) diff --git a/tests/messages_tests/test_cookie.py b/tests/messages_tests/test_cookie.py index 8fd6fa3d61..f1b3a20c3b 100644 --- a/tests/messages_tests/test_cookie.py +++ b/tests/messages_tests/test_cookie.py @@ -131,8 +131,8 @@ class CookieTests(BaseTests, SimpleTestCase): # When storing as a cookie, the cookie has constant overhead of approx # 54 chars, and each message has a constant overhead of about 37 chars - # and a variable overhead of zero in the best case. We aim for a message - # size which will fit 4 messages into the cookie, but not 5. + # and a variable overhead of zero in the best case. We aim for a + # message size which will fit 4 messages into the cookie, but not 5. # See also FallbackTest.test_session_fallback msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37) first_msg = None diff --git a/tests/middleware/test_csp.py b/tests/middleware/test_csp.py index de55f0c6a0..c3321b76a5 100644 --- a/tests/middleware/test_csp.py +++ b/tests/middleware/test_csp.py @@ -47,7 +47,8 @@ class CSPMiddlewareTest(SimpleTestCase): @override_settings(SECURE_CSP={"default-src": [CSP.SELF, CSP.NONCE]}) def test_csp_basic_with_nonce_but_unused(self): """ - Test if `request.csp_nonce` is never accessed, it is not added to the header. + Test if `request.csp_nonce` is never accessed, it is not added to the + header. 
""" response = self.client.get("/csp-base/") nonce = response.text diff --git a/tests/middleware/test_security.py b/tests/middleware/test_security.py index 339b9181c3..142f1311ae 100644 --- a/tests/middleware/test_security.py +++ b/tests/middleware/test_security.py @@ -130,9 +130,9 @@ class SecurityMiddlewareTest(SimpleTestCase): def test_sts_subdomains_and_preload(self): """ With SECURE_HSTS_SECONDS non-zero, SECURE_HSTS_INCLUDE_SUBDOMAINS and - SECURE_HSTS_PRELOAD True, the middleware adds a "Strict-Transport-Security" - header containing both the "includeSubDomains" and "preload" directives - to the response. + SECURE_HSTS_PRELOAD True, the middleware adds a + "Strict-Transport-Security" header containing both the + "includeSubDomains" and "preload" directives to the response. """ response = self.process_response(secure=True) self.assertEqual( diff --git a/tests/migrations/test_autodetector.py b/tests/migrations/test_autodetector.py index dae8a7f3ba..c5d630293e 100644 --- a/tests/migrations/test_autodetector.py +++ b/tests/migrations/test_autodetector.py @@ -2080,8 +2080,9 @@ class AutodetectorTests(BaseAutodetectorTests): def test_rename_field_preserve_db_column_preserve_constraint(self): """ - Renaming a field that already had a db_column attribute and a constraint - generates two no-op operations: RenameField and AlterConstraint. + Renaming a field that already had a db_column attribute and a + constraint generates two no-op operations: RenameField and + AlterConstraint. """ before = [ ModelState( @@ -2566,7 +2567,8 @@ class AutodetectorTests(BaseAutodetectorTests): def test_circular_fk_dependency(self): """ Having a circular ForeignKey dependency automatically - resolves the situation into 2 migrations on one side and 1 on the other. + resolves the situation into 2 migrations on one side and 1 on the + other. 
""" changes = self.get_changes( [], [self.author_with_book, self.book, self.publisher_with_book] @@ -2665,7 +2667,8 @@ class AutodetectorTests(BaseAutodetectorTests): def test_alter_db_table_no_changes(self): """ - Alter_db_table doesn't generate a migration if no changes have been made. + Alter_db_table doesn't generate a migration if no changes have been + made. """ changes = self.get_changes( [self.author_with_db_table_options], [self.author_with_db_table_options] @@ -2675,8 +2678,8 @@ class AutodetectorTests(BaseAutodetectorTests): def test_keep_db_table_with_model_change(self): """ - Tests when model changes but db_table stays as-is, autodetector must not - create more than one operation. + Tests when model changes but db_table stays as-is, autodetector must + not create more than one operation. """ changes = self.get_changes( [self.author_with_db_table_options], @@ -4089,16 +4092,16 @@ class AutodetectorTests(BaseAutodetectorTests): def test_deconstructible_list(self): """Nested deconstruction descends into lists.""" - # When lists contain items that deconstruct to identical values, those lists - # should be considered equal for the purpose of detecting state changes - # (even if the original items are unequal). + # When lists contain items that deconstruct to identical values, those + # lists should be considered equal for the purpose of detecting state + # changes (even if the original items are unequal). 
changes = self.get_changes( [self.author_name_deconstructible_list_1], [self.author_name_deconstructible_list_2], ) self.assertEqual(changes, {}) - # Legitimate differences within the deconstructed lists should be reported - # as a change + # Legitimate differences within the deconstructed lists should be + # reported as a change changes = self.get_changes( [self.author_name_deconstructible_list_1], [self.author_name_deconstructible_list_3], @@ -4107,16 +4110,16 @@ class AutodetectorTests(BaseAutodetectorTests): def test_deconstructible_tuple(self): """Nested deconstruction descends into tuples.""" - # When tuples contain items that deconstruct to identical values, those tuples - # should be considered equal for the purpose of detecting state changes - # (even if the original items are unequal). + # When tuples contain items that deconstruct to identical values, those + # tuples should be considered equal for the purpose of detecting state + # changes (even if the original items are unequal). changes = self.get_changes( [self.author_name_deconstructible_tuple_1], [self.author_name_deconstructible_tuple_2], ) self.assertEqual(changes, {}) - # Legitimate differences within the deconstructed tuples should be reported - # as a change + # Legitimate differences within the deconstructed tuples should be + # reported as a change changes = self.get_changes( [self.author_name_deconstructible_tuple_1], [self.author_name_deconstructible_tuple_3], @@ -4125,16 +4128,16 @@ class AutodetectorTests(BaseAutodetectorTests): def test_deconstructible_dict(self): """Nested deconstruction descends into dict values.""" - # When dicts contain items whose values deconstruct to identical values, - # those dicts should be considered equal for the purpose of detecting - # state changes (even if the original values are unequal). 
+ # When dicts contain items whose values deconstruct to identical + # values, those dicts should be considered equal for the purpose of + # detecting state changes (even if the original values are unequal). changes = self.get_changes( [self.author_name_deconstructible_dict_1], [self.author_name_deconstructible_dict_2], ) self.assertEqual(changes, {}) - # Legitimate differences within the deconstructed dicts should be reported - # as a change + # Legitimate differences within the deconstructed dicts should be + # reported as a change changes = self.get_changes( [self.author_name_deconstructible_dict_1], [self.author_name_deconstructible_dict_3], @@ -4146,16 +4149,17 @@ class AutodetectorTests(BaseAutodetectorTests): Nested deconstruction is applied recursively to the args/kwargs of deconstructed objects. """ - # If the items within a deconstructed object's args/kwargs have the same - # deconstructed values - whether or not the items themselves are different - # instances - then the object as a whole is regarded as unchanged. + # If the items within a deconstructed object's args/kwargs have the + # same deconstructed values - whether or not the items themselves are + # different instances - then the object as a whole is regarded as + # unchanged. 
changes = self.get_changes( [self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_2], ) self.assertEqual(changes, {}) - # Differences that exist solely within the args list of a deconstructed object - # should be reported as changes + # Differences that exist solely within the args list of a deconstructed + # object should be reported as changes changes = self.get_changes( [self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_changed_arg], @@ -4167,8 +4171,8 @@ class AutodetectorTests(BaseAutodetectorTests): [self.author_name_nested_deconstructible_extra_arg], ) self.assertEqual(len(changes), 1) - # Differences that exist solely within the kwargs dict of a deconstructed object - # should be reported as changes + # Differences that exist solely within the kwargs dict of a + # deconstructed object should be reported as changes changes = self.get_changes( [self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_changed_kwarg], @@ -4183,8 +4187,8 @@ class AutodetectorTests(BaseAutodetectorTests): def test_deconstruct_type(self): """ - #22951 -- Uninstantiated classes with deconstruct are correctly returned - by deep_deconstruct during serialization. + #22951 -- Uninstantiated classes with deconstruct are correctly + returned by deep_deconstruct during serialization. """ author = ModelState( "testapp", @@ -4243,7 +4247,9 @@ class AutodetectorTests(BaseAutodetectorTests): side_effect=AssertionError("Should not have prompted for not null addition"), ) def test_add_many_to_many(self, mocked_ask_method): - """#22435 - Adding a ManyToManyField should not prompt for a default.""" + """ + #22435 - Adding a ManyToManyField should not prompt for a default. 
+ """ changes = self.get_changes( [self.author_empty, self.publisher], [self.author_with_m2m, self.publisher] ) @@ -4362,9 +4368,9 @@ class AutodetectorTests(BaseAutodetectorTests): def test_many_to_many_removed_before_through_model_2(self): """ - Removing a model that contains a ManyToManyField and the "through" model - in the same change must remove the field before the model to maintain - consistency. + Removing a model that contains a ManyToManyField and the "through" + model in the same change must remove the field before the model to + maintain consistency. """ changes = self.get_changes( [ @@ -4459,8 +4465,9 @@ class AutodetectorTests(BaseAutodetectorTests): def test_non_circular_foreignkey_dependency_removal(self): """ - If two models with a ForeignKey from one to the other are removed at the - same time, the autodetector should remove them in the correct order. + If two models with a ForeignKey from one to the other are removed at + the same time, the autodetector should remove them in the correct + order. """ changes = self.get_changes( [self.author_with_publisher, self.publisher_with_author], [] diff --git a/tests/migrations/test_base.py b/tests/migrations/test_base.py index 41041f51e8..b636d18ec4 100644 --- a/tests/migrations/test_base.py +++ b/tests/migrations/test_base.py @@ -21,7 +21,8 @@ from django.utils.module_loading import module_dir class MigrationTestBase(TransactionTestCase): """ - Contains an extended set of asserts for testing migrations and schema operations. + Contains an extended set of asserts for testing migrations and schema + operations. 
""" available_apps = ["migrations"] diff --git a/tests/migrations/test_commands.py b/tests/migrations/test_commands.py index cd49a5a8fc..b5817081d2 100644 --- a/tests/migrations/test_commands.py +++ b/tests/migrations/test_commands.py @@ -463,7 +463,8 @@ class MigrateTests(MigrationTestBase): call_command("migrate", "migrations", "0001", verbosity=0) out = io.StringIO() - # Giving the explicit app_label tests for selective `show_list` in the command + # Giving the explicit app_label tests for selective `show_list` in the + # command call_command( "showmigrations", "migrations", @@ -1696,7 +1697,8 @@ class MakeMigrationsTests(MigrationTestBase): self.assertEqual(has_table.call_count, 4) def test_failing_migration(self): - # If a migration fails to serialize, it shouldn't generate an empty file. #21280 + # If a migration fails to serialize, it shouldn't generate an empty + # file. #21280 apps.register_model("migrations", UnserializableModel) with self.temporary_migration_module() as migration_dir: @@ -1754,7 +1756,8 @@ class MakeMigrationsTests(MigrationTestBase): with open(initial_file, encoding="utf-8") as fp: content = fp.read() - # Remove all whitespace to check for empty dependencies and operations + # Remove all whitespace to check for empty dependencies and + # operations content = content.replace(" ", "") self.assertIn( "dependencies=[]" if HAS_BLACK else "dependencies=[\n]", content @@ -1778,7 +1781,8 @@ class MakeMigrationsTests(MigrationTestBase): def test_makemigrations_no_changes_no_apps(self): """ - makemigrations exits when there are no changes and no apps are specified. + makemigrations exits when there are no changes and no apps are + specified. """ out = io.StringIO() call_command("makemigrations", stdout=out) @@ -2165,7 +2169,8 @@ class MakeMigrationsTests(MigrationTestBase): def test_makemigrations_handle_merge(self): """ - makemigrations properly merges the conflicting migrations with --noinput. 
+ makemigrations properly merges the conflicting migrations with + --noinput. """ out = io.StringIO() with self.temporary_migration_module( @@ -2965,8 +2970,9 @@ class SquashMigrationsTests(MigrationTestBase): with self.temporary_migration_module( module="migrations.test_migrations_squashed_loop" ): - # Hits a squash replacement cycle check error, but the actual failure is - # dependent on the order in which the files are read on disk. + # Hits a squash replacement cycle check error, but the actual + # failure is dependent on the order in which the files are read on + # disk. with self.assertRaisesRegex( CommandError, r"Cyclical squash replacement found, starting at" @@ -3039,7 +3045,8 @@ class SquashMigrationsTests(MigrationTestBase): interactive=False, ) - # Update the 4th migration to depend on the squash(replacement) migration. + # Update the 4th migration to depend on the squash(replacement) + # migration. loader = MigrationLoader(connection) migration = loader.disk_migrations[ ("migrations", "0004_remove_mymodel1_field_1_mymodel1_field_3_and_more") @@ -3219,7 +3226,8 @@ class SquashMigrationsTests(MigrationTestBase): def test_squashmigrations_invalid_start(self): """ - squashmigrations doesn't accept a starting migration after the ending migration. + squashmigrations doesn't accept a starting migration after the ending + migration. 
""" with self.temporary_migration_module( module="migrations.test_migrations_no_changes" diff --git a/tests/migrations/test_executor.py b/tests/migrations/test_executor.py index 571cb3e1a2..dd6793b533 100644 --- a/tests/migrations/test_executor.py +++ b/tests/migrations/test_executor.py @@ -78,7 +78,8 @@ class ExecutorTests(MigrationTestBase): ) def test_run_with_squashed(self): """ - Tests running a squashed migration from zero (should ignore what it replaces) + Tests running a squashed migration from zero (should ignore what it + replaces) """ executor = MigrationExecutor(connection) # Check our leaf node is the squashed one @@ -351,7 +352,8 @@ class ExecutorTests(MigrationTestBase): self.assertTableExists("migrations_tribble") # Make sure that was faked self.assertIs(state["faked"], True) - # Finally, migrate forwards; this should fake-apply our initial migration + # Finally, migrate forwards; this should fake-apply our initial + # migration executor.loader.build_graph() self.assertEqual( executor.migration_plan([("migrations", "0001_initial")]), @@ -383,8 +385,8 @@ class ExecutorTests(MigrationTestBase): ) def test_custom_user(self): """ - Regression test for #22325 - references to a custom user model defined in the - same app are not resolved correctly. + Regression test for #22325 - references to a custom user model defined + in the same app are not resolved correctly. 
""" with isolate_lru_cache(global_apps.get_swappable_settings_name): executor = MigrationExecutor(connection) diff --git a/tests/migrations/test_loader.py b/tests/migrations/test_loader.py index 3b30aed100..7cf11f7faa 100644 --- a/tests/migrations/test_loader.py +++ b/tests/migrations/test_loader.py @@ -515,7 +515,9 @@ class LoaderTests(TestCase): } ) def test_loading_squashed_ref_squashed(self): - "Tests loading a squashed migration with a new migration referencing it" + """ + Tests loading a squashed migration with a new migration referencing it + """ r""" The sample migrations are structured like this: diff --git a/tests/migrations/test_operations.py b/tests/migrations/test_operations.py index f86fe16fe0..a893442ce6 100644 --- a/tests/migrations/test_operations.py +++ b/tests/migrations/test_operations.py @@ -955,7 +955,8 @@ class OperationTests(OperationTestBase): operation.state_forwards("test_rmwsc", new_state) self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models) self.assertIn(("test_rmwsc", "littlehorse"), new_state.models) - # RenameModel shouldn't repoint the superclass's relations, only local ones + # RenameModel shouldn't repoint the superclass's relations, only local + # ones self.assertEqual( project_state.models["test_rmwsc", "rider"] .fields["pony"] @@ -2616,7 +2617,8 @@ class OperationTests(OperationTestBase): @skipUnlessDBFeature("supports_foreign_keys") def test_alter_field_pk_fk(self): """ - Tests the AlterField operation on primary keys changes any FKs pointing to it. + Tests the AlterField operation on primary keys changes any FKs pointing + to it. 
""" project_state = self.set_up_test_model("test_alflpkfk", related_model=True) project_state = self.apply_operations( @@ -5782,7 +5784,8 @@ class OperationTests(OperationTestBase): non_atomic_migration.operations = [ migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False) ] - # If we're a fully-transactional database, both versions should rollback + # If we're a fully-transactional database, both versions should + # rollback if connection.features.can_rollback_ddl: self.assertEqual( project_state.apps.get_model( diff --git a/tests/migrations/test_state.py b/tests/migrations/test_state.py index d6ecaa1c5d..c31f8b80dd 100644 --- a/tests/migrations/test_state.py +++ b/tests/migrations/test_state.py @@ -183,11 +183,12 @@ class StateTests(SimpleTestCase): self.assertTrue(all(isinstance(name, str) for name, mgr in food_state.managers)) self.assertEqual(food_state.managers[0][1].args, ("a", "b", 1, 2)) - # No explicit managers defined. Migrations will fall back to the default + # No explicit managers defined. Migrations will fall back to the + # default self.assertEqual(food_no_managers_state.managers, []) - # food_mgr is used in migration but isn't the default mgr, hence add the - # default + # food_mgr is used in migration but isn't the default mgr, hence add + # the default self.assertEqual( [name for name, mgr in food_no_default_manager_state.managers], ["food_no_mgr", "food_mgr"], @@ -1302,7 +1303,8 @@ class StateRelationsTests(SimpleTestCase): with self.subTest(method=method): project_state = self.get_base_project_state() getattr(project_state, method)(*args) - # ProjectState's `_relations` are populated on `relations` access. + # ProjectState's `_relations` are populated on `relations` + # access. 
self.assertIsNone(project_state._relations) self.assertEqual(project_state.relations, project_state._relations) self.assertIsNotNone(project_state._relations) diff --git a/tests/model_forms/test_modelchoicefield.py b/tests/model_forms/test_modelchoicefield.py index 83d801768a..8765f1a6d0 100644 --- a/tests/model_forms/test_modelchoicefield.py +++ b/tests/model_forms/test_modelchoicefield.py @@ -52,9 +52,9 @@ class ModelChoiceFieldTests(TestCase): c4 = Category.objects.create(name="Fourth", url="4th") self.assertEqual(f.clean(c4.id).name, "Fourth") - # Delete a Category object *after* the ModelChoiceField has already been - # instantiated. This proves clean() checks the database during clean() - # rather than caching it at instantiation time. + # Delete a Category object *after* the ModelChoiceField has already + # been instantiated. This proves clean() checks the database during + # clean() rather than caching it at instantiation time. Category.objects.get(url="4th").delete() msg = ( "['Select a valid choice. That choice is not one of the available " diff --git a/tests/model_forms/tests.py b/tests/model_forms/tests.py index e7bdd1ac89..f0334e1e86 100644 --- a/tests/model_forms/tests.py +++ b/tests/model_forms/tests.py @@ -236,7 +236,8 @@ class ModelFormBaseTest(TestCase): def test_empty_fields_to_fields_for_model(self): """ - An argument of fields=() to fields_for_model should return an empty dictionary + An argument of fields=() to fields_for_model should return an empty + dictionary """ field_dict = fields_for_model(Person, fields=()) self.assertEqual(len(field_dict), 0) @@ -395,7 +396,8 @@ class ModelFormBaseTest(TestCase): self.assertEqual(form.instance.slug, empty_value) self.assertEqual(form.instance.url, empty_value) - # Save a second form to verify there isn't a unique constraint violation. + # Save a second form to verify there isn't a unique constraint + # violation. 
form = form_class(data=data) self.assertTrue(form.is_valid()) form.save() @@ -553,9 +555,9 @@ class ModelFormBaseTest(TestCase): exclude = "url" # note the missing comma def test_exclude_and_validation(self): - # This Price instance generated by this form is not valid because the quantity - # field is required, but the form is valid because the field is excluded from - # the form. This is for backwards compatibility. + # This Price instance generated by this form is not valid because the + # quantity field is required, but the form is valid because the field + # is excluded from the form. This is for backwards compatibility. class PriceFormWithoutQuantity(forms.ModelForm): class Meta: model = Price @@ -568,8 +570,8 @@ class ModelFormBaseTest(TestCase): with self.assertRaisesMessage(ValidationError, msg): price.full_clean() - # The form should not validate fields that it doesn't contain even if they are - # specified using 'fields', not 'exclude'. + # The form should not validate fields that it doesn't contain even if + # they are specified using 'fields', not 'exclude'. class PriceFormWithoutQuantity(forms.ModelForm): class Meta: model = Price @@ -578,8 +580,8 @@ class ModelFormBaseTest(TestCase): form = PriceFormWithoutQuantity({"price": "6.00"}) self.assertTrue(form.is_valid()) - # The form should still have an instance of a model that is not complete and - # not saved into a DB yet. + # The form should still have an instance of a model that is not + # complete and not saved into a DB yet. self.assertEqual(form.instance.price, Decimal("6.00")) self.assertIsNone(form.instance.quantity) self.assertIsNone(form.instance.pk) @@ -610,8 +612,8 @@ class ModelFormBaseTest(TestCase): model = Article fields = "__all__" - # MixModelForm is now an Article-related thing, because MixModelForm.Meta - # overrides BaseCategoryForm.Meta. + # MixModelForm is now an Article-related thing, because + # MixModelForm.Meta overrides BaseCategoryForm.Meta. 
self.assertEqual( list(MixModelForm.base_fields), @@ -1236,7 +1238,9 @@ class UniqueTest(TestCase): self.assertFalse(form.is_valid()) def test_explicitpk_unique(self): - """Ensure keys and blank character strings are tested for uniqueness.""" + """ + Ensure keys and blank character strings are tested for uniqueness. + """ form = ExplicitPKForm({"key": "key1", "desc": ""}) self.assertTrue(form.is_valid()) form.save() @@ -1547,8 +1551,8 @@ class ModelFormBasicTests(TestCase): % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk), ) - # When the ModelForm is passed an instance, that instance's current values are - # inserted as 'initial' data in each Field. + # When the ModelForm is passed an instance, that instance's current + # values are inserted as 'initial' data in each Field. f = RoykoForm(auto_id=False, instance=self.w_royko) self.assertHTMLEqual( str(f), @@ -1631,7 +1635,8 @@ class ModelFormBasicTests(TestCase): kwargs["initial"] = lambda: Category.objects.order_by("name")[:2] return db_field.formfield(**kwargs) - # Create a ModelForm, instantiate it, and check that the output is as expected + # Create a ModelForm, instantiate it, and check that the output is as + # expected ModelForm = modelform_factory( Article, fields=["headline", "categories"], @@ -1673,9 +1678,9 @@ class ModelFormBasicTests(TestCase): self.assertEqual(c1.name, "Entertainment") def test_save_commit_false(self): - # If you call save() with commit=False, then it will return an object that - # hasn't yet been saved to the database. In this case, it's up to you to call - # save() on the resulting model instance. + # If you call save() with commit=False, then it will return an object + # that hasn't yet been saved to the database. In this case, it's up to + # you to call save() on the resulting model instance. 
f = BaseCategoryForm( {"name": "Third test", "slug": "third-test", "url": "third"} ) @@ -1708,8 +1713,9 @@ class ModelFormBasicTests(TestCase): def test_multi_fields(self): self.create_basic_data() self.maxDiff = None - # ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any - # fields with the 'choices' attribute are represented by a ChoiceField. + # ManyToManyFields are represented by a MultipleChoiceField, + # ForeignKeys and any fields with the 'choices' attribute are + # represented by a ChoiceField. f = ArticleForm(auto_id=False) self.assertHTMLEqual( str(f), @@ -1800,9 +1806,9 @@ class ModelFormBasicTests(TestCase): # You can restrict a form to a subset of the complete list of fields # by providing a 'fields' argument. If you try to save a # model created with such a form, you need to ensure that the fields - # that are _not_ on the form have default values, or are allowed to have - # a value of None. If a field isn't specified on a form, the object created - # from the form can't provide a value for that field! + # that are _not_ on the form have default values, or are allowed to + # have a value of None. If a field isn't specified on a form, the + # object created from the form can't provide a value for that field! class PartialArticleForm(forms.ModelForm): class Meta: model = Article @@ -1894,8 +1900,9 @@ class ModelFormBasicTests(TestCase): new_art = Article.objects.get(id=art_id_2) self.assertSequenceEqual(new_art.categories.all(), []) - # Create a new article, with categories, via the form, but use commit=False. - # The m2m data won't be saved until save_m2m() is invoked on the form. + # Create a new article, with categories, via the form, but use + # commit=False. The m2m data won't be saved until save_m2m() is invoked + # on the form. 
form_data["categories"] = [str(self.c1.id), str(self.c2.id)] f = ArticleForm(form_data) new_art = f.save(commit=False) @@ -1937,9 +1944,10 @@ class ModelFormBasicTests(TestCase): def test_runtime_choicefield_populated(self): self.maxDiff = None - # Here, we demonstrate that choices for a ForeignKey ChoiceField are determined - # at runtime, based on the data in the database when the form is displayed, not - # the data in the database when the form is instantiated. + # Here, we demonstrate that choices for a ForeignKey ChoiceField are + # determined at runtime, based on the data in the database when the + # form is displayed, not the data in the database when the form is + # instantiated. self.create_basic_data() f = ArticleForm(auto_id=False) self.assertHTMLEqual( @@ -2162,18 +2170,18 @@ class ModelMultipleChoiceFieldTests(TestCase): with self.assertRaises(ValidationError): f.clean([{"foo": "bar"}]) - # Add a Category object *after* the ModelMultipleChoiceField has already been - # instantiated. This proves clean() checks the database during clean() rather - # than caching it at time of instantiation. - # Note, we are using an id of 1006 here since tests that run before - # this may create categories with primary keys up to 6. Use - # a number that will not conflict. + # Add a Category object *after* the ModelMultipleChoiceField has + # already been instantiated. This proves clean() checks the database + # during clean() rather than caching it at time of instantiation. Note, + # we are using an id of 1006 here since tests that run before this may + # create categories with primary keys up to 6. Use a number that will + # not conflict. c6 = Category.objects.create(id=1006, name="Sixth", url="6th") self.assertCountEqual(f.clean([c6.id]), [c6]) - # Delete a Category object *after* the ModelMultipleChoiceField has already been - # instantiated. This proves clean() checks the database during clean() rather - # than caching it at time of instantiation. 
+ # Delete a Category object *after* the ModelMultipleChoiceField has + # already been instantiated. This proves clean() checks the database + # during clean() rather than caching it at time of instantiation. Category.objects.get(url="6th").delete() with self.assertRaises(ValidationError): f.clean([c6.id]) @@ -2456,8 +2464,8 @@ class ModelOneToOneFieldTests(TestCase): self.assertTrue(form.is_valid()) self.assertIsNone(form.cleaned_data["publication"]) author = form.save() - # author object returned from form still retains original publication object - # that's why we need to retrieve it from database again + # author object returned from form still retains original publication + # object that's why we need to retrieve it from database again new_author = Author.objects.get(pk=author.pk) self.assertIsNone(new_author.publication) @@ -2607,8 +2615,8 @@ class FileAndImageFieldTests(TestCase): ) self.assertFalse(f.is_valid()) - # Edit an instance that already has the file defined in the model. This will not - # save the file again, but leave it exactly as it is. + # Edit an instance that already has the file defined in the model. This + # will not save the file again, but leave it exactly as it is. f = TextFileForm({"description": "Assistance"}, instance=instance) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data["file"].name, "tests/test1.txt") @@ -2672,8 +2680,9 @@ class FileAndImageFieldTests(TestCase): model = CustomFF fields = "__all__" - # It's enough that the form saves without error -- the custom save routine will - # generate an AssertionError if it is called more than once during save. + # It's enough that the form saves without error -- the custom save + # routine will generate an AssertionError if it is called more than + # once during save. 
form = CFFForm(data={"f": None}) form.save() @@ -2723,9 +2732,9 @@ class FileAndImageFieldTests(TestCase): @skipUnless(test_images, "Pillow not installed") def test_image_field(self): - # ImageField and FileField are nearly identical, but they differ slightly when - # it comes to validation. This specifically tests that #6302 is fixed for - # both file fields and image fields. + # ImageField and FileField are nearly identical, but they differ + # slightly when it comes to validation. This specifically tests that + # #6302 is fixed for both file fields and image fields. with open(os.path.join(os.path.dirname(__file__), "test.png"), "rb") as fp: image_data = fp.read() @@ -2743,8 +2752,8 @@ class FileAndImageFieldTests(TestCase): self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) - # Delete the current file since this is not done by Django, but don't save - # because the dimension fields are not null=True. + # Delete the current file since this is not done by Django, but don't + # save because the dimension fields are not null=True. instance.image.delete(save=False) f = ImageFileForm( data={"description": "An image"}, @@ -2769,8 +2778,8 @@ class FileAndImageFieldTests(TestCase): self.assertEqual(instance.height, 16) self.assertEqual(instance.width, 16) - # Delete the current file since this is not done by Django, but don't save - # because the dimension fields are not null=True. + # Delete the current file since this is not done by Django, but don't + # save because the dimension fields are not null=True. instance.image.delete(save=False) # Override the file by uploading a new one. @@ -2785,8 +2794,8 @@ class FileAndImageFieldTests(TestCase): self.assertEqual(instance.height, 32) self.assertEqual(instance.width, 48) - # Delete the current file since this is not done by Django, but don't save - # because the dimension fields are not null=True. 
+ # Delete the current file since this is not done by Django, but don't + # save because the dimension fields are not null=True. instance.image.delete(save=False) instance.delete() @@ -2800,8 +2809,8 @@ class FileAndImageFieldTests(TestCase): self.assertEqual(instance.height, 32) self.assertEqual(instance.width, 48) - # Delete the current file since this is not done by Django, but don't save - # because the dimension fields are not null=True. + # Delete the current file since this is not done by Django, but don't + # save because the dimension fields are not null=True. instance.image.delete(save=False) instance.delete() @@ -2975,8 +2984,8 @@ class ModelOtherFieldTests(SimpleTestCase): class OtherModelFormTests(TestCase): def test_media_on_modelform(self): - # Similar to a regular Form class you can define custom media to be used on - # the ModelForm. + # Similar to a regular Form class you can define custom media to be + # used on the ModelForm. f = ModelFormWithMedia() self.assertHTMLEqual( str(f.media), diff --git a/tests/model_formsets/models.py b/tests/model_formsets/models.py index a2965395d6..397bf9de53 100644 --- a/tests/model_formsets/models.py +++ b/tests/model_formsets/models.py @@ -195,7 +195,8 @@ class Player(models.Model): return self.name -# Models for testing custom ModelForm save methods in formsets and inline formsets +# Models for testing custom ModelForm save methods in formsets and inline +# formsets class Poet(models.Model): name = models.CharField(max_length=100) diff --git a/tests/model_formsets/tests.py b/tests/model_formsets/tests.py index 748e5f5018..7722d2b3d6 100644 --- a/tests/model_formsets/tests.py +++ b/tests/model_formsets/tests.py @@ -104,8 +104,8 @@ class DeletionTests(TestCase): def test_change_form_deletion_when_invalid(self): """ - Make sure that a change form that is filled out, but marked for deletion - doesn't cause validation errors. 
+ Make sure that a change form that is filled out, but marked for + deletion doesn't cause validation errors. """ PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True) poet = Poet.objects.create(name="test") @@ -1538,8 +1538,8 @@ class ModelFormsetTest(TestCase): ], ) - # unique_together with inlineformset_factory with overridden form fields - # Also see #9494 + # unique_together with inlineformset_factory with overridden form + # fields Also see #9494 FormSet = inlineformset_factory( Repository, Revision, fields=("revision",), extra=1 @@ -1564,9 +1564,10 @@ class ModelFormsetTest(TestCase): ) formset = FormSet(instance=person) - # Django will render a hidden field for model fields that have a callable - # default. This is required to ensure the value is tested for change correctly - # when determine what extra forms have changed to save. + # Django will render a hidden field for model fields that have a + # callable default. This is required to ensure the value is tested for + # change correctly when determine what extra forms have changed to + # save. self.assertEqual(len(formset.forms), 1) # this formset only has one form form = formset.forms[0] @@ -1594,7 +1595,8 @@ class ModelFormsetTest(TestCase): 'id="id_membership_set-0-id"></p>' % person.id, ) - # test for validation with callable defaults. Validations rely on hidden fields + # test for validation with callable defaults. 
Validations rely on + # hidden fields data = { "membership_set-TOTAL_FORMS": "1", @@ -1728,8 +1730,8 @@ class ModelFormsetTest(TestCase): ) def test_model_formset_with_custom_pk(self): - # a formset for a Model that has a custom primary key that still needs to be - # added to the formset automatically + # a formset for a Model that has a custom primary key that still needs + # to be added to the formset automatically FormSet = modelformset_factory( ClassyMexicanRestaurant, fields=["tacos_are_yummy"] ) diff --git a/tests/model_formsets_regress/tests.py b/tests/model_formsets_regress/tests.py index 0ccc2c0490..794244b6e8 100644 --- a/tests/model_formsets_regress/tests.py +++ b/tests/model_formsets_regress/tests.py @@ -407,7 +407,8 @@ class BaseCustomDeleteFormSet(BaseFormSet): A formset mix-in that lets a form decide if it's to be deleted. Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed. - form.should_delete() is called. The formset delete field is also suppressed. + form.should_delete() is called. The formset delete field is also + suppressed. """ def add_fields(self, form, index): diff --git a/tests/model_inheritance/test_abstract_inheritance.py b/tests/model_inheritance/test_abstract_inheritance.py index 9b9da437da..2dd183a200 100644 --- a/tests/model_inheritance/test_abstract_inheritance.py +++ b/tests/model_inheritance/test_abstract_inheritance.py @@ -61,7 +61,8 @@ class AbstractInheritanceTests(SimpleTestCase): def test_diamond_shaped_multiple_inheritance_is_depth_first(self): """ In contrast to standard Python MRO, resolution of inherited fields is - strictly depth-first, rather than breadth-first in diamond-shaped cases. + strictly depth-first, rather than breadth-first in diamond-shaped + cases. 
This is because a copy of the parent field descriptor is placed onto the model class in ModelBase.__new__(), rather than the attribute diff --git a/tests/model_inheritance/tests.py b/tests/model_inheritance/tests.py index cc333a9ac2..2b911d4dc5 100644 --- a/tests/model_inheritance/tests.py +++ b/tests/model_inheritance/tests.py @@ -81,7 +81,8 @@ class ModelInheritanceTests(TestCase): Restaurant.objects.filter(supplier__name="foo") def test_model_with_distinct_accessors(self): - # The Post model has distinct accessors for the Comment and Link models. + # The Post model has distinct accessors for the Comment and Link + # models. post = Post.objects.create(title="Lorem Ipsum") post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True) post.attached_link_set.create( @@ -194,7 +195,8 @@ class ModelInheritanceTests(TestCase): with CaptureQueriesContext(connection) as captured_queries: Place.objects.filter(pk=supplier.pk).update(name=supplier.name) expected_sql = captured_queries[0]["sql"] - # Capture the queries executed when a subclassed model instance is saved. + # Capture the queries executed when a subclassed model instance is + # saved. with CaptureQueriesContext(connection) as captured_queries: supplier.save(update_fields=("name",)) for query in captured_queries: diff --git a/tests/model_inheritance_regress/tests.py b/tests/model_inheritance_regress/tests.py index ba31048ac2..3310497de1 100644 --- a/tests/model_inheritance_regress/tests.py +++ b/tests/model_inheritance_regress/tests.py @@ -439,7 +439,8 @@ class ModelInheritanceTest(TestCase): def test_concrete_abstract_concrete_pk(self): """ - Primary key set correctly with concrete->abstract->concrete inheritance. + Primary key set correctly with concrete->abstract->concrete + inheritance. 
""" # Regression test for #13987: Primary key is incorrectly determined # when more than one model has a concrete->abstract->concrete diff --git a/tests/model_meta/tests.py b/tests/model_meta/tests.py index 93883b5cf1..98dd132323 100644 --- a/tests/model_meta/tests.py +++ b/tests/model_meta/tests.py @@ -51,8 +51,8 @@ class GetFieldsTests(OptionsBaseTests): def test_get_fields_is_immutable(self): msg = IMMUTABLE_WARNING % "get_fields()" for _ in range(2): - # Running unit test twice to ensure both non-cached and cached result - # are immutable. + # Running unit test twice to ensure both non-cached and cached + # result are immutable. fields = Person._meta.get_fields() with self.assertRaisesMessage(AttributeError, msg): fields += ["errors"] diff --git a/tests/modeladmin/test_checks.py b/tests/modeladmin/test_checks.py index 0592be7b4f..fbd63abfbf 100644 --- a/tests/modeladmin/test_checks.py +++ b/tests/modeladmin/test_checks.py @@ -791,7 +791,8 @@ class ListDisplayLinksCheckTests(CheckTestCase): def test_list_display_links_check_skipped_if_get_list_display_overridden(self): """ - list_display_links check is skipped if get_list_display() is overridden. + list_display_links check is skipped if get_list_display() is + overridden. """ class TestModelAdmin(ModelAdmin): diff --git a/tests/modeladmin/tests.py b/tests/modeladmin/tests.py index ecead78154..4a92514e17 100644 --- a/tests/modeladmin/tests.py +++ b/tests/modeladmin/tests.py @@ -73,12 +73,13 @@ class ModelAdminTests(TestCase): self.assertIsNone(ma.get_exclude(request, self.band)) def test_default_fieldsets(self): - # fieldsets_add and fieldsets_change should return a special data structure that - # is used in the templates. They should generate the "right thing" whether we - # have specified a custom form, the fields argument, or nothing at all. + # fieldsets_add and fieldsets_change should return a special data + # structure that is used in the templates. 
They should generate the + # "right thing" whether we have specified a custom form, the fields + # argument, or nothing at all. # - # Here's the default case. There are no custom form_add/form_change methods, - # no fields argument, and no fieldsets argument. + # Here's the default case. There are no custom form_add/form_change + # methods, no fields argument, and no fieldsets argument. ma = ModelAdmin(Band, self.site) self.assertEqual( ma.get_fieldsets(request), @@ -294,7 +295,8 @@ class ModelAdminTests(TestCase): # Form class to the fields specified. This may cause errors to be # raised in the db layer if required model fields aren't in fields/ # fieldsets, but that's preferable to ghost errors where a field in the - # Form class isn't being displayed because it's not in fields/fieldsets. + # Form class isn't being displayed because it's not in + # fields/fieldsets. # Using `fields`. class BandAdmin(ModelAdmin): @@ -403,7 +405,8 @@ class ModelAdminTests(TestCase): def test_custom_form_meta_exclude(self): """ The custom ModelForm's `Meta.exclude` is overridden if - `ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined (#14496). + `ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined + (#14496). """ # With ModelAdmin @@ -702,9 +705,10 @@ class ModelAdminTests(TestCase): def test_default_foreign_key_widget(self): # First, without any radio_fields specified, the widgets for ForeignKey # and fields with choices specified ought to be a basic Select widget. - # ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so - # they need to be handled properly when type checking. For Select fields, all of - # the choices lists have a first entry of dashes. + # ForeignKey widgets in the admin are wrapped with + # RelatedFieldWidgetWrapper so they need to be handled properly when + # type checking. For Select fields, all of the choices lists have a + # first entry of dashes. 
cma = ModelAdmin(Concert, self.site) cmafa = cma.get_form(request) @@ -732,9 +736,10 @@ class ModelAdminTests(TestCase): def test_foreign_key_as_radio_field(self): # Now specify all the fields as radio_fields. Widgets should now be - # RadioSelect, and the choices list should have a first entry of 'None' if - # blank=True for the model field. Finally, the widget should have the - # 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL. + # RadioSelect, and the choices list should have a first entry of 'None' + # if blank=True for the model field. Finally, the widget should have + # the 'radiolist' attr, and 'inline' as well if the field is specified + # HORIZONTAL. class ConcertAdmin(ModelAdmin): radio_fields = { "main_band": HORIZONTAL, @@ -937,8 +942,8 @@ class ModelAdminTests(TestCase): def test_get_deleted_objects_with_custom_has_delete_permission(self): """ - ModelAdmin.get_deleted_objects() uses ModelAdmin.has_delete_permission() - for permissions checking. + ModelAdmin.get_deleted_objects() uses + ModelAdmin.has_delete_permission() for permissions checking. 
""" mock_request = MockRequest() mock_request.user = User.objects.create_superuser( diff --git a/tests/multiple_database/tests.py b/tests/multiple_database/tests.py index 9587030a46..85091441aa 100644 --- a/tests/multiple_database/tests.py +++ b/tests/multiple_database/tests.py @@ -30,7 +30,9 @@ class QueryTestCase(TestCase): self.assertEqual(Book.objects.db_manager("other").all().db, "other") def test_default_creation(self): - "Objects created on the default database don't leak onto other databases" + """ + Objects created on the default database don't leak onto other databases + """ # Create a book on the default database using create() Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) @@ -60,7 +62,10 @@ class QueryTestCase(TestCase): Book.objects.using("other").get(title="Dive into Python") def test_other_creation(self): - "Objects created on another database don't leak onto the default database" + """ + Objects created on another database don't leak onto the default + database + """ # Create a book on the second database Book.objects.using("other").create( title="Pro Django", published=datetime.date(2008, 12, 16) @@ -419,7 +424,10 @@ class QueryTestCase(TestCase): ) def test_m2m_cross_database_protection(self): - "Operations that involve sharing M2M objects across databases raise an error" + """ + Operations that involve sharing M2M objects across databases raise an + error + """ # Create a book and author on the default database pro = Book.objects.create( title="Pro Django", published=datetime.date(2008, 12, 16) @@ -471,7 +479,9 @@ class QueryTestCase(TestCase): dive.authors.set([mark, marty]) def test_m2m_deletion(self): - "Cascaded deletions of m2m relations issue queries on the right database" + """ + Cascaded deletions of m2m relations issue queries on the right database + """ # Create a book and author on the other database dive = Book.objects.using("other").create( title="Dive into Python", published=datetime.date(2009, 5, 4) 
@@ -714,7 +724,10 @@ class QueryTestCase(TestCase): ) def test_foreign_key_cross_database_protection(self): - "Operations that involve sharing FK objects across databases raise an error" + """ + Operations that involve sharing FK objects across databases raise an + error + """ # Create a book and author on the default database pro = Book.objects.create( title="Pro Django", published=datetime.date(2008, 12, 16) @@ -854,7 +867,10 @@ class QueryTestCase(TestCase): self.assertEqual(bob_profile.user.username, "bob") def test_o2o_cross_database_protection(self): - "Operations that involve sharing FK objects across databases raise an error" + """ + Operations that involve sharing FK objects across databases raise an + error + """ # Create a user and profile on the default database alice = User.objects.db_manager("default").create_user( "alice", "alice@example.com" @@ -891,7 +907,8 @@ class QueryTestCase(TestCase): self.assertIsNone(new_bob_profile._state.db) self.assertIsNone(charlie._state.db) - # old object comes from 'other', so the new object is set to use 'other'... + # old object comes from 'other', so the new object is set to use + # 'other'... new_bob_profile.user = bob charlie.userprofile = bob_profile self.assertEqual(new_bob_profile._state.db, "other") @@ -1263,9 +1280,9 @@ class QueryTestCase(TestCase): sub = Person.objects.using("other").filter(name="fff") qs = Book.objects.filter(editor__in=sub) - # When you call __str__ on the query object, it doesn't know about using - # so it falls back to the default. If the subquery explicitly uses a - # different database, an error should be raised. + # When you call __str__ on the query object, it doesn't know about + # using so it falls back to the default. If the subquery explicitly + # uses a different database, an error should be raised. msg = ( "Subqueries aren't allowed across different databases. Force the " "inner query to be evaluated using `list(inner_query)`." 
@@ -1517,7 +1534,10 @@ class RouterTestCase(TestCase): marty.edited.set([dive]) def test_foreign_key_cross_database_protection(self): - "Foreign keys can cross databases if they two databases have a common source" + """ + Foreign keys can cross databases if the two databases have a common + source + """ # Create a book and author on the default database pro = Book.objects.using("default").create( title="Pro Django", published=datetime.date(2008, 12, 16) @@ -1548,7 +1568,8 @@ # ...and the source database now has a copy of any object saved Book.objects.using("default").get(title="Dive into Python").delete() - # This isn't a real primary/replica database, so restore the original from other + # This isn't a real primary/replica database, so restore the original + # from other dive = Book.objects.using("other").get(title="Dive into Python") self.assertEqual(dive._state.db, "other") @@ -1565,7 +1586,8 @@ # ...and the source database now has a copy of any object saved Book.objects.using("default").get(title="Dive into Python").delete() - # This isn't a real primary/replica database, so restore the original from other + # This isn't a real primary/replica database, so restore the original + # from other dive = Book.objects.using("other").get(title="Dive into Python") self.assertEqual(dive._state.db, "other") @@ -1582,7 +1604,8 @@ # ...and the source database now has a copy of any object saved Book.objects.using("default").get(title="Dive into Python").delete() - # This isn't a real primary/replica database, so restore the original from other + # This isn't a real primary/replica database, so restore the original + # from other dive = Book.objects.using("other").get(title="Dive into Python") # If you assign a FK object when the base object hasn't @@ -1629,7 +1652,8 @@ ) self.assertEqual(cheesecake._state.db, "default") - # Same goes for 
get_or_create, regardless of whether getting or creating + # Same goes for get_or_create, regardless of whether getting or + # creating cheesecake, created = mark.edited.get_or_create( title="Dive into Cheesecake", published=datetime.date(2010, 3, 15), @@ -1740,7 +1764,8 @@ class RouterTestCase(TestCase): alice = dive.authors.create(name="Alice", pk=3) self.assertEqual(alice._state.db, "default") - # Same goes for get_or_create, regardless of whether getting or creating + # Same goes for get_or_create, regardless of whether getting or + # creating alice, created = dive.authors.get_or_create(name="Alice") self.assertEqual(alice._state.db, "default") @@ -1748,7 +1773,10 @@ class RouterTestCase(TestCase): self.assertEqual(bob._state.db, "default") def test_o2o_cross_database_protection(self): - "Operations that involve sharing FK objects across databases raise an error" + """ + Operations that involve sharing FK objects across databases raise an + error + """ # Create a user and profile on the default database alice = User.objects.db_manager("default").create_user( "alice", "alice@example.com" @@ -1807,11 +1835,13 @@ class RouterTestCase(TestCase): # ...and the source database now has a copy of any object saved Book.objects.using("default").get(title="Dive into Python").delete() - # This isn't a real primary/replica database, so restore the original from other + # This isn't a real primary/replica database, so restore the original + # from other dive = Book.objects.using("other").get(title="Dive into Python") self.assertEqual(dive._state.db, "other") - # Add to a generic foreign key set with an object from a different database + # Add to a generic foreign key set with an object from a different + # database dive.reviews.add(review1) # Database assignments of original objects haven't changed... 
@@ -1834,7 +1864,8 @@ class RouterTestCase(TestCase): # initially, no db assigned self.assertIsNone(review3._state.db) - # Dive comes from 'other', so review3 is set to use the source of 'other'... + # Dive comes from 'other', so review3 is set to use the source of + # 'other'... review3.content_object = dive self.assertEqual(review3._state.db, "default") @@ -1846,7 +1877,10 @@ class RouterTestCase(TestCase): self.assertEqual(nyt._state.db, "default") def test_m2m_managers(self): - "M2M relations are represented by managers, and can be controlled like managers" + """ + M2M relations are represented by managers, and can be controlled like + managers + """ pro = Book.objects.using("other").create( pk=1, title="Pro Django", published=datetime.date(2008, 12, 16) ) @@ -1908,8 +1942,8 @@ class RouterTestCase(TestCase): sub = Person.objects.filter(name="Mark Pilgrim") qs = Book.objects.filter(editor__in=sub) - # When you call __str__ on the query object, it doesn't know about using - # so it falls back to the default. Don't let routing instructions + # When you call __str__ on the query object, it doesn't know about + # using so it falls back to the default. Don't let routing instructions # force the subquery to an incompatible database. 
str(qs.query) @@ -2010,14 +2044,16 @@ class FixtureTestCase(TestCase): @override_settings(DATABASE_ROUTERS=[AntiPetRouter()]) def test_fixture_loading(self): "Multi-db fixtures are loaded correctly" - # "Pro Django" exists on the default database, but not on other database + # "Pro Django" exists on the default database, but not on other + # database Book.objects.get(title="Pro Django") Book.objects.using("default").get(title="Pro Django") with self.assertRaises(Book.DoesNotExist): Book.objects.using("other").get(title="Pro Django") - # "Dive into Python" exists on the default database, but not on other database + # "Dive into Python" exists on the default database, but not on other + # database Book.objects.using("other").get(title="Dive into Python") with self.assertRaises(Book.DoesNotExist): diff --git a/tests/nested_foreign_keys/tests.py b/tests/nested_foreign_keys/tests.py index 840d5f21fa..16823ae9b8 100644 --- a/tests/nested_foreign_keys/tests.py +++ b/tests/nested_foreign_keys/tests.py @@ -11,10 +11,10 @@ from .models import ( ) -# These are tests for #16715. The basic scheme is always the same: 3 models with -# 2 relations. The first relation may be null, while the second is non-nullable. -# In some cases, Django would pick the wrong join type for the second relation, -# resulting in missing objects in the queryset. +# These are tests for #16715. The basic scheme is always the same: 3 models +# with 2 relations. The first relation may be null, while the second is +# non-nullable. In some cases, Django would pick the wrong join type for the +# second relation, resulting in missing objects in the queryset. 
# # Model A # | (Relation A/B : nullable) diff --git a/tests/null_fk/tests.py b/tests/null_fk/tests.py index 9bf5f93f6c..ac7f7a8289 100644 --- a/tests/null_fk/tests.py +++ b/tests/null_fk/tests.py @@ -13,10 +13,10 @@ class NullFkTests(TestCase): c1 = Comment.objects.create(post=p, comment_text="My first comment") c2 = Comment.objects.create(comment_text="My second comment") - # Starting from comment, make sure that a .select_related(...) with a specified - # set of fields will properly LEFT JOIN multiple levels of NULLs (and the things - # that come after the NULLs, or else data that should exist won't). Regression - # test for #7369. + # Starting from comment, make sure that a .select_related(...) with a + # specified set of fields will properly LEFT JOIN multiple levels of + # NULLs (and the things that come after the NULLs, or else data that + # should exist won't). Regression test for #7369. c = Comment.objects.select_related().get(id=c1.id) self.assertEqual(c.post, p) self.assertIsNone(Comment.objects.select_related().get(id=c2.id).post) diff --git a/tests/null_fk_ordering/tests.py b/tests/null_fk_ordering/tests.py index 506c8b4086..bf4c218a8c 100644 --- a/tests/null_fk_ordering/tests.py +++ b/tests/null_fk_ordering/tests.py @@ -20,9 +20,9 @@ class NullFkOrderingTests(TestCase): author=author_2, title="This article written by Bob Smith" ) - # We can't compare results directly (since different databases sort NULLs to - # different ends of the ordering), but we can check that all results are - # returned. + # We can't compare results directly (since different databases sort + # NULLs to different ends of the ordering), but we can check that all + # results are returned. 
self.assertEqual(len(list(Article.objects.all())), 3) s = SystemInfo.objects.create(system_name="System Info") @@ -36,8 +36,8 @@ class NullFkOrderingTests(TestCase): Comment.objects.create(comment_text="Another first comment") Comment.objects.create(post=p2, comment_text="Another second comment") - # We have to test this carefully. Some databases sort NULL values before - # everything else, some sort them afterward. So we extract the ordered list - # and check the length. Before the fix, this list was too short (some values - # were omitted). + # We have to test this carefully. Some databases sort NULL values + # before everything else, some sort them afterward. So we extract the + # ordered list and check the length. Before the fix, this list was too + # short (some values were omitted). self.assertEqual(len(list(Comment.objects.all())), 4) diff --git a/tests/one_to_one/tests.py b/tests/one_to_one/tests.py index 451e97c274..d9bcb5d4dc 100644 --- a/tests/one_to_one/tests.py +++ b/tests/one_to_one/tests.py @@ -66,7 +66,8 @@ class OneToOneTests(TestCase): self.assertEqual(repr(r.place), "<Place: Demon Dogs the place>") def test_manager_all(self): - # Restaurant.objects.all() just returns the Restaurants, not the Places. + # Restaurant.objects.all() just returns the Restaurants, not the + # Places. self.assertSequenceEqual(Restaurant.objects.all(), [self.r1]) # Place.objects.all() returns all Places, regardless of whether they # have Restaurants. @@ -265,9 +266,10 @@ class OneToOneTests(TestCase): del p._state.fields_cache["restaurant"] self.assertIsNot(p.restaurant, r) - # Reassigning the Restaurant object results in an immediate cache update - # We can't use a new Restaurant because that'll violate one-to-one, but - # with a new *instance* the is test below will fail if #6886 regresses. 
+ # Reassigning the Restaurant object results in an immediate cache + # update. We can't use a new Restaurant because that'll violate + # one-to-one, but with a new *instance* the is test below will fail if + # #6886 regresses. r2 = Restaurant.objects.get(pk=r.pk) p.restaurant = r2 self.assertIs(p.restaurant, r2) @@ -298,8 +300,8 @@ class OneToOneTests(TestCase): r = Restaurant(place=p) self.assertIs(r.place, p) - # Creation using attname keyword argument and an id will cause the related - # object to be fetched. + # Creation using attname keyword argument and an id will cause the + # related object to be fetched. p = Place.objects.get(name="Demon Dogs") r = Restaurant(place_id=p.id) self.assertIsNot(r.place, p) @@ -483,10 +485,12 @@ class OneToOneTests(TestCase): private_school = School.objects.create(is_public=False) private_director = Director.objects.create(school=private_school, is_temp=True) - # Only one school is available via all() due to the custom default manager. + # Only one school is available via all() due to the custom default + # manager. self.assertSequenceEqual(School.objects.all(), [public_school]) - # Only one director is available via all() due to the custom default manager. + # Only one director is available via all() due to the custom default + # manager. self.assertSequenceEqual(Director.objects.all(), [public_director]) self.assertEqual(public_director.school, public_school) @@ -497,9 +501,9 @@ # allow it. self.assertEqual(private_director.school, private_school) - # Make sure the base manager is used so that an student can still access - # its related school even if the default manager doesn't normally - # allow it. + # Make sure the base manager is used so that a student can still + # access its related school even if the default manager doesn't + # normally allow it. 
self.assertEqual(private_school.director, private_director) School._meta.base_manager_name = "objects" diff --git a/tests/order_with_respect_to/base_tests.py b/tests/order_with_respect_to/base_tests.py index 05c614f8fa..ec3793411d 100644 --- a/tests/order_with_respect_to/base_tests.py +++ b/tests/order_with_respect_to/base_tests.py @@ -129,7 +129,8 @@ class BaseOrderWithRespectToTests: def test_bulk_create_with_empty_parent(self): """ - bulk_create() should properly set _order when parent has no existing children. + bulk_create() should properly set _order when parent has no existing + children. """ question = self.Question.objects.create(text="Test Question") answers = [self.Answer(question=question, text=f"Answer {i}") for i in range(3)] @@ -157,7 +158,8 @@ class BaseOrderWithRespectToTests: def test_bulk_create_multiple_parents(self): """ - bulk_create() should maintain separate _order sequences for different parents. + bulk_create() should maintain separate _order sequences for different + parents. """ question0 = self.Question.objects.create(text="Question 0") question1 = self.Question.objects.create(text="Question 1") @@ -251,7 +253,8 @@ class BaseOrderWithRespectToTests: # Existing answer to set initial _order=0. self.Answer.objects.create(question=question, text="Existing Answer") - # Two manually set _order=1 and one auto (which may also be assigned 1). + # Two manually set _order=1 and one auto (which may also be assigned + # 1). answers = [ self.Answer(question=question, text="Manual Order 1", _order=1), self.Answer(question=question, text="Auto Order 1"), @@ -265,7 +268,8 @@ class BaseOrderWithRespectToTests: # Manual values are as assigned, even if duplicated. self.assertEqual(manual_1._order, 1) self.assertEqual(manual_2._order, 1) - # Auto-assigned orders may also use 1 or any value, depending on implementation. - # If no collision logic, they may overlap with manual values. 
+ # Auto-assigned orders may also use 1 or any value, depending on + # implementation. If no collision logic, they may overlap with manual + # values. self.assertEqual(auto_1._order, 1) self.assertEqual(auto_2._order, 2) diff --git a/tests/pagination/tests.py b/tests/pagination/tests.py index ce39c63288..267e1a5d84 100644 --- a/tests/pagination/tests.py +++ b/tests/pagination/tests.py @@ -170,7 +170,8 @@ class PaginationTests(SimpleTestCase): for paginator_class in [Paginator, AsyncPaginator]: for orphans in [2, 3]: with self.subTest(paginator_class=paginator_class, msg=msg): - # RemovedInDjango70Warning: When the deprecation ends, replace with: + # RemovedInDjango70Warning: When the deprecation ends, + # replace with: # with self.assertRaisesMessage(ValueError, msg): with self.assertWarnsMessage(RemovedInDjango70Warning, msg): paginator_class([1, 2, 3], 2, orphans) @@ -511,8 +512,8 @@ class PaginationTests(SimpleTestCase): self, ): """ - AsyncPaginator.aget_page() raises EmptyPage if allow_empty_first_page=False - and object_list is empty. + AsyncPaginator.aget_page() raises EmptyPage if + allow_empty_first_page=False and object_list is empty. """ paginator = AsyncPaginator([], 2, allow_empty_first_page=False) with self.assertRaises(EmptyPage): @@ -856,7 +857,8 @@ class ModelPaginationTests(TestCase): self.assertIsNone(p.object_list._result_cache) self.assertNotIsInstance(p.object_list, list) - # Make sure slicing the Page object with numbers and slice objects work. + # Make sure slicing the Page object with numbers and slice objects + # work. self.assertEqual(p[0], self.articles[0]) self.assertSequenceEqual(p[slice(2)], self.articles[:2]) # After __getitem__ is called, object_list is a list @@ -899,7 +901,8 @@ class ModelPaginationTests(TestCase): with self.assertWarnsMessage(UnorderedObjectListWarning, msg) as cm: AsyncPaginator(Article.objects.all(), 5) # The warning points at the BasePaginator caller. 
- # The reason is that the UnorderedObjectListWarning occurs in BasePaginator. + # The reason is that the UnorderedObjectListWarning occurs in + # BasePaginator. base_paginator_path = pathlib.Path(inspect.getfile(BasePaginator)) self.assertIn( cm.filename, diff --git a/tests/postgres_tests/test_array.py b/tests/postgres_tests/test_array.py index 745ae6090a..392b8f946c 100644 --- a/tests/postgres_tests/test_array.py +++ b/tests/postgres_tests/test_array.py @@ -973,7 +973,8 @@ class TestMigrations(TransactionTestCase): ) def test_adding_arrayfield_with_index(self): """ - ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes. + ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops + indexes. """ table_name = "postgres_tests_chartextarrayindexmodel" call_command("migrate", "postgres_tests", verbosity=0) diff --git a/tests/postgres_tests/test_search.py b/tests/postgres_tests/test_search.py index 472dca6c7b..a7118e7c79 100644 --- a/tests/postgres_tests/test_search.py +++ b/tests/postgres_tests/test_search.py @@ -602,8 +602,8 @@ class TestRankingAndWeights(GrailTestData, PostgreSQLTestCase): rank=SearchRank( SearchVector("dialogue"), SearchQuery("brave sir robin"), - # Divide the rank by the document length and by the number of - # unique words in document. + # Divide the rank by the document length and by the number + # of unique words in document. 
normalization=Value(2).bitor(Value(8)), ), ) diff --git a/tests/prefetch_related/test_uuid.py b/tests/prefetch_related/test_uuid.py index f22b2158a5..a1593ea2a7 100644 --- a/tests/prefetch_related/test_uuid.py +++ b/tests/prefetch_related/test_uuid.py @@ -77,7 +77,8 @@ class UUIDPrefetchRelatedLookups(TestCase): def test_from_uuid_pk_lookup_integer_pk2_uuid_pk2(self): # From uuid-pk model, prefetch - # <integer-pk model>.<integer-pk model>.<uuid-pk model>.<uuid-pk model>: + # <integer-pk model>.<integer-pk model>.<uuid-pk model>.<uuid-pk + # model>: with self.assertNumQueries(5): spooky = Pet.objects.prefetch_related("people__houses__rooms__fleas").get( name="Spooky" diff --git a/tests/prefetch_related/tests.py b/tests/prefetch_related/tests.py index 1955809aec..5db383c746 100644 --- a/tests/prefetch_related/tests.py +++ b/tests/prefetch_related/tests.py @@ -486,9 +486,10 @@ class CustomPrefetchTests(TestCase): @classmethod def traverse_qs(cls, obj_iter, path): """ - Helper method that returns a list containing a list of the objects in the - obj_iter. Then for each object in the obj_iter, the path will be - recursively travelled and the found objects are added to the return value. + Helper method that returns a list containing a list of the objects in + the obj_iter. Then for each object in the obj_iter, the path will be + recursively travelled and the found objects are added to the return + value. 
""" ret_val = [] @@ -1064,7 +1065,8 @@ class CustomPrefetchTests(TestCase): Prefetch("houses", House.objects.values("pk")) with self.assertRaisesMessage(ValueError, msg): Prefetch("houses", House.objects.values_list("pk")) - # That error doesn't affect managers with custom ModelIterable subclasses + # That error doesn't affect managers with custom ModelIterable + # subclasses self.assertIs( Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass ) @@ -1134,8 +1136,8 @@ class DefaultManagerTests(TestCase): def test_m2m_then_m2m(self): with self.assertNumQueries(3): # When we prefetch the teachers, and force the query, we don't want - # the default manager on teachers to immediately get all the related - # qualifications, since this will do one query per teacher. + # the default manager on teachers to immediately get all the + # related qualifications, since this will do one query per teacher. qs = Department.objects.prefetch_related("teachers") depts = "".join( "%s department: %s\n" @@ -1396,9 +1398,9 @@ class MultiTableInheritanceTest(TestCase): for a in Author.objects.prefetch_related("authorwithage") ] - # Regression for #18090: the prefetching query must include an IN clause. - # Note that on Oracle the table name is upper case in the generated SQL, - # thus the .lower() call. + # Regression for #18090: the prefetching query must include an IN + # clause. Note that on Oracle the table name is upper case in the + # generated SQL, thus the .lower() call. 
self.assertIn("authorwithage", connection.queries[-1]["sql"].lower()) self.assertIn(" IN ", connection.queries[-1]["sql"]) @@ -1492,8 +1494,9 @@ class LookupOrderingTest(TestCase): def test_order(self): with self.assertNumQueries(4): - # The following two queries must be done in the same order as written, - # otherwise 'primary_house' will cause non-prefetched lookups + # The following two queries must be done in the same order as + # written, otherwise 'primary_house' will cause non-prefetched + # lookups qs = Person.objects.prefetch_related( "houses__rooms", "primary_house__occupants" ) @@ -1509,7 +1512,8 @@ class NullableTest(TestCase): def test_traverse_nullable(self): # Because we use select_related() for 'boss', it doesn't need to be - # prefetched, but we can still traverse it although it contains some nulls + # prefetched, but we can still traverse it although it contains some + # nulls with self.assertNumQueries(2): qs = Employee.objects.select_related("boss").prefetch_related("boss__serfs") co_serfs = [ @@ -1816,8 +1820,8 @@ class DirectPrefetchedObjectCacheReuseTests(TestCase): def test_detect_is_fetched(self): """ - Nested prefetch_related() shouldn't trigger duplicate queries for the same - lookup. + Nested prefetch_related() shouldn't trigger duplicate queries for the + same lookup. """ with self.assertNumQueries(3): books = Book.objects.filter(title__in=["book1", "book2"]).prefetch_related( diff --git a/tests/proxy_model_inheritance/tests.py b/tests/proxy_model_inheritance/tests.py index fc1826e9e9..2e2d186c3c 100644 --- a/tests/proxy_model_inheritance/tests.py +++ b/tests/proxy_model_inheritance/tests.py @@ -14,9 +14,10 @@ from .models import ( class ProxyModelInheritanceTests(TransactionTestCase): """ - Proxy model inheritance across apps can result in migrate not creating the table - for the proxied model (as described in #12286). This test creates two dummy - apps and calls migrate, then verifies that the table has been created. 
+ Proxy model inheritance across apps can result in migrate not creating the + table for the proxied model (as described in #12286). This test creates two + dummy apps and calls migrate, then verifies that the table has been + created. """ available_apps = [] diff --git a/tests/proxy_models/models.py b/tests/proxy_models/models.py index c0277e093f..ac6444fbef 100644 --- a/tests/proxy_models/models.py +++ b/tests/proxy_models/models.py @@ -130,7 +130,8 @@ class MultiUserProxy(UserProxy, AnotherUserProxy): proxy = True -# We can still use `select_related()` to include related models in our querysets. +# We can still use `select_related()` to include related models in our +# querysets. class Country(models.Model): diff --git a/tests/queries/tests.py b/tests/queries/tests.py index 38b0a5ddfa..4158a9a596 100644 --- a/tests/queries/tests.py +++ b/tests/queries/tests.py @@ -136,8 +136,8 @@ class Queries1Tests(TestCase): ann2 = Annotation.objects.create(name="a2", tag=cls.t4) ann2.notes.add(cls.n2, cls.n3) - # Create these out of order so that sorting by 'id' will be different to sorting - # by 'info'. Helps detect some problems later. + # Create these out of order so that sorting by 'id' will be different + # to sorting by 'info'. Helps detect some problems later. cls.e2 = ExtraInfo.objects.create( info="e2", note=cls.n2, value=41, filterable=False ) @@ -176,8 +176,8 @@ class Queries1Tests(TestCase): cls.r2 = Report.objects.create(name="r2", creator=cls.a3) cls.r3 = Report.objects.create(name="r3") - # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering - # will be rank3, rank2, rank1. + # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the + # Meta.ordering will be rank3, rank2, rank1. 
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2) cls.c1 = Cover.objects.create(title="first", item=cls.i4) @@ -240,9 +240,9 @@ class Queries1Tests(TestCase): [self.i1], ) - # Each filter call is processed "at once" against a single table, so this is - # different from the previous example as it tries to find tags that are two - # things at once (rather than two tags). + # Each filter call is processed "at once" against a single table, so + # this is different from the previous example as it tries to find tags + # that are two things at once (rather than two tags). self.assertSequenceEqual( Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), [] ) @@ -306,16 +306,17 @@ class Queries1Tests(TestCase): ) def test_ticket6074(self): - # Merging two empty result sets shouldn't leave a queryset with no constraints - # (which would match everything). + # Merging two empty result sets shouldn't leave a queryset with no + # constraints (which would match everything). self.assertSequenceEqual(Author.objects.filter(Q(id__in=[])), []) self.assertSequenceEqual(Author.objects.filter(Q(id__in=[]) | Q(id__in=[])), []) def test_tickets_1878_2939(self): self.assertEqual(Item.objects.values("creator").distinct().count(), 3) - # Create something with a duplicate 'name' so that we can test multi-column - # cases (which require some tricky SQL transformations under the covers). + # Create something with a duplicate 'name' so that we can test + # multi-column cases (which require some tricky SQL transformations + # under the covers). xx = Item(name="four", created=self.time1, creator=self.a2, note=self.n1) xx.save() self.assertEqual( @@ -491,7 +492,8 @@ class Queries1Tests(TestCase): [self.i3], ) - # Excluding from a relation that cannot be NULL should not use outer joins. + # Excluding from a relation that cannot be NULL should not use outer + # joins. 
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) @@ -586,8 +588,8 @@ class Queries1Tests(TestCase): ) def test_heterogeneous_qs_combination(self): - # Combining querysets built on different models should behave in a well-defined - # fashion. We raise an error. + # Combining querysets built on different models should behave in a + # well-defined fashion. We raise an error. msg = "Cannot combine queries on two different base models." with self.assertRaisesMessage(TypeError, msg): Author.objects.all() & Tag.objects.all() @@ -641,8 +643,8 @@ class Queries1Tests(TestCase): # case, Cover is ordered by Item's default, which uses Note's default). self.assertSequenceEqual(Cover.objects.all(), [self.c1, self.c2]) - # If the remote model does not have a default ordering, we order by its 'id' - # field. + # If the remote model does not have a default ordering, we order by its + # 'id' field. self.assertSequenceEqual( Item.objects.order_by("creator", "name"), [self.i1, self.i3, self.i2, self.i4], @@ -1139,7 +1141,8 @@ class Queries1Tests(TestCase): self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q.query).count("INNER JOIN"), 1) - # Querying without isnull should not convert anything to left outer join. + # Querying without isnull should not convert anything to left outer + # join. 
q = Tag.objects.filter(parent__parent=self.t1) self.assertSequenceEqual(q, [self.t4, self.t5]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) @@ -1173,7 +1176,8 @@ class Queries1Tests(TestCase): self.assertNotIn("INNER JOIN", str(q.query)) def test_ticket_10790_5(self): - # Querying with isnull=False across m2m field should not create outer joins + # Querying with isnull=False across m2m field should not create outer + # joins q = Author.objects.filter(item__tags__isnull=False) self.assertSequenceEqual(q, [self.a1, self.a1, self.a2, self.a2, self.a4]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) @@ -1190,8 +1194,8 @@ class Queries1Tests(TestCase): self.assertEqual(str(q.query).count("INNER JOIN"), 4) def test_ticket_10790_6(self): - # Querying with isnull=True across m2m field should not create inner joins - # and strip last outer join + # Querying with isnull=True across m2m field should not create inner + # joins and strip last outer join q = Author.objects.filter(item__tags__parent__parent__isnull=True) self.assertSequenceEqual( q, @@ -1218,7 +1222,8 @@ class Queries1Tests(TestCase): self.assertEqual(str(q.query).count("INNER JOIN"), 1) def test_ticket_10790_8(self): - # Querying with combined q-objects should also strip the left outer join + # Querying with combined q-objects should also strip the left outer + # join q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1)) self.assertSequenceEqual(q, [self.t1, self.t2, self.t3]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) @@ -1656,14 +1661,15 @@ class Queries4Tests(TestCase): self.assertEqual(obj.person.details.data, "d2") def test_order_by_resetting(self): - # Calling order_by() with no parameters removes any existing ordering on the - # model. But it should still be possible to add new ordering after that. + # Calling order_by() with no parameters removes any existing ordering + # on the model. 
But it should still be possible to add new ordering + # after that. qs = Author.objects.order_by().order_by("name") self.assertIn("ORDER BY", qs.query.get_compiler(qs.db).as_sql()[0]) def test_order_by_reverse_fk(self): - # It is possible to order by reverse of foreign key, although that can lead - # to duplicate results. + # It is possible to order by reverse of foreign key, although that can + # lead to duplicate results. c1 = SimpleCategory.objects.create(name="category1") c2 = SimpleCategory.objects.create(name="category2") CategoryItem.objects.create(category=c1) @@ -2051,8 +2057,8 @@ class DisjunctiveFilterTests(TestCase): def test_ticket7872(self): # Another variation on the disjunctive filtering theme. - # For the purposes of this regression test, it's important that there is no - # Join object related to the LeafA we create. + # For the purposes of this regression test, it's important that there + # is no Join object related to the LeafA we create. l1 = LeafA.objects.create(data="first") self.assertSequenceEqual(LeafA.objects.all(), [l1]) self.assertSequenceEqual( @@ -2108,8 +2114,9 @@ class Queries6Tests(TestCase): self.assertFalse(qs) def test_nested_queries_sql(self): - # Nested queries should not evaluate the inner query as part of constructing the - # SQL (so we should see a nested query here, indicated by two "SELECT" calls). + # Nested queries should not evaluate the inner query as part of + # constructing the SQL (so we should see a nested query here, indicated + # by two "SELECT" calls). qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy")) self.assertEqual(qs.query.get_compiler(qs.db).as_sql()[0].count("SELECT"), 2) @@ -2130,8 +2137,9 @@ class Queries6Tests(TestCase): [self.t1, self.t3], ) - # This example is tricky because the parent could be NULL, so only checking - # parents with annotations omits some results (tag t1, in this case). 
+ # This example is tricky because the parent could be NULL, so only + # checking parents with annotations omits some results (tag t1, in this + # case). self.assertSequenceEqual( Tag.objects.exclude(parent__annotation__name="a1"), [self.t1, self.t4, self.t5], @@ -2215,7 +2223,8 @@ class RawQueriesTests(TestCase): Note.objects.create(note="n1", misc="foo", id=1) def test_ticket14729(self): - # Test representation of raw query with one or few parameters passed as list + # Test representation of raw query with one or few parameters passed as + # list query = "SELECT * FROM queries_note WHERE note = %s" params = ["n1"] qs = Note.objects.raw(query, params=params) @@ -2579,7 +2588,8 @@ class QuerySetBitwiseOperationTests(TestCase): class CloneTests(TestCase): def test_evaluated_queryset_as_argument(self): """ - If a queryset is already evaluated, it can still be used as a query arg. + If a queryset is already evaluated, it can still be used as a query + arg. """ n = Note(note="Test1", misc="misc") n.save() @@ -3124,8 +3134,8 @@ class ConditionalTests(TestCase): # recursive). self.assertSequenceEqual(LoopX.objects.order_by("y__x__y__x__id"), []) - # When grouping without specifying ordering, we add an explicit "ORDER BY NULL" - # portion in MySQL to prevent unnecessary sorting. + # When grouping without specifying ordering, we add an explicit "ORDER BY + # NULL" portion in MySQL to prevent unnecessary sorting. 
@skipUnlessDBFeature("requires_explicit_null_ordering_when_grouping") def test_null_ordering_added(self): query = Tag.objects.values_list("parent_id", flat=True).order_by().query diff --git a/tests/queryset_pickle/tests.py b/tests/queryset_pickle/tests.py index 337c5193ce..acdb582a0a 100644 --- a/tests/queryset_pickle/tests.py +++ b/tests/queryset_pickle/tests.py @@ -181,8 +181,8 @@ class PickleabilityTestCase(TestCase): def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self): """ - #24831 -- Cached properties on ManyToOneRel created in QuerySet.delete() - caused subsequent QuerySet pickling to fail. + #24831 -- Cached properties on ManyToOneRel created in + QuerySet.delete() caused subsequent QuerySet pickling to fail. """ g = Group.objects.create(name="foo") m2m = M2MModel.objects.create() diff --git a/tests/requests_tests/test_accept_header.py b/tests/requests_tests/test_accept_header.py index f6febc937a..2699e7a390 100644 --- a/tests/requests_tests/test_accept_header.py +++ b/tests/requests_tests/test_accept_header.py @@ -346,7 +346,8 @@ class AcceptHeaderTests(TestCase): def test_quality_for_media_type_rfc9110(self): """ - Taken from https://www.rfc-editor.org/rfc/rfc9110.html#section-12.5.1-18. + Taken from + https://www.rfc-editor.org/rfc/rfc9110.html#section-12.5.1-18. """ request = HttpRequest() request.META["HTTP_ACCEPT"] = ( diff --git a/tests/requests_tests/tests.py b/tests/requests_tests/tests.py index e4540de6ee..7e615617d7 100644 --- a/tests/requests_tests/tests.py +++ b/tests/requests_tests/tests.py @@ -424,9 +424,9 @@ class RequestsTests(SimpleTestCase): """ Reading body after parsing multipart/form-data is not allowed """ - # Because multipart is used for large amounts of data i.e. file uploads, - # we don't want the data held in memory twice, and we don't want to - # silence the error by setting body = '' either. + # Because multipart is used for large amounts of data i.e. 
file + # uploads, we don't want the data held in memory twice, and we don't + # want to silence the error by setting body = '' either. payload = FakePayload( "\r\n".join( [ @@ -881,7 +881,8 @@ class RequestsTests(SimpleTestCase): def test_POST_after_body_read_and_stream_read_multipart(self): """ POST should be populated even if body is read first, and then - the stream is read second. Using multipart/form-data instead of urlencoded. + the stream is read second. Using multipart/form-data instead of + urlencoded. """ payload = FakePayload( "\r\n".join( diff --git a/tests/runtests.py b/tests/runtests.py index ceb88853b7..679f5269ca 100755 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -715,7 +715,8 @@ if __name__ == "__main__": if options.screenshots and options.tags: parser.error("--screenshots and --tag are mutually exclusive.") - # Allow including a trailing slash on app_labels for tab completion convenience + # Allow including a trailing slash on app_labels for tab completion + # convenience options.modules = [os.path.normpath(labels) for labels in options.modules] mutually_exclusive_options = [ diff --git a/tests/schema/fields.py b/tests/schema/fields.py index 24a26b2c2c..10b6451cbd 100644 --- a/tests/schema/fields.py +++ b/tests/schema/fields.py @@ -84,7 +84,8 @@ class CustomManyToManyField(RelatedField): def get_internal_type(self): return "ManyToManyField" - # Copy those methods from ManyToManyField because they don't call super() internally + # Copy those methods from ManyToManyField because they don't call super() + # internally contribute_to_related_class = models.ManyToManyField.__dict__[ "contribute_to_related_class" ] diff --git a/tests/schema/tests.py b/tests/schema/tests.py index dbed4b709d..a06553e680 100644 --- a/tests/schema/tests.py +++ b/tests/schema/tests.py @@ -677,7 +677,8 @@ class SchemaTests(TransactionTestCase): def test_add_field_remove_field(self): """ - Adding a field and removing it removes all deferred sql referring to it. 
+ Adding a field and removing it removes all deferred sql referring to + it. """ with connection.schema_editor() as editor: # Create a table with a unique constraint on the slug field. @@ -762,7 +763,8 @@ class SchemaTests(TransactionTestCase): # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") - # Add the field with a default it needs to cast (to string in this case) + # Add the field with a default it needs to cast (to string in this + # case) new_field = TestTransformField(default={1: 2}) new_field.set_attributes_from_name("thing") with connection.schema_editor() as editor: @@ -1317,7 +1319,8 @@ class SchemaTests(TransactionTestCase): def test_alter_null_to_not_null(self): """ - #23609 - Tests handling of default values when altering from NULL to NOT NULL. + #23609 - Tests handling of default values when altering from NULL to + NOT NULL. """ # Create the table with connection.schema_editor() as editor: @@ -3642,8 +3645,8 @@ class SchemaTests(TransactionTestCase): @skipIfDBFeature("supports_expression_indexes") def test_func_unique_constraint_unsupported(self): - # UniqueConstraint is ignored on databases that don't support indexes on - # expressions. + # UniqueConstraint is ignored on databases that don't support indexes + # on expressions. with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(F("name"), name="func_name_uq") @@ -4530,7 +4533,8 @@ class SchemaTests(TransactionTestCase): def test_remove_constraints_capital_letters(self): """ - #23065 - Constraint names must be quoted if they contain capital letters. + #23065 - Constraint names must be quoted if they contain capital + letters. 
""" def get_field(*args, field_class=IntegerField, **kwargs): @@ -4622,7 +4626,8 @@ class SchemaTests(TransactionTestCase): self.assertNotIn("surname", columns) # Create a row Author.objects.create(name="Anonymous1") - # Add new CharField to ensure default will be used from effective_default + # Add new CharField to ensure default will be used from + # effective_default new_field = CharField(max_length=15, blank=True) new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: @@ -5175,7 +5180,8 @@ class SchemaTests(TransactionTestCase): self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"], ) - # Alter to remove both unique=True and db_index=True (should drop all indexes) + # Alter to remove both unique=True and db_index=True (should drop all + # indexes) new_field2 = CharField(max_length=100) new_field2.set_attributes_from_name("title") with connection.schema_editor() as editor: @@ -5193,7 +5199,8 @@ class SchemaTests(TransactionTestCase): self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) - # Alter to set unique=True and remove db_index=True (should replace the index) + # Alter to set unique=True and remove db_index=True (should replace the + # index) old_field = BookWithoutAuthor._meta.get_field("title") new_field = CharField(max_length=100, unique=True) new_field.set_attributes_from_name("title") @@ -5203,7 +5210,8 @@ class SchemaTests(TransactionTestCase): self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"], ) - # Alter to set db_index=True and remove unique=True (should restore index) + # Alter to set db_index=True and remove unique=True (should restore + # index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name("title") with connection.schema_editor() as 
editor: diff --git a/tests/select_for_update/tests.py b/tests/select_for_update/tests.py index 1bc87113ba..460e279770 100644 --- a/tests/select_for_update/tests.py +++ b/tests/select_for_update/tests.py @@ -602,8 +602,8 @@ class SelectForUpdateTests(TransactionTestCase): # find that it has updated the person's name. self.assertFalse(thread.is_alive()) - # We must commit the transaction to ensure that MySQL gets a fresh read, - # since by default it runs in REPEATABLE READ mode + # We must commit the transaction to ensure that MySQL gets a fresh + # read, since by default it runs in REPEATABLE READ mode transaction.commit() p = Person.objects.get(pk=self.person.pk) diff --git a/tests/select_related_regress/tests.py b/tests/select_related_regress/tests.py index 94a15bde24..b271c5b48c 100644 --- a/tests/select_related_regress/tests.py +++ b/tests/select_related_regress/tests.py @@ -32,12 +32,12 @@ class SelectRelatedRegressTests(TestCase): Regression test for bug #7110. When using select_related(), we must query the - Device and Building tables using two different aliases (each) in order to - differentiate the start and end Connection fields. The net result is that - both the "connections = ..." queries here should give the same results - without pulling in more than the absolute minimum number of tables - (history has shown that it's easy to make a mistake in the implementation - and include some unnecessary bonus joins). + Device and Building tables using two different aliases (each) in order + to differentiate the start and end Connection fields. The net result is + that both the "connections = ..." queries here should give the same + results without pulling in more than the absolute minimum number of + tables (history has shown that it's easy to make a mistake in the + implementation and include some unnecessary bonus joins). 
""" b = Building.objects.create(name="101") @@ -70,8 +70,9 @@ class SelectRelatedRegressTests(TestCase): [(c1.id, "router/4", "switch/7"), (c2.id, "switch/7", "server/1")], ) - # This final query should only have seven tables (port, device and building - # twice each, plus connection once). Thus, 6 joins plus the FROM table. + # This final query should only have seven tables (port, device and + # building twice each, plus connection once). Thus, 6 joins plus the + # FROM table. self.assertEqual(str(connections.query).count(" JOIN "), 6) def test_regression_8106(self): @@ -102,8 +103,8 @@ class SelectRelatedRegressTests(TestCase): the first related model in the tests below ("state") is empty and we try to select the more remotely related - state__country. The regression here was not skipping the empty column results - for country before getting status. + state__country. The regression here was not skipping the empty column + results for country before getting status. """ Country.objects.create(name="Australia") diff --git a/tests/serializers/test_data.py b/tests/serializers/test_data.py index 6a6de18033..c626f2550a 100644 --- a/tests/serializers/test_data.py +++ b/tests/serializers/test_data.py @@ -241,8 +241,8 @@ def inherited_compare(testcase, pk, klass, data): testcase.assertEqual(value, getattr(instance, key)) -# Define some test helpers. Each has a pair of functions: one to create objects and one -# to make assertions against objects of a particular type. +# Define some test helpers. Each has a pair of functions: one to create objects +# and one to make assertions against objects of a particular type. 
TestHelper = namedtuple("TestHelper", ["create_object", "compare_object"]) data_obj = TestHelper(data_create, data_compare) generic_obj = TestHelper(generic_create, generic_compare) diff --git a/tests/serializers/test_json.py b/tests/serializers/test_json.py index 65d521faac..2c8ad5708e 100644 --- a/tests/serializers/test_json.py +++ b/tests/serializers/test_json.py @@ -188,7 +188,8 @@ class JsonSerializerTestCase(SerializersTestBase, TestCase): def test_helpful_error_message_for_many2many_natural1(self): """ Invalid many-to-many keys should throw a helpful error message. - This tests the code path where one of a list of natural keys is invalid. + This tests the code path where one of a list of natural keys is + invalid. """ test_string = """[{ "pk": 1, diff --git a/tests/serializers/tests.py b/tests/serializers/tests.py index 04d6b4e9ee..4638cd93aa 100644 --- a/tests/serializers/tests.py +++ b/tests/serializers/tests.py @@ -391,7 +391,9 @@ class SerializersTestBase: self.assertEqual(Category.objects.count(), 5) def test_deterministic_mapping_ordering(self): - """Mapping such as fields should be deterministically ordered. (#24558)""" + """ + Mapping such as fields should be deterministically ordered. (#24558) + """ output = serializers.serialize(self.serializer_name, [self.a1], indent=2) categories = self.a1.categories.values_list("pk", flat=True) self.assertEqual( @@ -406,7 +408,9 @@ class SerializersTestBase: ) def test_deserialize_force_insert(self): - """Deserialized content can be saved with force_insert as a parameter.""" + """ + Deserialized content can be saved with force_insert as a parameter. 
+ """ serial_str = serializers.serialize(self.serializer_name, [self.a1]) deserial_obj = list(serializers.deserialize(self.serializer_name, serial_str))[ 0 diff --git a/tests/servers/tests.py b/tests/servers/tests.py index 05898009d5..0ddf55df6f 100644 --- a/tests/servers/tests.py +++ b/tests/servers/tests.py @@ -198,8 +198,9 @@ class LiveServerViews(LiveServerBase): development server is rather simple we support it only in cases where we can detect a content length from the response. This should be doable for all simple views and streaming responses where an iterable with - length of one is passed. The latter follows as result of `set_content_length` - from https://github.com/python/cpython/blob/main/Lib/wsgiref/handlers.py. + length of one is passed. The latter follows as result of + `set_content_length` from + https://github.com/python/cpython/blob/main/Lib/wsgiref/handlers.py. If we cannot detect a content length we explicitly set the `Connection` header to `close` to notify the client that we do not actually support diff --git a/tests/sessions_tests/tests.py b/tests/sessions_tests/tests.py index 9eabb933a8..81c2e7a5de 100644 --- a/tests/sessions_tests/tests.py +++ b/tests/sessions_tests/tests.py @@ -41,9 +41,9 @@ from .models import SessionStore as CustomDatabaseSession class SessionTestsMixin: - # This does not inherit from TestCase to avoid any tests being run with this - # class, which wouldn't work, and to allow different TestCase subclasses to - # be used. + # This does not inherit from TestCase to avoid any tests being run with + # this class, which wouldn't work, and to allow different TestCase + # subclasses to be used. backend = None # subclasses must specify @@ -326,8 +326,8 @@ class SessionTestsMixin: self.assertEqual(await self.session.aget("a"), "b") def test_invalid_key(self): - # Submitting an invalid session key (either by guessing, or if the db has - # removed the key) results in a new key being generated. 
+ # Submitting an invalid session key (either by guessing, or if the db + # has removed the key) results in a new key being generated. try: session = self.backend("1") session.save() @@ -340,8 +340,8 @@ class SessionTestsMixin: session.delete("1") async def test_invalid_key_async(self): - # Submitting an invalid session key (either by guessing, or if the db has - # removed the key) results in a new key being generated. + # Submitting an invalid session key (either by guessing, or if the db + # has removed the key) results in a new key being generated. try: session = self.backend("1") await session.asave() @@ -490,8 +490,8 @@ class SessionTestsMixin: ) def test_get_expire_at_browser_close(self): - # Tests get_expire_at_browser_close with different settings and different - # set_expiry calls + # Tests get_expire_at_browser_close with different settings and + # different set_expiry calls with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False): self.session.set_expiry(10) self.assertIs(self.session.get_expire_at_browser_close(), False) @@ -513,8 +513,8 @@ class SessionTestsMixin: self.assertIs(self.session.get_expire_at_browser_close(), True) async def test_get_expire_at_browser_close_async(self): - # Tests get_expire_at_browser_close with different settings and different - # set_expiry calls + # Tests get_expire_at_browser_close with different settings and + # different set_expiry calls with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False): await self.session.aset_expiry(10) self.assertIs(await self.session.aget_expire_at_browser_close(), False) @@ -868,7 +868,8 @@ class FileSessionTests(SessionTestsMixin, SimpleTestCase): backend = FileSession def setUp(self): - # Do file session tests in an isolated directory, and kill it after we're done. + # Do file session tests in an isolated directory, and kill it after + # we're done. 
self.original_session_file_path = settings.SESSION_FILE_PATH self.temp_session_store = settings.SESSION_FILE_PATH = self.mkdtemp() self.addCleanup(shutil.rmtree, self.temp_session_store) @@ -1247,7 +1248,8 @@ class CookieSessionTests(SessionTestsMixin, SimpleTestCase): @unittest.expectedFailure def test_actual_expiry(self): - # The cookie backend doesn't handle non-default expiry dates, see #19201 + # The cookie backend doesn't handle non-default expiry dates, see + # #19201 super().test_actual_expiry() async def test_actual_expiry_async(self): diff --git a/tests/settings_tests/tests.py b/tests/settings_tests/tests.py index c1fe2d042d..7794aeef9a 100644 --- a/tests/settings_tests/tests.py +++ b/tests/settings_tests/tests.py @@ -205,7 +205,8 @@ class SettingsTests(SimpleTestCase): getattr(settings, "TEST") def test_class_decorator(self): - # SimpleTestCase can be decorated by override_settings, but not ut.TestCase + # SimpleTestCase can be decorated by override_settings, but not + # ut.TestCase class SimpleTestCaseSubclass(SimpleTestCase): pass @@ -467,7 +468,8 @@ class IsOverriddenTest(SimpleTestCase): class TestListSettings(SimpleTestCase): """ Make sure settings that should be lists or tuples throw - ImproperlyConfigured if they are set to a string instead of a list or tuple. + ImproperlyConfigured if they are set to a string instead of a list or + tuple. 
""" list_or_tuple_settings = ( diff --git a/tests/signals/tests.py b/tests/signals/tests.py index 6c90c6aa52..7cb64f6e05 100644 --- a/tests/signals/tests.py +++ b/tests/signals/tests.py @@ -16,7 +16,8 @@ from .models import Author, Book, Car, Page, Person class BaseSignalSetup: def setUp(self): # Save up the number of connected signals so that we can check at the - # end that all the signals we register get properly unregistered (#9989) + # end that all the signals we register get properly unregistered + # (#9989) self.pre_signals = ( len(signals.pre_save.receivers), len(signals.post_save.receivers), @@ -88,7 +89,8 @@ class SignalTests(BaseSignalSetup, TestCase): ) data[:] = [] - # Calling an internal method purely so that we can trigger a "raw" save. + # Calling an internal method purely so that we can trigger a "raw" + # save. p1.save_base(raw=True) self.assertEqual( data, @@ -341,7 +343,8 @@ class SignalTests(BaseSignalSetup, TestCase): ) data[:] = [] - # Assigning and removing to/from m2m shouldn't generate an m2m signal. + # Assigning and removing to/from m2m shouldn't generate an m2m + # signal. b1.authors.set([a1]) self.assertEqual(data, []) b1.authors.set([]) diff --git a/tests/sites_tests/tests.py b/tests/sites_tests/tests.py index 4f5b07ee8f..79183f7aca 100644 --- a/tests/sites_tests/tests.py +++ b/tests/sites_tests/tests.py @@ -38,8 +38,8 @@ class SitesFrameworkTests(TestCase): Site.objects.get_current() def test_site_cache(self): - # After updating a Site object (e.g. via the admin), we shouldn't return a - # bogus value from the SITE_CACHE. + # After updating a Site object (e.g. via the admin), we shouldn't + # return a bogus value from the SITE_CACHE. 
site = Site.objects.get_current() self.assertEqual("example.com", site.name) s2 = Site.objects.get(id=settings.SITE_ID) diff --git a/tests/staticfiles_tests/test_management.py b/tests/staticfiles_tests/test_management.py index a3226f5aa1..e8873915e6 100644 --- a/tests/staticfiles_tests/test_management.py +++ b/tests/staticfiles_tests/test_management.py @@ -439,8 +439,8 @@ class TestCollectionDryRunManifestStaticFilesStorage(TestCollectionDryRun): class TestCollectionFilesOverride(CollectionTestCase): """ Test overriding duplicated files by ``collectstatic`` management command. - Check for proper handling of apps order in installed apps even if file modification - dates are in different order: + Check for proper handling of apps order in installed apps even if file + modification dates are in different order: 'staticfiles_test_app', 'staticfiles_tests.apps.no_label', """ @@ -457,9 +457,9 @@ class TestCollectionFilesOverride(CollectionTestCase): self.orig_atime = os.path.getatime(self.orig_path) # prepare duplicate of file2.txt from a temporary app - # this file will have modification time older than no_label/static/file2.txt - # anyway it should be taken to STATIC_ROOT because the temporary app is before - # 'no_label' app in installed apps + # this file will have modification time older than + # no_label/static/file2.txt anyway it should be taken to STATIC_ROOT + # because the temporary app is before 'no_label' app in installed apps self.temp_app_path = os.path.join(self.temp_dir, "staticfiles_test_app") self.testfile_path = os.path.join(self.temp_app_path, "static", "file2.txt") diff --git a/tests/staticfiles_tests/test_storage.py b/tests/staticfiles_tests/test_storage.py index 9ef49491ff..e09f9eda1c 100644 --- a/tests/staticfiles_tests/test_storage.py +++ b/tests/staticfiles_tests/test_storage.py @@ -131,7 +131,8 @@ class TestHashedFiles: def test_template_tag_absolute_root(self): """ - Like test_template_tag_absolute, but for a file in STATIC_ROOT (#26249). 
+ Like test_template_tag_absolute, but for a file in STATIC_ROOT + (#26249). """ relpath = self.hashed_file_path("absolute_root.css") self.assertEqual(relpath, "absolute_root.f821df1b64f7.css") @@ -198,8 +199,8 @@ class TestHashedFiles: Files that are alterable should always be post-processed; files that aren't should be skipped. - collectstatic has already been called once in setUp() for this testcase, - therefore we check by verifying behavior on a second run. + collectstatic has already been called once in setUp() for this + testcase, therefore we check by verifying behavior on a second run. """ collectstatic_args = { "interactive": False, diff --git a/tests/string_lookup/tests.py b/tests/string_lookup/tests.py index cc7d36061a..4a0c6c32fc 100644 --- a/tests/string_lookup/tests.py +++ b/tests/string_lookup/tests.py @@ -72,7 +72,8 @@ class StringLookupTests(TestCase): """ Regression test for #708 - "like" queries on IP address fields require casting with HOST() (on PostgreSQL). + "like" queries on IP address fields require casting with HOST() (on + PostgreSQL). """ a = Article(name="IP test", text="The body", submitted_from="192.0.2.100") a.save() diff --git a/tests/syndication_tests/tests.py b/tests/syndication_tests/tests.py index 6403f7461a..17a408a686 100644 --- a/tests/syndication_tests/tests.py +++ b/tests/syndication_tests/tests.py @@ -292,7 +292,8 @@ class SyndicationFeedTest(FeedTestCase): def test_rss091_feed(self): """ - Test the structure and content of feeds generated by RssUserland091Feed. + Test the structure and content of feeds generated by + RssUserland091Feed. 
""" response = self.client.get("/syndication/rss091/") doc = minidom.parseString(response.content) diff --git a/tests/template_tests/filter_tests/test_urlize.py b/tests/template_tests/filter_tests/test_urlize.py index 043029c8dc..ca9f8fb7e1 100644 --- a/tests/template_tests/filter_tests/test_urlize.py +++ b/tests/template_tests/filter_tests/test_urlize.py @@ -296,7 +296,8 @@ class FunctionTests(SimpleTestCase): def test_trailing_period(self): """ - #18644 - Check urlize trims trailing period when followed by parenthesis + #18644 - Check urlize trims trailing period when followed by + parenthesis """ self.assertEqual( urlize("(Go to http://www.example.com/foo.)"), diff --git a/tests/template_tests/syntax_tests/i18n/test_blocktranslate.py b/tests/template_tests/syntax_tests/i18n/test_blocktranslate.py index 1425406960..479d1374f9 100644 --- a/tests/template_tests/syntax_tests/i18n/test_blocktranslate.py +++ b/tests/template_tests/syntax_tests/i18n/test_blocktranslate.py @@ -563,7 +563,9 @@ class TranslationBlockTranslateTagTests(SimpleTestCase): @override_settings(LOCALE_PATHS=extended_locale_paths) def test_template_tags_pgettext(self): - """{% blocktranslate %} takes message contexts into account (#14806).""" + """ + {% blocktranslate %} takes message contexts into account (#14806). + """ trans_real._active = Local() trans_real._translations = {} with translation.override("de"): @@ -733,8 +735,8 @@ class TranslationBlockTranslateTagTests(SimpleTestCase): def test_bad_placeholder_2(self): """ Error in translation file should not crash template rendering (#18393). - (%(person) misses a 's' in fr.po, causing the string formatting to fail) - . + (%(person) misses a 's' in fr.po, causing the string formatting to + fail) . 
""" with translation.override("fr"): t = Template( diff --git a/tests/template_tests/syntax_tests/test_if_changed.py b/tests/template_tests/syntax_tests/test_if_changed.py index fb0a693460..6e5d04c225 100644 --- a/tests/template_tests/syntax_tests/test_if_changed.py +++ b/tests/template_tests/syntax_tests/test_if_changed.py @@ -284,8 +284,9 @@ class IfChangedTests(SimpleTestCase): ) # Using generator to mimic concurrency. - # The generator is not passed to the 'for' loop, because it does a list(values) - # instead, call gen.next() in the template to control the generator. + # The generator is not passed to the 'for' loop, because it does a + # list(values) instead, call gen.next() in the template to control the + # generator. def gen(): yield 1 yield 2 diff --git a/tests/template_tests/templatetags/custom.py b/tests/template_tests/templatetags/custom.py index 2c0a1b7f3f..bf201e74e9 100644 --- a/tests/template_tests/templatetags/custom.py +++ b/tests/template_tests/templatetags/custom.py @@ -32,9 +32,9 @@ def div_custom_end(content): @register.filter def noop(value, param=None): - """A noop filter that always return its first argument and does nothing with - its second (optional) one. - Useful for testing out whitespace in filter arguments (see #19882).""" + """A noop filter that always return its first argument and does nothing + with its second (optional) one. Useful for testing out whitespace in filter + arguments (see #19882).""" return value diff --git a/tests/test_client/tests.py b/tests/test_client/tests.py index 652563d269..d1d329aff9 100644 --- a/tests/test_client/tests.py +++ b/tests/test_client/tests.py @@ -331,7 +331,9 @@ class ClientTest(TestCase): self.assertEqual(response.request["PATH_INFO"], "/accounts/login/") def test_follow_relative_redirect_no_trailing_slash(self): - "A URL with a relative redirect with no trailing slash can be followed." + """ + A URL with a relative redirect with no trailing slash can be followed. 
+ """ response = self.client.get("/accounts/no_trailing_slash", follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request["PATH_INFO"], "/accounts/login/") @@ -681,7 +683,9 @@ class ClientTest(TestCase): ] ) def test_view_with_inactive_force_login(self): - "Request a page that is protected with @login, but use an inactive login" + """ + Request a page that is protected with @login, but use an inactive login + """ # Get the page without logging in. Should result in 302. response = self.client.get("/login_protected_view/") diff --git a/tests/test_client_regress/tests.py b/tests/test_client_regress/tests.py index 325ad17cdb..966eefa71b 100644 --- a/tests/test_client_regress/tests.py +++ b/tests/test_client_regress/tests.py @@ -50,10 +50,10 @@ class ExtraAssertMixin: :param method: The assertion method to test. :param method_args: Positional arguments to pass to the method. - :param expected_msg: The expected base error message (required keyword-only). - :param msg_prefix: Optional prefix to be added to the message in the second - subTest. - :param method_kwargs: Keyword arguments to pass to the method. + :param expected_msg: The expected base error message (required + keyword-only). :param msg_prefix: Optional prefix to be added to the + message in the second subTest. :param method_kwargs: Keyword arguments + to pass to the method. Used internally for testing Django's assertions. 
""" @@ -236,7 +236,10 @@ class AssertContainsTests(ExtraAssertMixin, SimpleTestCase): self.assertContains(r, b"\xe5\xb3\xa0".decode()) def test_unicode_not_contains(self): - "Unicode characters can be searched for, and not found in template context" + """ + Unicode characters can be searched for, and not found in template + context + """ # Regression test for #10183 r = self.client.get("/check_unicode/") self.assertNotContains(r, "はたけ") @@ -280,7 +283,8 @@ class AssertContainsTests(ExtraAssertMixin, SimpleTestCase): def test_assert_not_contains_renders_template_response(self): """ - An unrendered SimpleTemplateResponse may be used in assertNotContains(). + An unrendered SimpleTemplateResponse may be used in + assertNotContains(). """ template = engines["django"].from_string("Hello") response = SimpleTemplateResponse(template) @@ -378,7 +382,9 @@ class AssertTemplateUsedTests(TestDataMixin, TestCase): self.assertTemplateUsed(response, "base.html", count=2) def test_template_rendered_multiple_times(self): - """Template assertions work when a template is rendered multiple times.""" + """ + Template assertions work when a template is rendered multiple times. 
+ """ response = self.client.get("/render_template_multiple_times/") self.assertTemplateUsed(response, "base.html", count=2) @@ -387,7 +393,10 @@ class AssertTemplateUsedTests(TestDataMixin, TestCase): @override_settings(ROOT_URLCONF="test_client_regress.urls") class AssertRedirectsTests(ExtraAssertMixin, SimpleTestCase): def test_redirect_page(self): - "An assertion is raised if the original page couldn't be retrieved as expected" + """ + An assertion is raised if the original page couldn't be retrieved as + expected + """ # This page will redirect with code 301, not 302 response = self.client.get("/permanent_redirect_view/") try: @@ -554,7 +563,8 @@ class AssertRedirectsTests(ExtraAssertMixin, SimpleTestCase): with self.assertRaises(RedirectCycleError) as context: self.client.get("/circular_redirect_1/", {}, follow=True) response = context.exception.last_response - # The chain of redirects will get back to the starting point, but stop there. + # The chain of redirects will get back to the starting point, but stop + # there. self.assertRedirects( response, "/circular_redirect_2/", status_code=302, target_status_code=302 ) @@ -640,7 +650,10 @@ class AssertRedirectsTests(ExtraAssertMixin, SimpleTestCase): ) def test_redirect_on_non_redirect_page(self): - "An assertion is raised if the original page couldn't be retrieved as expected" + """ + An assertion is raised if the original page couldn't be retrieved as + expected + """ # This page will redirect with code 301, not 302 response = self.client.get("/get_view/") try: @@ -800,7 +813,8 @@ class ExceptionTests(TestDataMixin, TestCase): # At this point, an exception has been raised, and should be cleared. - # This next operation should be successful; if it isn't we have a problem. + # This next operation should be successful; if it isn't we have a + # problem. 
login = self.client.login(username="staff", password="password") self.assertTrue(login, "Could not log in") self.client.get("/staff_only/") @@ -822,9 +836,10 @@ class TemplateExceptionTests(SimpleTestCase): self.client.get("/no_such_view/") -# We need two different tests to check URLconf substitution - one to check -# it was changed, and another one (without self.urls) to check it was reverted on -# teardown. This pair of tests relies upon the alphabetical ordering of test execution. +# We need two different tests to check URLconf substitution - one to check it +# was changed, and another one (without self.urls) to check it was reverted on +# teardown. This pair of tests relies upon the alphabetical ordering of test +# execution. @override_settings(ROOT_URLCONF="test_client_regress.urls") class UrlconfSubstitutionTests(SimpleTestCase): def test_urlconf_was_changed(self): @@ -837,7 +852,8 @@ class UrlconfSubstitutionTests(SimpleTestCase): # name is to ensure alphabetical ordering. class zzUrlconfSubstitutionTests(SimpleTestCase): def test_urlconf_was_reverted(self): - """URLconf is reverted to original value after modification in a TestCase + """URLconf is reverted to original value after modification in a + TestCase This will not find a match as the default ROOT_URLCONF is empty. """ @@ -984,7 +1000,9 @@ class SessionTests(TestDataMixin, TestCase): @override_settings(AUTH_USER_MODEL="test_client_regress.CustomUser") def test_logout_with_custom_user(self): - """Logout should send user_logged_out signal if custom user was logged in.""" + """ + Logout should send user_logged_out signal if custom user was logged in. 
+ """ def listener(*args, **kwargs): self.assertEqual(kwargs["sender"], CustomUser) @@ -1151,7 +1169,9 @@ class RequestMethodStringDataTests(SimpleTestCase): self.assertEqual(response.content, b"request method: PATCH") def test_empty_string_data(self): - "Request a view with empty string data via request method GET/POST/HEAD" + """ + Request a view with empty string data via request method GET/POST/HEAD + """ # Regression test for #21740 response = self.client.get("/body/", data="", content_type="application/json") self.assertEqual(response.content, b"") diff --git a/tests/test_client_regress/views.py b/tests/test_client_regress/views.py index 91b8bdfefc..24091ea945 100644 --- a/tests/test_client_regress/views.py +++ b/tests/test_client_regress/views.py @@ -21,7 +21,10 @@ def no_template_view(request): def staff_only_view(request): - "A view that can only be visited by staff. Non staff members get an exception" + """ + A view that can only be visited by staff. Non staff members get an + exception + """ if request.user.is_staff: return HttpResponse() else: diff --git a/tests/test_runner/test_discover_runner.py b/tests/test_runner/test_discover_runner.py index 4c4a22397b..3c68a83595 100644 --- a/tests/test_runner/test_discover_runner.py +++ b/tests/test_runner/test_discover_runner.py @@ -364,7 +364,8 @@ class DiscoverRunnerTests(SimpleTestCase): def test_duplicates_ignored(self): """ - Tests shouldn't be discovered twice when discovering on overlapping paths. + Tests shouldn't be discovered twice when discovering on overlapping + paths. """ base_app = "forms_tests" sub_app = "forms_tests.field_tests" diff --git a/tests/test_runner/tests.py b/tests/test_runner/tests.py index a9fadc872b..2c1fc3ad68 100644 --- a/tests/test_runner/tests.py +++ b/tests/test_runner/tests.py @@ -907,7 +907,8 @@ class SetupDatabasesTests(unittest.TestCase): }, } ) - # Using the real current name as old_name to not mess with the test suite. 
+ # Using the real current name as old_name to not mess with the test + # suite. old_name = settings.DATABASES[db.DEFAULT_DB_ALIAS]["NAME"] with mock.patch("django.db.connections", new=tested_connections): tested_connections["default"].creation.destroy_test_db( @@ -969,8 +970,9 @@ class AutoIncrementResetTest(TransactionTestCase): class EmptyDefaultDatabaseTest(unittest.TestCase): def test_empty_default_database(self): """ - An empty default database in settings does not raise an ImproperlyConfigured - error when running a unit test that does not use a database. + An empty default database in settings does not raise an + ImproperlyConfigured error when running a unit test that does not use a + database. """ tested_connections = db.ConnectionHandler({"default": {}}) with mock.patch("django.db.connections", new=tested_connections): diff --git a/tests/test_runner_apps/sample/tests_sample.py b/tests/test_runner_apps/sample/tests_sample.py index 6e876ebd41..46d1c07e84 100644 --- a/tests/test_runner_apps/sample/tests_sample.py +++ b/tests/test_runner_apps/sample/tests_sample.py @@ -18,7 +18,8 @@ class TestDjangoTestCase(DjangoTestCase): class TestZimpleTestCase(SimpleTestCase): - # Z is used to trick this test case to appear after Vanilla in default suite + # Z is used to trick this test case to appear after Vanilla in default + # suite def test_sample(self): self.assertEqual(1, 1) diff --git a/tests/timezones/tests.py b/tests/timezones/tests.py index fd8b49fdef..6c6cbebe17 100644 --- a/tests/timezones/tests.py +++ b/tests/timezones/tests.py @@ -75,7 +75,8 @@ def override_database_connection_timezone(timezone): try: orig_timezone = connection.settings_dict["TIME_ZONE"] connection.settings_dict["TIME_ZONE"] = timezone - # Clear cached properties, after first accessing them to ensure they exist. + # Clear cached properties, after first accessing them to ensure they + # exist. 
connection.timezone del connection.timezone connection.timezone_name @@ -83,7 +84,8 @@ def override_database_connection_timezone(timezone): yield finally: connection.settings_dict["TIME_ZONE"] = orig_timezone - # Clear cached properties, after first accessing them to ensure they exist. + # Clear cached properties, after first accessing them to ensure they + # exist. connection.timezone del connection.timezone connection.timezone_name @@ -450,8 +452,8 @@ class NewDatabaseTests(TestCase): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) with timezone.override(UTC): - # These two dates fall in the same day in EAT, but in different days, - # years and months in UTC. + # These two dates fall in the same day in EAT, but in different + # days, years and months in UTC. self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1) self.assertEqual(Event.objects.filter(dt__month=1).count(), 1) self.assertEqual(Event.objects.filter(dt__day=1).count(), 1) @@ -1169,7 +1171,8 @@ class TemplateTests(SimpleTestCase): @skipIf(sys.platform == "win32", "Windows uses non-standard time zone names") def test_tz_template_context_processor(self): """ - Test the django.template.context_processors.tz template context processor. + Test the django.template.context_processors.tz template context + processor. """ tpl = Template("{{ TIME_ZONE }}") context = Context() diff --git a/tests/transactions/tests.py b/tests/transactions/tests.py index 9fe8c58593..a3cd4cafee 100644 --- a/tests/transactions/tests.py +++ b/tests/transactions/tests.py @@ -244,7 +244,9 @@ class AtomicTests(TransactionTestCase): class AtomicInsideTransactionTests(AtomicTests): - """All basic tests for atomic should also pass within an existing transaction.""" + """ + All basic tests for atomic should also pass within an existing transaction. 
+ """ def setUp(self): self.atomic = transaction.atomic() @@ -255,7 +257,9 @@ class AtomicInsideTransactionTests(AtomicTests): class AtomicWithoutAutocommitTests(AtomicTests): - """All basic tests for atomic should also pass when autocommit is turned off.""" + """ + All basic tests for atomic should also pass when autocommit is turned off. + """ def setUp(self): transaction.set_autocommit(False) @@ -393,7 +397,9 @@ class AtomicMySQLTests(TransactionTestCase): @skipIf(threading is None, "Test requires threading") def test_implicit_savepoint_rollback(self): - """MySQL implicitly rolls back savepoints when it deadlocks (#22291).""" + """ + MySQL implicitly rolls back savepoints when it deadlocks (#22291). + """ Reporter.objects.create(id=1) Reporter.objects.create(id=2) @@ -457,7 +463,8 @@ class AtomicMiscTests(TransactionTestCase): sid = connection.savepoint_ids[-1] raise Exception("Oops") - # This is expected to fail because the savepoint no longer exists. + # This is expected to fail because the savepoint no longer + # exists. connection.savepoint_rollback(sid) def test_mark_for_rollback_on_error_in_transaction(self): @@ -497,8 +504,8 @@ class AtomicMiscTests(TransactionTestCase): raise Exception("Oops") - # Ensure that `mark_for_rollback_on_error` did not mark the transaction - # as broken, since we are in autocommit mode … + # Ensure that `mark_for_rollback_on_error` did not mark the + # transaction as broken, since we are in autocommit mode … self.assertFalse(transaction.get_connection().needs_rollback) # … and further queries work nicely. @@ -526,7 +533,9 @@ class NonAutocommitTests(TransactionTestCase): Reporter.objects.last() def test_orm_query_without_autocommit(self): - """#24921 -- ORM queries must be possible after set_autocommit(False).""" + """ + #24921 -- ORM queries must be possible after set_autocommit(False). 
+ """ Reporter.objects.create(first_name="Tintin") diff --git a/tests/unmanaged_models/tests.py b/tests/unmanaged_models/tests.py index dc75bdc993..81c19a83e5 100644 --- a/tests/unmanaged_models/tests.py +++ b/tests/unmanaged_models/tests.py @@ -42,7 +42,8 @@ class SimpleTests(TestCase): class ManyToManyUnmanagedTests(TestCase): def test_many_to_many_between_unmanaged(self): """ - The intermediary table between two unmanaged models should not be created. + The intermediary table between two unmanaged models should not be + created. """ table = Unmanaged2._meta.get_field("mm").m2m_db_table() tables = connection.introspection.table_names() diff --git a/tests/update/tests.py b/tests/update/tests.py index bb83440008..bbff0e4ff4 100644 --- a/tests/update/tests.py +++ b/tests/update/tests.py @@ -196,7 +196,8 @@ class AdvancedTests(TestCase): def test_update_annotated_multi_table_queryset(self): """ - Update of a queryset that's been annotated and involves multiple tables. + Update of a queryset that's been annotated and involves multiple + tables. """ # Trivial annotated update qs = DataPoint.objects.annotate(related_count=Count("relatedpoint")) diff --git a/tests/urlpatterns_reverse/tests.py b/tests/urlpatterns_reverse/tests.py index c396e24934..58cd2601db 100644 --- a/tests/urlpatterns_reverse/tests.py +++ b/tests/urlpatterns_reverse/tests.py @@ -117,7 +117,8 @@ resolve_test_data = ( (), {"arg2": "37"}, ), - # Unnamed views should have None as the url_name. Regression data for #21157. + # Unnamed views should have None as the url_name. Regression data for + # #21157. ( "/unnamed/normal/42/37/", None, @@ -445,7 +446,8 @@ class URLPatternReverse(SimpleTestCase): self.assertEqual(got, expected) def test_reverse_none(self): - # Reversing None should raise an error, not return the last un-named view. + # Reversing None should raise an error, not return the last un-named + # view. 
with self.assertRaises(NoReverseMatch): reverse(None) @@ -661,7 +663,8 @@ class ResolverTests(SimpleTestCase): ("name-conflict", (), {"first": "arg"}, "conflict-first/arg/"), ("name-conflict", (), {"middle": "arg"}, "conflict-middle/arg/"), ("name-conflict", (), {"last": "arg"}, "conflict-last/arg/"), - # The number and order of the arguments don't interfere with reversing. + # The number and order of the arguments don't interfere with + # reversing. ("name-conflict", ("arg", "arg"), {}, "conflict/arg/arg/"), ] for name, args, kwargs, expected in test_urls: @@ -851,8 +854,9 @@ class ReverseShortcutTests(SimpleTestCase): # Assert that we can redirect using UTF-8 strings res = redirect("/æøå/abc/") self.assertEqual(res.url, "/%C3%A6%C3%B8%C3%A5/abc/") - # Assert that no imports are attempted when dealing with a relative path - # (previously, the below would resolve in a UnicodeEncodeError from __import__ ) + # Assert that no imports are attempted when dealing with a relative + # path (previously, the below would resolve in a UnicodeEncodeError + # from __import__ ) res = redirect("/æøå.abc/") self.assertEqual(res.url, "/%C3%A6%C3%B8%C3%A5.abc/") res = redirect("os.path") diff --git a/tests/user_commands/tests.py b/tests/user_commands/tests.py index 4b03f54564..e282bd4bc9 100644 --- a/tests/user_commands/tests.py +++ b/tests/user_commands/tests.py @@ -488,8 +488,8 @@ class CommandRunTests(AdminScriptTestCase): "settings.py", apps=["django.contrib.staticfiles", "user_commands"], sdict={ - # (staticfiles.E001) The STATICFILES_DIRS setting is not a tuple or - # list. + # (staticfiles.E001) The STATICFILES_DIRS setting is not a + # tuple or list. "STATICFILES_DIRS": '"foo"', }, ) diff --git a/tests/utils_tests/test_csp.py b/tests/utils_tests/test_csp.py index 96c66538d0..86682544e2 100644 --- a/tests/utils_tests/test_csp.py +++ b/tests/utils_tests/test_csp.py @@ -67,8 +67,9 @@ class CSPBuildPolicyTest(SimpleTestCase): """ Test that a set can be passed as a value. 
- Sets are often used in Django settings to ensure uniqueness, however, sets are - unordered. The middleware ensures consistency via sorting if a set is passed. + Sets are often used in Django settings to ensure uniqueness, however, + sets are unordered. The middleware ensures consistency via sorting if a + set is passed. """ policy = {"default-src": {CSP.SELF, "foo.com", "bar.com"}} self.assertPolicyEqual( @@ -147,7 +148,8 @@ class LazyNonceTests(SimpleTestCase): return result with patch("django.utils.csp.secrets.token_urlsafe", memento_token_urlsafe): - # Force usage, similar to template rendering, to generate the nonce. + # Force usage, similar to template rendering, to generate the + # nonce. val = str(nonce) self.assertTrue(nonce) diff --git a/tests/utils_tests/test_dateformat.py b/tests/utils_tests/test_dateformat.py index 9604b0cef4..3f0498bd46 100644 --- a/tests/utils_tests/test_dateformat.py +++ b/tests/utils_tests/test_dateformat.py @@ -48,7 +48,8 @@ class DateFormatTests(SimpleTestCase): dt = make_aware(datetime(2009, 5, 16, 5, 30, 30), ltz) self.assertEqual(datetime.fromtimestamp(int(format(dt, "U")), tz), dt) self.assertEqual(datetime.fromtimestamp(int(format(dt, "U")), ltz), dt) - # astimezone() is safe here because the target timezone doesn't have DST + # astimezone() is safe here because the target timezone doesn't have + # DST self.assertEqual( datetime.fromtimestamp(int(format(dt, "U"))), dt.astimezone(ltz).replace(tzinfo=None), diff --git a/tests/utils_tests/test_decorators.py b/tests/utils_tests/test_decorators.py index 8c0244e819..8e14b53cb7 100644 --- a/tests/utils_tests/test_decorators.py +++ b/tests/utils_tests/test_decorators.py @@ -69,7 +69,8 @@ class DecoratorFromMiddlewareTests(SimpleTestCase): def test_callable_process_view_middleware(self): """ - Test a middleware that implements process_view, operating on a callable class. + Test a middleware that implements process_view, operating on a callable + class. 
""" class_process_view(self.rf.get("/")) diff --git a/tests/utils_tests/test_functional.py b/tests/utils_tests/test_functional.py index 8b5c330bcf..e0a1a76471 100644 --- a/tests/utils_tests/test_functional.py +++ b/tests/utils_tests/test_functional.py @@ -127,7 +127,9 @@ class FunctionalTests(SimpleTestCase): self.assertCachedPropertyWorks(attr, Class) def test_cached_property_reuse_different_names(self): - """Disallow this case because the decorated function wouldn't be cached.""" + """ + Disallow this case because the decorated function wouldn't be cached. + """ type_msg = ( "Cannot assign the same cached_property to two different names ('a' and " "'b')." diff --git a/tests/utils_tests/test_html.py b/tests/utils_tests/test_html.py index 4ce552e79a..284f33aedc 100644 --- a/tests/utils_tests/test_html.py +++ b/tests/utils_tests/test_html.py @@ -170,7 +170,8 @@ class TestUtilsHtml(SimpleTestCase): strip_tags(value) def test_strip_tags_files(self): - # Test with more lengthy content (also catching performance regressions) + # Test with more lengthy content (also catching performance + # regressions) for filename in ("strip_tags1.html", "strip_tags2.txt"): with self.subTest(filename=filename): path = os.path.join(os.path.dirname(__file__), "files", filename) diff --git a/tests/utils_tests/test_lorem_ipsum.py b/tests/utils_tests/test_lorem_ipsum.py index deda09c717..3471053778 100644 --- a/tests/utils_tests/test_lorem_ipsum.py +++ b/tests/utils_tests/test_lorem_ipsum.py @@ -18,7 +18,9 @@ class LoremIpsumTests(unittest.TestCase): self.assertEqual(words(7), "lorem ipsum dolor sit amet consectetur adipisicing") def test_common_words_in_string(self): - """words(n) starts with the 19 standard lorem ipsum words for n > 19.""" + """ + words(n) starts with the 19 standard lorem ipsum words for n > 19. 
+ """ self.assertTrue( words(25).startswith( "lorem ipsum dolor sit amet consectetur adipisicing elit sed " diff --git a/tests/utils_tests/test_timesince.py b/tests/utils_tests/test_timesince.py index fdcfa4b281..8305a801fc 100644 --- a/tests/utils_tests/test_timesince.py +++ b/tests/utils_tests/test_timesince.py @@ -138,7 +138,9 @@ class TimesinceTests(TestCase): self.assertEqual(timeuntil(now_tz, now_tz_i), "0\xa0minutes") def test_date_objects(self): - """Both timesince and timeuntil should work on date objects (#17937).""" + """ + Both timesince and timeuntil should work on date objects (#17937). + """ today = datetime.date.today() self.assertEqual(timesince(today + self.oneday), "0\xa0minutes") self.assertEqual(timeuntil(today - self.oneday), "0\xa0minutes") diff --git a/tests/view_tests/tests/test_debug.py b/tests/view_tests/tests/test_debug.py index f5cb82cd2d..8e36ab7eb1 100644 --- a/tests/view_tests/tests/test_debug.py +++ b/tests/view_tests/tests/test_debug.py @@ -396,7 +396,8 @@ class DebugViewTests(SimpleTestCase): def test_no_template_source_loaders(self): """ - Make sure if you don't specify a template, the debug view doesn't blow up. + Make sure if you don't specify a template, the debug view doesn't blow + up. """ with self.assertLogs("django.request", "ERROR"): with self.assertRaises(TemplateDoesNotExist): @@ -493,7 +494,8 @@ class DebugViewQueriesAllowedTests(SimpleTestCase): def test_handle_db_exception(self): """ Ensure the debug view works when a database exception is raised by - performing an invalid query and passing the exception to the debug view. + performing an invalid query and passing the exception to the debug + view. 
""" with connection.cursor() as cursor: try: @@ -614,7 +616,9 @@ class ExceptionReporterTests(SimpleTestCase): ) def test_eol_support(self): - """The ExceptionReporter supports Unix, Windows and Macintosh EOL markers""" + """ + The ExceptionReporter supports Unix, Windows and Macintosh EOL markers + """ LINES = ["print %d" % i for i in range(1, 6)] reporter = ExceptionReporter(None, None, None, None) @@ -1040,7 +1044,10 @@ class ExceptionReporterTests(SimpleTestCase): self.assertIn("<p>Request data not supplied</p>", html) def test_non_utf8_values_handling(self): - "Non-UTF-8 exceptions/values should not make the output generation choke." + """ + Non-UTF-8 exceptions/values should not make the output generation + choke. + """ try: class NonUtf8Output(Exception): @@ -1446,7 +1453,8 @@ class ExceptionReportTestMixin: self, view, check_for_vars=True, check_for_POST_params=True ): """ - Asserts that no variables or POST parameters are displayed in the response. + Asserts that no variables or POST parameters are displayed in the + response. """ request = self.rf.post("/some_url/", self.breakfast_data) response = view(request) @@ -1465,7 +1473,8 @@ class ExceptionReportTestMixin: def verify_unsafe_email(self, view, check_for_POST_params=True): """ - Asserts that potentially sensitive info are displayed in the email report. + Asserts that potentially sensitive info are displayed in the email + report. """ with self.settings(ADMINS=["admin@example.com"]): mail.outbox = [] # Empty outbox @@ -1501,7 +1510,8 @@ class ExceptionReportTestMixin: def verify_safe_email(self, view, check_for_POST_params=True): """ - Asserts that certain sensitive info are not displayed in the email report. + Asserts that certain sensitive info are not displayed in the email + report. 
""" with self.settings(ADMINS=["admin@example.com"]): mail.outbox = [] # Empty outbox @@ -1544,7 +1554,8 @@ class ExceptionReportTestMixin: def verify_paranoid_email(self, view): """ - Asserts that no variables or POST parameters are displayed in the email report. + Asserts that no variables or POST parameters are displayed in the email + report. """ with self.settings(ADMINS=["admin@example.com"]): mail.outbox = [] # Empty outbox diff --git a/tests/view_tests/tests/test_i18n.py b/tests/view_tests/tests/test_i18n.py index 229ce68bfc..f4ba5249a2 100644 --- a/tests/view_tests/tests/test_i18n.py +++ b/tests/view_tests/tests/test_i18n.py @@ -272,8 +272,9 @@ class I18NViewTests(SimpleTestCase): response.headers["Content-Type"], 'text/javascript; charset="utf-8"' ) # response content must include a line like: - # "this is to be translated": <value of trans_txt Python variable> - # json.dumps() is used to be able to check Unicode strings. + # "this is to be translated": <value of trans_txt Python + # variable> json.dumps() is used to be able to check Unicode + # strings. self.assertContains(response, json.dumps(trans_txt), 1) if lang_code == "fr": # Message with context (msgctxt) @@ -417,8 +418,9 @@ class I18NViewTests(SimpleTestCase): def test_non_english_default_english_userpref(self): """ Same as above with the difference that there IS an 'en' translation - available. The JavaScript i18n view must return a NON empty language catalog - with the proper English translations. See #13726 for more details. + available. The JavaScript i18n view must return a NON empty language + catalog with the proper English translations. See #13726 for more + details. 
""" with self.settings(LANGUAGE_CODE="fr"), override("en-us"): response = self.client.get("/jsi18n_english_translation/") diff --git a/tests/view_tests/tests/test_static.py b/tests/view_tests/tests/test_static.py index f60ca88cd2..ca1b5f1638 100644 --- a/tests/view_tests/tests/test_static.py +++ b/tests/view_tests/tests/test_static.py @@ -39,7 +39,10 @@ class StaticTests(SimpleTestCase): ) def test_chunked(self): - "The static view should stream files in chunks to avoid large memory usage" + """ + The static view should stream files in chunks to avoid large memory + usage + """ response = self.client.get("/%s/%s" % (self.prefix, "long-line.txt")) response_iterator = iter(response) first_chunk = next(response_iterator) diff --git a/tests/view_tests/views.py b/tests/view_tests/views.py index 9eb7a352d6..1986341177 100644 --- a/tests/view_tests/views.py +++ b/tests/view_tests/views.py @@ -125,10 +125,10 @@ def render_no_template(request): def send_log(request, exc_info): logger = logging.getLogger("django") - # The default logging config has a logging filter to ensure admin emails are - # only sent with DEBUG=False, but since someone might choose to remove that - # filter, we still want to be able to test the behavior of error emails - # with DEBUG=True. So we need to remove the filter temporarily. + # The default logging config has a logging filter to ensure admin emails + # are only sent with DEBUG=False, but since someone might choose to remove + # that filter, we still want to be able to test the behavior of error + # emails with DEBUG=True. So we need to remove the filter temporarily. 
admin_email_handler = [ h for h in logger.handlers if h.__class__.__name__ == "AdminEmailHandler" ][0] @@ -417,7 +417,8 @@ def json_response_view(request): { "a": [1, 2, 3], "foo": {"bar": "baz"}, - # Make sure datetime and Decimal objects would be serialized properly + # Make sure datetime and Decimal objects would be serialized + # properly "timestamp": datetime.datetime(2013, 5, 19, 20), "value": decimal.Decimal("3.14"), }