Configuration

This section is automatically generated documentation for the repository's application configuration.

pydantic settings app.core.config.Settings

Settings model for the API.

Config:
  • extra: str = ignore

  • env_file: str = .env

  • env_file_encoding: str = utf-8
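
For orientation, a minimal sketch of constructing the model under standard pydantic-settings behaviour: values are read from the environment (falling back to a UTF-8-encoded .env file), and unrecognised keys are ignored rather than rejected. The variable names assume the default field-name-to-environment-variable mapping, the values are placeholders, and the nested db_config and es_config models are elided because their shape is not shown in this section.

    import os

    # Placeholder values for the required scalar fields listed below.
    os.environ["APP_NAME"] = "destiny-repository"
    os.environ["AZURE_APPLICATION_ID"] = "<application-id>"
    os.environ["AZURE_TENANT_ID"] = "<tenant-id>"

    from app.core.config import Settings

    # Raises a ValidationError while the required nested models (db_config,
    # es_config) are still unset; they would be supplied via the environment
    # or the .env file in the same way.
    settings = Settings()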

Fields:
field app_name: str [Required]
field azure_application_id: str [Required]
field azure_blob_config: AzureBlobConfig | None = None
field azure_login_url: HttpUrl = HttpUrl('https://login.microsoftonline.com/')
field azure_tenant_id: str [Required]
field cli_client_id: str | None = None
field db_config: DatabaseConfig [Required]
field default_download_file_chunk_size: Literal[1] = 1

Number of records to process in a single file chunk when downloading. Not configurable or used; it only documents that we stream line-by-line at this point.

field default_es_indexing_chunk_size: int = 1000

Number of records to process in a single chunk when indexing to Elasticsearch.

field default_es_percolation_chunk_size: int = 1000

Number of records to process in a single chunk when percolating to Elasticsearch.

field default_upload_file_chunk_size: int = 1

Number of records to process in a single file chunk when uploading.

field env: Environment = Environment.PRODUCTION

The environment the app is running in.

field es_config: ESConfig [Required]
field es_indexing_chunk_size_override: dict[ESIndexingOperation, int] [Optional]

Override the default Elasticsearch indexing chunk size.

field es_percolation_chunk_size_override: dict[ESPercolationOperation, int] [Optional]

Override the default Elasticsearch percolation chunk size.
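
A hedged sketch of the override pattern shared by these two fields (and by upload_file_chunk_size_override below): pydantic-settings parses dict-valued fields from JSON-encoded environment variables, and a lookup presumably falls back to the default when an operation has no override. The enum member name "index_references" is hypothetical, not taken from the source.

    import os

    # "index_references" is a hypothetical ESIndexingOperation value.
    os.environ["ES_INDEXING_CHUNK_SIZE_OVERRIDE"] = '{"index_references": 250}'

    def indexing_chunk_size(settings, operation) -> int:
        # Fall back to the default when no per-operation override is set.
        return settings.es_indexing_chunk_size_override.get(
            operation, settings.default_es_indexing_chunk_size
        )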

field feature_flags: FeatureFlags = FeatureFlags()
field import_reference_retry_count: int = 3

Number of times to retry importing a reference before marking it as failed. We only retry on errors we are confident can be resolved, e.g. network issues or an inconsistent database state caused by parallel loading.
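
As an illustration only (not the repository's actual import code), a bounded retry over a transient-error class might look like the following; TransientError and the import_once callable are hypothetical stand-ins, and the count is read here as the total number of attempts, which may differ from the real semantics.

    class TransientError(Exception):
        """Hypothetical stand-in for retryable failures such as network errors."""

    def import_with_retry(settings, import_once, reference):
        """Attempt an import up to import_reference_retry_count times."""
        last_exc = None
        for _ in range(settings.import_reference_retry_count):
            try:
                return import_once(reference)
            except TransientError as exc:
                last_exc = exc
        # Retries exhausted: the caller would mark the reference as failed.
        raise last_exc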

field log_level: LogLevel = LogLevel.INFO

The log level for the application.

field max_pending_enhancements_batch_size: int = 10000

Maximum number of pending enhancements to return in a single batch.

field max_reference_duplicate_depth: Literal[2] = 2

The maximum depth to which reference duplicates are propagated. A depth of 2, the default, means only direct duplicates are allowed. Higher values allow duplicate chaining, at a significant cost in performance and data-model complexity.

field message_broker_namespace: str | None = None
field message_broker_queue_name: str = 'taskiq'
field message_broker_url: str | None = None
field message_lock_renewal_duration: int = 10800

Duration in seconds to keep renewing message locks. Should be longer than the expected processing time.
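
A minimal sketch of a renewal loop bounded by this duration; the renew callable and the 60-second renewal cadence are assumptions, not taken from the source.

    import time

    def keep_lock_alive(settings, renew, interval_seconds=60):
        """Renew a message lock until the configured duration elapses."""
        deadline = time.monotonic() + settings.message_lock_renewal_duration
        while time.monotonic() < deadline:
            renew()
            time.sleep(interval_seconds)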

field minio_config: MinioConfig | None = None
field otel_config: OTelConfig | None = None
field otel_enabled: bool = False
field presigned_url_expiry_seconds: int = 3600

The number of seconds a presigned URL is valid for.

field project_root: Path = PosixPath('/home/runner/work/destiny-repository/destiny-repository')
field tests_use_rabbitmq: bool = False

Whether to use RabbitMQ for tests. Only used in the test environment; if false, an in-memory broker is used.

field upload_file_chunk_size_override: dict[UploadFile, int] [Optional]

Override the default upload file chunk size.

property app_version: str

Get the application version from pyproject.toml; see the sketch after the pyproject_toml property below.

property default_blob_container: str

Return the default blob container.

property default_blob_location: str

Return the default blob location.

property pyproject_toml: dict[str, Any]

Get the contents of pyproject.toml.
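
A hedged sketch of how app_version and pyproject_toml might fit together, assuming pyproject.toml is resolved relative to project_root and the version lives at the standard PEP 621 location; the actual implementation is not shown here.

    import tomllib
    from pathlib import Path

    def pyproject_toml(project_root: Path) -> dict:
        # tomllib ships with Python 3.11+.
        return tomllib.loads((project_root / "pyproject.toml").read_text())

    def app_version(project_root: Path) -> str:
        # Standard PEP 621 location for the version string.
        return pyproject_toml(project_root)["project"]["version"]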

property running_locally: bool

Return True if the app is running locally.
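
One plausible reading, given the env field above, is that anything other than the production environment counts as local; this is an assumption, since the Environment enum's other members are not shown in this section.

    from app.core.config import Environment  # assumed import path

    def running_locally(settings) -> bool:
        # Hedged guess: the real predicate may be stricter than this.
        return settings.env is not Environment.PRODUCTION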