From cfca4a2a0116d7af9ca2a92c479455d6e75fb2ce Mon Sep 17 00:00:00 2001 From: David Whittington Date: Sun, 25 May 2025 13:59:59 -0500 Subject: [PATCH] refactor: add env helpers and simplify config --- src/config.ts | 404 +++++++++++++++++++++++-------------------------- src/lib/env.ts | 31 ++++ 2 files changed, 218 insertions(+), 217 deletions(-) diff --git a/src/config.ts b/src/config.ts index 55e4ab6c..7e42ffe3 100644 --- a/src/config.ts +++ b/src/config.ts @@ -29,7 +29,7 @@ import logger from './log.js'; // // HTTP server port -export const PORT = +env.varOrDefault('PORT', '4000'); +export const PORT = env.intVar('PORT', 4000); // API key for accessing admin HTTP endpoints // It's set once in the main thread @@ -58,13 +58,12 @@ export const REDIS_CACHE_URL = env.varOrDefault( 'redis://localhost:6379', ); -export const REDIS_USE_TLS = - env.varOrDefault('REDIS_USE_TLS', 'false') === 'true'; +export const REDIS_USE_TLS = env.boolVar('REDIS_USE_TLS', false); // Default Redis TTL -export const REDIS_CACHE_TTL_SECONDS = +env.varOrDefault( +export const REDIS_CACHE_TTL_SECONDS = env.intVar( 'REDIS_CACHE_TTL_SECONDS', - `${60 * 60 * 8}`, // 8 hours by default + 60 * 60 * 8, ); // @@ -122,76 +121,66 @@ export const GATEWAY_PEERS_WEIGHTS_CACHE_DURATION_MS = +env.varOrDefault( // the size of the array from which the average performance is calculated // this average is used to give a weight to the peers. Longer window means // a better average is calculated, but it shouldn't matter too much. -export const GATEWAY_PEERS_REQUEST_WINDOW_COUNT = +env.varOrDefault( +export const GATEWAY_PEERS_REQUEST_WINDOW_COUNT = env.intVar( 'GATEWAY_PEERS_REQUEST_WINDOW_COUNT', - '20', + 20, ); -export const ARWEAVE_NODE_IGNORE_URLS: string[] = - env.varOrUndefined('ARWEAVE_NODE_IGNORE_URLS')?.split(',') ?? 
[]; +export const ARWEAVE_NODE_IGNORE_URLS: string[] = env.optionalListVar( + 'ARWEAVE_NODE_IGNORE_URLS', +); // Trusted chunk POST URLs (for posting chunks received at /chunk) -export const CHUNK_POST_URLS = env - .varOrDefault('CHUNK_POST_URLS', `${TRUSTED_NODE_URL}/chunk`) - .split(','); +export const CHUNK_POST_URLS = env.listVar( + 'CHUNK_POST_URLS', + `${TRUSTED_NODE_URL}/chunk`, +); -export const CHUNK_POST_CONCURRENCY_LIMIT = +env.varOrDefault( +export const CHUNK_POST_CONCURRENCY_LIMIT = env.intVar( 'CHUNK_POST_CONCURRENCY_LIMIT', - '2', + 2, ); -const SECONDARY_CHUNK_POST_URLS_STRING = env.varOrUndefined( +export const SECONDARY_CHUNK_POST_URLS = env.optionalListVar( 'SECONDARY_CHUNK_POST_URLS', ); -export const SECONDARY_CHUNK_POST_URLS = - SECONDARY_CHUNK_POST_URLS_STRING !== undefined - ? SECONDARY_CHUNK_POST_URLS_STRING.split(',') - : []; -export const SECONDARY_CHUNK_POST_CONCURRENCY_LIMIT = +env.varOrDefault( +export const SECONDARY_CHUNK_POST_CONCURRENCY_LIMIT = env.intVar( 'SECONDARY_CHUNK_POST_CONCURRENCY_LIMIT', - '2', + 2, ); -export const SECONDARY_CHUNK_POST_MIN_SUCCESS_COUNT = +env.varOrDefault( +export const SECONDARY_CHUNK_POST_MIN_SUCCESS_COUNT = env.intVar( 'SECONDARY_CHUNK_POST_MIN_SUCCESS_COUNT', - '1', + 1, ); // Chunk POST response timeout in milliseconds -const CHUNK_POST_RESPONSE_TIMEOUT_MS_STRING = env.varOrUndefined( +export const CHUNK_POST_RESPONSE_TIMEOUT_MS = env.optionalIntVar( 'CHUNK_POST_RESPONSE_TIMEOUT_MS', ); -export const CHUNK_POST_RESPONSE_TIMEOUT_MS = - CHUNK_POST_RESPONSE_TIMEOUT_MS_STRING !== undefined - ? +CHUNK_POST_RESPONSE_TIMEOUT_MS_STRING - : undefined; // Chunk POST abort timeout in milliseconds -const CHUNK_POST_ABORT_TIMEOUT_MS_STRING = env.varOrUndefined( +export const CHUNK_POST_ABORT_TIMEOUT_MS = env.optionalIntVar( 'CHUNK_POST_ABORT_TIMEOUT_MS', ); -export const CHUNK_POST_ABORT_TIMEOUT_MS = - CHUNK_POST_ABORT_TIMEOUT_MS_STRING !== undefined - ? 
+CHUNK_POST_ABORT_TIMEOUT_MS_STRING - : undefined; -export const CHUNK_POST_MIN_SUCCESS_COUNT = +env.varOrDefault( +export const CHUNK_POST_MIN_SUCCESS_COUNT = env.intVar( 'CHUNK_POST_MIN_SUCCESS_COUNT', - '3', + 3, ); // Arweave network peer post success goal // setting to 0 means this behaviour is disabled. -export const ARWEAVE_PEER_CHUNK_POST_MIN_SUCCESS_COUNT = +env.varOrDefault( +export const ARWEAVE_PEER_CHUNK_POST_MIN_SUCCESS_COUNT = env.intVar( 'ARWEAVE_PEER_CHUNK_POST_MIN_SUCCESS_COUNT', - '2', + 2, ); // The maximum number of peers to attempt to POST to before giving up -export const ARWEAVE_PEER_CHUNK_POST_MAX_PEER_ATTEMPT_COUNT = +env.varOrDefault( +export const ARWEAVE_PEER_CHUNK_POST_MAX_PEER_ATTEMPT_COUNT = env.intVar( 'ARWEAVE_PEER_CHUNK_POST_MAX_PEER_ATTEMPT_COUNT', - '5', + 5, ); if ( @@ -215,20 +204,16 @@ export const ARWEAVE_PEER_CHUNK_POST_CONCURRENCY_LIMIT = +env.varOrDefault( // // On-demand data retrieval priority order -export const ON_DEMAND_RETRIEVAL_ORDER = env - .varOrDefault( - 'ON_DEMAND_RETRIEVAL_ORDER', - 's3,trusted-gateways,chunks,tx-data', - ) - .split(','); +export const ON_DEMAND_RETRIEVAL_ORDER = env.listVar( + 'ON_DEMAND_RETRIEVAL_ORDER', + 's3,trusted-gateways,chunks,tx-data', +); // Background data retrieval priority order -export const BACKGROUND_RETRIEVAL_ORDER = env - .varOrDefault( - 'BACKGROUND_RETRIEVAL_ORDER', - 'chunks,s3,trusted-gateways,tx-data', - ) - .split(','); +export const BACKGROUND_RETRIEVAL_ORDER = env.listVar( + 'BACKGROUND_RETRIEVAL_ORDER', + 'chunks,s3,trusted-gateways,tx-data', +); // Cache type for contigous metadata (access time, etc.). Defaults to 'node' // here for development but is set to 'redis' in 'docker-compose.yaml'. 
@@ -260,16 +245,16 @@ export const CHUNK_METADATA_SOURCE_TYPE = env.varOrDefault( // Whether or not to run indexing processes (used on readers when running with // replication) -export const START_WRITERS = - env.varOrDefault('START_WRITERS', 'true') === 'true'; +export const START_WRITERS = env.boolVar('START_WRITERS', true); // Indexing range -export const START_HEIGHT = +env.varOrDefault('START_HEIGHT', '0'); +export const START_HEIGHT = env.intVar('START_HEIGHT', 0); export const STOP_HEIGHT = +env.varOrDefault('STOP_HEIGHT', 'Infinity'); // Filter determining which ANS-104 bundles to unbundle -export const ANS104_UNBUNDLE_FILTER_PARSED = JSON.parse( - env.varOrDefault('ANS104_UNBUNDLE_FILTER', '{"never": true}'), +export const ANS104_UNBUNDLE_FILTER_PARSED = env.jsonVar( + 'ANS104_UNBUNDLE_FILTER', + { never: true }, ); export const ANS104_UNBUNDLE_FILTER_STRING = canonicalize( ANS104_UNBUNDLE_FILTER_PARSED, @@ -280,9 +265,9 @@ export const ANS104_UNBUNDLE_FILTER = createFilter( ); // Filter determining which ANS-104 data items to index -export const ANS104_INDEX_FILTER_PARSED = JSON.parse( - env.varOrDefault('ANS104_INDEX_FILTER', '{"never": true}'), -); +export const ANS104_INDEX_FILTER_PARSED = env.jsonVar('ANS104_INDEX_FILTER', { + never: true, +}); export const ANS104_INDEX_FILTER_STRING = canonicalize( ANS104_INDEX_FILTER_PARSED, ); @@ -292,94 +277,102 @@ export const ANS104_INDEX_FILTER = createFilter( ); // The number of ANS-104 worker threads to run -export const ANS104_UNBUNDLE_WORKERS = +env.varOrDefault( +export const ANS104_UNBUNDLE_WORKERS = env.intVar( 'ANS104_UNBUNDLE_WORKERS', - ANS104_UNBUNDLE_FILTER.constructor.name === 'NeverMatch' ? '0' : '1', + ANS104_UNBUNDLE_FILTER.constructor.name === 'NeverMatch' ? 
0 : 1, ); // The number of ANS-104 bundle downloads to attempt in parallel -export const ANS104_DOWNLOAD_WORKERS = +env.varOrDefault( +export const ANS104_DOWNLOAD_WORKERS = env.intVar( 'ANS104_DOWNLOAD_WORKERS', - ANS104_UNBUNDLE_FILTER.constructor.name === 'NeverMatch' ? '0' : '5', + ANS104_UNBUNDLE_FILTER.constructor.name === 'NeverMatch' ? 0 : 5, ); // Whether or not to attempt to rematch old bundles using the current filter -export const FILTER_CHANGE_REPROCESS = - env.varOrDefault('FILTER_CHANGE_REPROCESS', 'false') === 'true'; +export const FILTER_CHANGE_REPROCESS = env.boolVar( + 'FILTER_CHANGE_REPROCESS', + false, +); // Whether or not to backfill bundle records (only needed for DBs that existed // before unbundling was implemented) -export const BACKFILL_BUNDLE_RECORDS = - env.varOrDefault('BACKFILL_BUNDLE_RECORDS', 'false') === 'true'; +export const BACKFILL_BUNDLE_RECORDS = env.boolVar( + 'BACKFILL_BUNDLE_RECORDS', + false, +); // Whether or not to write the data item signatures to the database -export const WRITE_ANS104_DATA_ITEM_DB_SIGNATURES = - env.varOrDefault('WRITE_ANS104_DATA_ITEM_DB_SIGNATURES', 'false') === 'true'; +export const WRITE_ANS104_DATA_ITEM_DB_SIGNATURES = env.boolVar( + 'WRITE_ANS104_DATA_ITEM_DB_SIGNATURES', + false, +); // Whether or not to write the transaction signatures to the database -export const WRITE_TRANSACTION_DB_SIGNATURES = - env.varOrDefault('WRITE_TRANSACTION_DB_SIGNATURES', 'false') === 'true'; +export const WRITE_TRANSACTION_DB_SIGNATURES = env.boolVar( + 'WRITE_TRANSACTION_DB_SIGNATURES', + false, +); // Whether or not to enable the data database WAL cleanup worker -export const ENABLE_DATA_DB_WAL_CLEANUP = - env.varOrDefault('ENABLE_DATA_DB_WAL_CLEANUP', 'false') === 'true'; +export const ENABLE_DATA_DB_WAL_CLEANUP = env.boolVar( + 'ENABLE_DATA_DB_WAL_CLEANUP', + false, +); // The maximum number of data items to queue for indexing before skipping // indexing new data items -export const MAX_DATA_ITEM_QUEUE_SIZE 
= +env.varOrDefault( +export const MAX_DATA_ITEM_QUEUE_SIZE = env.intVar( 'MAX_DATA_ITEM_QUEUE_SIZE', - '100000', + 100000, ); // The maximum number of bundles to queue for unbundling before skipping -export const BUNDLE_DATA_IMPORTER_QUEUE_SIZE = +env.varOrDefault( +export const BUNDLE_DATA_IMPORTER_QUEUE_SIZE = env.intVar( 'BUNDLE_DATA_IMPORTER_QUEUE_SIZE', - '1000', + 1000, ); // The maximum number of data imports to queue for verification purposes -export const VERIFICATION_DATA_IMPORTER_QUEUE_SIZE = +env.varOrDefault( +export const VERIFICATION_DATA_IMPORTER_QUEUE_SIZE = env.intVar( 'VERIFICATION_DATA_IMPORTER_QUEUE_SIZE', - '1000', + 1000, ); // The maximum number of data items indexed to flush stable data items -export const DATA_ITEM_FLUSH_COUNT_THRESHOLD = +env.varOrDefault( +export const DATA_ITEM_FLUSH_COUNT_THRESHOLD = env.intVar( 'DATA_ITEM_FLUSH_COUNT_THRESHOLD', - '1000', + 1000, ); // The interval in seconds to flush stable data items -export const MAX_FLUSH_INTERVAL_SECONDS = +env.varOrDefault( +export const MAX_FLUSH_INTERVAL_SECONDS = env.intVar( 'MAX_FLUSH_INTERVAL_SECONDS', - '600', + 600, ); -export const BUNDLE_REPAIR_RETRY_INTERVAL_SECONDS = +env.varOrDefault( +export const BUNDLE_REPAIR_RETRY_INTERVAL_SECONDS = env.intVar( 'BUNDLE_REPAIR_RETRY_INTERVAL_SECONDS', - '300', // 5 minutes -); + 300, +); // 5 minutes -export const BUNDLE_REPAIR_UPDATE_TIMESTAMPS_INTERVAL_SECONDS = - +env.varOrDefault( - 'BUNDLE_REPAIR_UPDATE_TIMESTAMPS_INTERVAL_SECONDS', - '300', // 5 minutes - ); +export const BUNDLE_REPAIR_UPDATE_TIMESTAMPS_INTERVAL_SECONDS = env.intVar( + 'BUNDLE_REPAIR_UPDATE_TIMESTAMPS_INTERVAL_SECONDS', + 300, +); -export const BUNDLE_REPAIR_BACKFILL_INTERVAL_SECONDS = +env.varOrDefault( +export const BUNDLE_REPAIR_BACKFILL_INTERVAL_SECONDS = env.intVar( 'BUNDLE_REPAIR_BACKFILL_INTERVAL_SECONDS', - '900', // 15 minutes -); + 900, +); // 15 minutes -export const BUNDLE_REPAIR_FILTER_REPROCESS_INTERVAL_SECONDS = - +env.varOrDefault( - 
'BUNDLE_REPAIR_FILTER_REPROCESS_INTERVAL_SECONDS', - '300', // 15 minutes - ); +export const BUNDLE_REPAIR_FILTER_REPROCESS_INTERVAL_SECONDS = env.intVar( + 'BUNDLE_REPAIR_FILTER_REPROCESS_INTERVAL_SECONDS', + 300, +); -export const BUNDLE_REPAIR_RETRY_BATCH_SIZE = +env.varOrDefault( +export const BUNDLE_REPAIR_RETRY_BATCH_SIZE = env.intVar( 'BUNDLE_REPAIR_RETRY_BATCH_SIZE', - '5000', + 5000, ); // @@ -403,52 +396,56 @@ export const LEGACY_PSQL_PASSWORD_FILE = env.varOrUndefined( // very common workaround needed for various cloud providers // see more: https://github.com/porsager/postgres?tab=readme-ov-file#ssl -export const LEGACY_PSQL_SSL_REJECT_UNAUTHORIZED = - env.varOrDefault('LEGACY_PSQL_SSL_REJECT_UNAUTHORIZED', 'true') === 'true'; +export const LEGACY_PSQL_SSL_REJECT_UNAUTHORIZED = env.boolVar( + 'LEGACY_PSQL_SSL_REJECT_UNAUTHORIZED', + true, +); // // File system cleanup // // The number of files to process in each batch during cleanup -export const FS_CLEANUP_WORKER_BATCH_SIZE = +env.varOrDefault( +export const FS_CLEANUP_WORKER_BATCH_SIZE = env.intVar( 'FS_CLEANUP_WORKER_BATCH_SIZE', - '2000', + 2000, ); // The pause duration between cleanup batches in milliseconds -export const FS_CLEANUP_WORKER_BATCH_PAUSE_DURATION = +env.varOrDefault( +export const FS_CLEANUP_WORKER_BATCH_PAUSE_DURATION = env.intVar( 'FS_CLEANUP_WORKER_BATCH_PAUSE_DURATION', - '5000', + 5000, ); // The pause duration before restarting cleanup from the beginning in milliseconds -export const FS_CLEANUP_WORKER_RESTART_PAUSE_DURATION = +env.varOrDefault( +export const FS_CLEANUP_WORKER_RESTART_PAUSE_DURATION = env.intVar( 'FS_CLEANUP_WORKER_RESTART_PAUSE_DURATION', - `${1000 * 60 * 60 * 4}`, // every 4 hours -); + 1000 * 60 * 60 * 4, +); // every 4 hours // // Verification // // Whether or not to enable the background data verification worker -export const ENABLE_BACKGROUND_DATA_VERIFICATION = - env.varOrDefault('ENABLE_BACKGROUND_DATA_VERIFICATION', 'false') === 'true'; +export const 
ENABLE_BACKGROUND_DATA_VERIFICATION = env.boolVar( + 'ENABLE_BACKGROUND_DATA_VERIFICATION', + false, +); -export const BACKGROUND_DATA_VERIFICATION_INTERVAL_SECONDS = +env.varOrDefault( +export const BACKGROUND_DATA_VERIFICATION_INTERVAL_SECONDS = env.intVar( 'BACKGROUND_DATA_VERIFICATION_INTERVAL_SECONDS', - '600', // 10 minutes -); + 600, +); // 10 minutes -export const BACKGROUND_DATA_VERIFICATION_WORKER_COUNT = +env.varOrDefault( +export const BACKGROUND_DATA_VERIFICATION_WORKER_COUNT = env.intVar( 'BACKGROUND_DATA_VERIFICATION_WORKER_COUNT', - '1', + 1, ); -export const BACKGROUND_DATA_VERIFICATION_STREAM_TIMEOUT_MS = +env.varOrDefault( +export const BACKGROUND_DATA_VERIFICATION_STREAM_TIMEOUT_MS = env.intVar( 'BACKGROUND_DATA_VERIFICATION_STREAM_TIMEOUT_MS', - `${1000 * 30}`, // 30 seconds + 1000 * 30, ); // @@ -480,13 +477,8 @@ export const CLICKHOUSE_PASSWORD = env.varOrUndefined('CLICKHOUSE_PASSWORD'); // Healthchecks // -export const MAX_EXPECTED_DATA_ITEM_INDEXING_INTERVAL_SECONDS_STRING = - env.varOrUndefined('MAX_EXPECTED_DATA_ITEM_INDEXING_INTERVAL_SECONDS'); - export const MAX_EXPECTED_DATA_ITEM_INDEXING_INTERVAL_SECONDS = - MAX_EXPECTED_DATA_ITEM_INDEXING_INTERVAL_SECONDS_STRING !== undefined - ? 
+MAX_EXPECTED_DATA_ITEM_INDEXING_INTERVAL_SECONDS_STRING - : undefined; + env.optionalIntVar('MAX_EXPECTED_DATA_ITEM_INDEXING_INTERVAL_SECONDS'); // // ArNS and sandboxing @@ -552,8 +544,10 @@ export const ARNS_NOT_FOUND_ARNS_NAME = env.varOrDefault( export const CHAIN_CACHE_TYPE = env.varOrDefault('CHAIN_CACHE_TYPE', 'lmdb'); // Whether or not to cleanup filesystem header cache files -export const ENABLE_FS_HEADER_CACHE_CLEANUP = - env.varOrDefault('ENABLE_FS_HEADER_CACHE_CLEANUP', 'false') === 'true'; +export const ENABLE_FS_HEADER_CACHE_CLEANUP = env.boolVar( + 'ENABLE_FS_HEADER_CACHE_CLEANUP', + false, +); // // Contiguous data caching @@ -575,12 +569,12 @@ export const PREFERRED_ARNS_CONTIGUOUS_DATA_CACHE_CLEANUP_THRESHOLD = // The set of full (not base or undernames) ArNS names to preferentially cache export const PREFERRED_ARNS_NAMES = new Set( - env.varOrDefault('PREFERRED_ARNS_NAMES', '').split(','), + env.listVar('PREFERRED_ARNS_NAMES', ''), ); // The set of base ArNS names to preferentially cache export const PREFERRED_ARNS_BASE_NAMES = new Set( - env.varOrDefault('PREFERRED_ARNS_BASE_NAMES', '').split(','), + env.listVar('PREFERRED_ARNS_BASE_NAMES', ''), ); // @@ -588,17 +582,13 @@ export const PREFERRED_ARNS_BASE_NAMES = new Set( // // The webhook target servers -export const WEBHOOK_TARGET_SERVERS_VALUE = env.varOrUndefined( +export const WEBHOOK_TARGET_SERVERS = env.optionalListVar( 'WEBHOOK_TARGET_SERVERS', ); -export const WEBHOOK_TARGET_SERVERS = - WEBHOOK_TARGET_SERVERS_VALUE !== undefined - ? 
WEBHOOK_TARGET_SERVERS_VALUE.split(',') - : []; // The index filter to use for webhooks export const WEBHOOK_INDEX_FILTER_STRING = canonicalize( - JSON.parse(env.varOrDefault('WEBHOOK_INDEX_FILTER', '{"never": true}')), + env.jsonVar('WEBHOOK_INDEX_FILTER', { never: true }), ); export const WEBHOOK_INDEX_FILTER = createFilter( JSON.parse(WEBHOOK_INDEX_FILTER_STRING), @@ -607,7 +597,7 @@ export const WEBHOOK_INDEX_FILTER = createFilter( // Block filter to use for webhooks export const WEBHOOK_BLOCK_FILTER_STRING = canonicalize( - JSON.parse(env.varOrDefault('WEBHOOK_BLOCK_FILTER', '{"never": true}')), + env.jsonVar('WEBHOOK_BLOCK_FILTER', { never: true }), ); export const WEBHOOK_BLOCK_FILTER = createFilter( JSON.parse(WEBHOOK_BLOCK_FILTER_STRING), @@ -627,58 +617,52 @@ export const ARNS_CACHE_TYPE = env.varOrDefault('ARNS_CACHE_TYPE', 'node'); // Amount of time that entries stay in the cache (ArNS record TTL still applies // on top of this) -export const ARNS_CACHE_TTL_SECONDS = +env.varOrDefault( +export const ARNS_CACHE_TTL_SECONDS = env.intVar( 'ARNS_CACHE_TTL_SECONDS', - `${60 * 60 * 24}`, // 24 hours + 60 * 60 * 24, ); // The maximum amount of time to wait for resolution from AO if there is a // cached value that can be served. When the timeout occurs, caches will still // be refreshed in the background. 
-export const ARNS_CACHED_RESOLUTION_FALLBACK_TIMEOUT_MS = +env.varOrDefault( +export const ARNS_CACHED_RESOLUTION_FALLBACK_TIMEOUT_MS = env.intVar( 'ARNS_CACHED_RESOLUTION_FALLBACK_TIMEOUT_MS', - '250', + 250, ); -export const ARNS_RESOLVER_OVERRIDE_TTL_SECONDS_STRING = env.varOrUndefined( +export const ARNS_RESOLVER_OVERRIDE_TTL_SECONDS = env.optionalIntVar( 'ARNS_RESOLVER_OVERRIDE_TTL_SECONDS', ); +export const ARNS_RESOLVER_ENFORCE_UNDERNAME_LIMIT = env.boolVar( + 'ARNS_RESOLVER_ENFORCE_UNDERNAME_LIMIT', + true, +); -export const ARNS_RESOLVER_ENFORCE_UNDERNAME_LIMIT = - env.varOrDefault('ARNS_RESOLVER_ENFORCE_UNDERNAME_LIMIT', 'true') === 'true'; - -export const ARNS_RESOLVER_OVERRIDE_TTL_SECONDS = - ARNS_RESOLVER_OVERRIDE_TTL_SECONDS_STRING !== undefined - ? +ARNS_RESOLVER_OVERRIDE_TTL_SECONDS_STRING - : undefined; +export const ARNS_CACHE_MAX_KEYS = env.intVar('ARNS_CACHE_MAX_KEYS', 10000); -export const ARNS_CACHE_MAX_KEYS = +env.varOrDefault( - 'ARNS_CACHE_MAX_KEYS', - '10000', +export const ARNS_RESOLVER_PRIORITY_ORDER = env.listVar( + 'ARNS_RESOLVER_PRIORITY_ORDER', + 'gateway,on-demand', ); -export const ARNS_RESOLVER_PRIORITY_ORDER = env - .varOrDefault('ARNS_RESOLVER_PRIORITY_ORDER', 'gateway,on-demand') - .split(','); - -export const ARNS_COMPOSITE_RESOLVER_TIMEOUT_MS = +env.varOrDefault( +export const ARNS_COMPOSITE_RESOLVER_TIMEOUT_MS = env.intVar( 'ARNS_COMPOSITE_RESOLVER_TIMEOUT_MS', - '3000', + 3000, ); -export const ARNS_COMPOSITE_LAST_RESOLVER_TIMEOUT_MS = +env.varOrDefault( +export const ARNS_COMPOSITE_LAST_RESOLVER_TIMEOUT_MS = env.intVar( 'ARNS_COMPOSITE_LAST_RESOLVER_TIMEOUT_MS', - '30000', + 30000, ); -export const ARNS_NAMES_CACHE_TTL_SECONDS = +env.varOrDefault( +export const ARNS_NAMES_CACHE_TTL_SECONDS = env.intVar( 'ARNS_NAMES_CACHE_TTL_SECONDS', - `${60 * 60}`, // 1 hour + 60 * 60, ); -export const ARNS_MAX_CONCURRENT_RESOLUTIONS = +env.varOrDefault( +export const ARNS_MAX_CONCURRENT_RESOLUTIONS = env.intVar( 
'ARNS_MAX_CONCURRENT_RESOLUTIONS', - '1', + 1, ); // Controls the maximum time allowed for requests to AO for ARIO process state. @@ -686,58 +670,53 @@ export const ARNS_MAX_CONCURRENT_RESOLUTIONS = +env.varOrDefault( // seconds to account for the worst case scenario. If requests exceed this // timeout, they will be considered failed and may trigger the circuit breaker // if the error threshold is reached. -export const ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_TIMEOUT_MS = - +env.varOrDefault( - 'ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_TIMEOUT_MS', - `${60 * 1000}`, // 60 seconds - ); +export const ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_TIMEOUT_MS = env.intVar( + 'ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_TIMEOUT_MS', + 60 * 1000, +); // Controls the percentage of failed requests to AO for ARIO process state that // will trigger the circuit breaker to open. This is set to a relatively low // threshold (30%) to compensate for the extended timeout (10 seconds) // configured above. export const ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_ERROR_THRESHOLD_PERCENTAGE = - +env.varOrDefault( + env.intVar( 'ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_ERROR_THRESHOLD_PERCENTAGE', - '30', // 30% failure limit before circuit breaker opens + 30, ); // Defines the time window for tracking errors when retrieving ARIO process // state from AO The circuit breaker counts failures within this rolling time // window to determine if the error threshold percentage has been exceeded export const ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_ROLLING_COUNT_TIMEOUT_MS = - +env.varOrDefault( + env.intVar( 'ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_ROLLING_COUNT_TIMEOUT_MS', - `${10 * 60 * 1000}`, // 10 minutes + 10 * 60 * 1000, ); // Defines how long the circuit breaker stays in the open state after being // triggered During this period, all requests to AO for ARIO process state will // be rejected immediately After this timeout expires, the circuit breaker // transitions to half-open state to test if AO is responsive again -export 
const ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_RESET_TIMEOUT_MS = - +env.varOrDefault( - 'ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_RESET_TIMEOUT_MS', - `${20 * 60 * 1000}`, // 20 minutes - ); +export const ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_RESET_TIMEOUT_MS = env.intVar( + 'ARIO_PROCESS_DEFAULT_CIRCUIT_BREAKER_RESET_TIMEOUT_MS', + 20 * 60 * 1000, +); -export const ARNS_NAME_LIST_CACHE_MISS_REFRESH_INTERVAL_SECONDS = - +env.varOrDefault( - 'ARNS_NAME_LIST_CACHE_MISS_REFRESH_INTERVAL_SECONDS', - `${2 * 60}`, // 2 minutes - ); +export const ARNS_NAME_LIST_CACHE_MISS_REFRESH_INTERVAL_SECONDS = env.intVar( + 'ARNS_NAME_LIST_CACHE_MISS_REFRESH_INTERVAL_SECONDS', + 2 * 60, +); -export const ARNS_NAME_LIST_CACHE_HIT_REFRESH_INTERVAL_SECONDS = - +env.varOrDefault( - 'ARNS_NAME_LIST_CACHE_HIT_REFRESH_INTERVAL_SECONDS', - `${60 * 60}`, // 1 hour - ); +export const ARNS_NAME_LIST_CACHE_HIT_REFRESH_INTERVAL_SECONDS = env.intVar( + 'ARNS_NAME_LIST_CACHE_HIT_REFRESH_INTERVAL_SECONDS', + 60 * 60, +); -export const ARNS_ANT_STATE_CACHE_HIT_REFRESH_WINDOW_SECONDS = - +env.varOrDefault( - 'ARNS_ANT_STATE_CACHE_HIT_REFRESH_WINDOW_SECONDS', - `${30}`, // 30 seconds - ); +export const ARNS_ANT_STATE_CACHE_HIT_REFRESH_WINDOW_SECONDS = env.intVar( + 'ARNS_ANT_STATE_CACHE_HIT_REFRESH_WINDOW_SECONDS', + 30, +); // TODO: support multiple gateway urls export const TRUSTED_ARNS_GATEWAY_URL = env.varOrDefault( @@ -749,13 +728,15 @@ export const TRUSTED_ARNS_GATEWAY_URL = env.varOrDefault( // Mempool watcher // -export const ENABLE_MEMPOOL_WATCHER = - env.varOrDefault('ENABLE_MEMPOOL_WATCHER', 'false') === 'true'; +export const ENABLE_MEMPOOL_WATCHER = env.boolVar( + 'ENABLE_MEMPOOL_WATCHER', + false, +); -export const MEMPOOL_POLLING_INTERVAL_MS = +env.varOrDefault( +export const MEMPOOL_POLLING_INTERVAL_MS = env.intVar( 'MEMPOOL_POLLING_INTERVAL_MS', - '30000', // 30 seconds -); + 30000, +); // 30 seconds // // AWS settings @@ -794,18 +775,18 @@ export const LEGACY_AWS_S3_CHUNK_DATA_PREFIX = 
env.varOrUndefined( // // Whether or not to bypass the header cache -export const SKIP_CACHE = env.varOrDefault('SKIP_CACHE', 'false') === 'true'; +export const SKIP_CACHE = env.boolVar('SKIP_CACHE', false); // The rate (0 - 1) at which to simulate request failures export const SIMULATED_REQUEST_FAILURE_RATE = +env.varOrDefault( 'SIMULATED_REQUEST_FAILURE_RATE', '0', ); // Circuit breaker timeout for getDataParentCircuitBreaker and getDataAttributesCircuitBreaker -export const GET_DATA_CIRCUIT_BREAKER_TIMEOUT_MS = +env.varOrDefault( +export const GET_DATA_CIRCUIT_BREAKER_TIMEOUT_MS = env.intVar( 'GET_DATA_CIRCUIT_BREAKER_TIMEOUT_MS', - '500', + 500, ); // @@ -818,20 +799,9 @@ export const GET_DATA_CIRCUIT_BREAKER_TIMEOUT_MS = +env.varOrDefault( * @param url The URL to sanitize * @returns The sanitized URL without trailing slashes or undefined if input was undefined */ -function sanitizeUrl(url: string | undefined): string | undefined { - if (url === undefined) { - return undefined; - } - return url.replace(/\/+$/, ''); -} - -export const AO_MU_URL = sanitizeUrl(env.varOrUndefined('AO_MU_URL')); -export const AO_CU_URL = sanitizeUrl(env.varOrUndefined('AO_CU_URL')); -export const NETWORK_AO_CU_URL = sanitizeUrl( - env.varOrUndefined('NETWORK_AO_CU_URL') ?? AO_CU_URL, -); -export const ANT_AO_CU_URL = sanitizeUrl( - env.varOrUndefined('ANT_AO_CU_URL') ?? AO_CU_URL, -); +export const AO_MU_URL = env.urlVar('AO_MU_URL'); +export const AO_CU_URL = env.urlVar('AO_CU_URL'); +export const NETWORK_AO_CU_URL = env.urlVar('NETWORK_AO_CU_URL') ?? AO_CU_URL; +export const ANT_AO_CU_URL = env.urlVar('ANT_AO_CU_URL') ?? 
AO_CU_URL; export const AO_GRAPHQL_URL = env.varOrUndefined('AO_GRAPHQL_URL'); export const AO_GATEWAY_URL = env.varOrUndefined('AO_GATEWAY_URL'); diff --git a/src/lib/env.ts b/src/lib/env.ts index 4217aba4..e2b66d75 100644 --- a/src/lib/env.ts +++ b/src/lib/env.ts @@ -38,3 +38,34 @@ export function varOrRandom(envVarName: string): string { } return value; } + +export function boolVar(envVarName: string, defaultValue: boolean): boolean { + return varOrDefault(envVarName, defaultValue ? 'true' : 'false') === 'true'; +} + +export function intVar(envVarName: string, defaultValue: number): number { + return parseInt(varOrDefault(envVarName, String(defaultValue)), 10); +} + +export function optionalIntVar(envVarName: string): number | undefined { + const value = varOrUndefined(envVarName); + return value !== undefined ? parseInt(value, 10) : undefined; +} + +export function listVar(envVarName: string, defaultValue: string): string[] { + return varOrDefault(envVarName, defaultValue).split(',').filter(Boolean); +} + +export function optionalListVar(envVarName: string): string[] { + const value = varOrUndefined(envVarName); + return value !== undefined ? value.split(',').filter(Boolean) : []; +} + +export function jsonVar<T>(envVarName: string, defaultValue: T): T { + return JSON.parse(varOrDefault(envVarName, JSON.stringify(defaultValue))); +} + +export function urlVar(envVarName: string): string | undefined { + const url = varOrUndefined(envVarName); + return url?.replace(/\/+$/, ''); }